/* ccan/tally/tally.c */
  1. /* Licensed under LGPLv3+ - see LICENSE file for details */
  2. #include <ccan/tally/tally.h>
  3. #include <ccan/build_assert/build_assert.h>
  4. #include <ccan/likely/likely.h>
  5. #include <stdint.h>
  6. #include <limits.h>
  7. #include <string.h>
  8. #include <stdio.h>
  9. #include <assert.h>
  10. #include <stdlib.h>
  11. #define SIZET_BITS (sizeof(size_t)*CHAR_BIT)
/* We use power of 2 steps. I tried being tricky, but it got buggy. */
struct tally {
	/* Smallest and largest value ever added.  tally_new initializes
	 * them inverted (max = most negative, min = most positive), so
	 * max < min means "no samples yet". */
	ssize_t min, max;
	/* Running sum of all added values as a double-word integer:
	 * total[0] is the low word, total[1] the high (carry) word. */
	size_t total[2];
	/* This allows limited frequency analysis. */
	unsigned buckets, step_bits;
	/* Pre-C99 trailing-array idiom: tally_new allocates (buckets-1)
	 * extra elements beyond this one. */
	size_t counts[1 /* Actually: [buckets] */ ];
};
  20. struct tally *tally_new(unsigned buckets)
  21. {
  22. struct tally *tally;
  23. /* There is always 1 bucket. */
  24. if (buckets == 0) {
  25. buckets = 1;
  26. }
  27. /* Overly cautious check for overflow. */
  28. if (sizeof(*tally) * buckets / sizeof(*tally) != buckets) {
  29. return NULL;
  30. }
  31. tally = (struct tally *)malloc(
  32. sizeof(*tally) + sizeof(tally->counts[0])*(buckets-1));
  33. if (tally == NULL) {
  34. return NULL;
  35. }
  36. tally->max = ((size_t)1 << (SIZET_BITS - 1));
  37. tally->min = ~tally->max;
  38. tally->total[0] = tally->total[1] = 0;
  39. tally->buckets = buckets;
  40. tally->step_bits = 0;
  41. memset(tally->counts, 0, sizeof(tally->counts[0])*buckets);
  42. return tally;
  43. }
  44. static unsigned bucket_of(ssize_t min, unsigned step_bits, ssize_t val)
  45. {
  46. /* Don't over-shift. */
  47. if (step_bits == SIZET_BITS) {
  48. return 0;
  49. }
  50. assert(step_bits < SIZET_BITS);
  51. return (size_t)(val - min) >> step_bits;
  52. }
  53. /* Return the min value in bucket b. */
  54. static ssize_t bucket_min(ssize_t min, unsigned step_bits, unsigned b)
  55. {
  56. /* Don't over-shift. */
  57. if (step_bits == SIZET_BITS) {
  58. return min;
  59. }
  60. assert(step_bits < SIZET_BITS);
  61. return min + ((ssize_t)b << step_bits);
  62. }
  63. /* Does shifting by this many bits truncate the number? */
  64. static bool shift_overflows(size_t num, unsigned bits)
  65. {
  66. if (bits == 0) {
  67. return false;
  68. }
  69. return ((num << bits) >> 1) != (num << (bits - 1));
  70. }
  71. /* When min or max change, we may need to shuffle the frequency counts. */
  72. static void renormalize(struct tally *tally,
  73. ssize_t new_min, ssize_t new_max)
  74. {
  75. size_t range, spill;
  76. unsigned int i, old_min;
  77. /* Uninitialized? Don't do anything... */
  78. if (tally->max < tally->min) {
  79. goto update;
  80. }
  81. /* If we don't have sufficient range, increase step bits until
  82. * buckets cover entire range of ssize_t anyway. */
  83. range = (new_max - new_min) + 1;
  84. while (!shift_overflows(tally->buckets, tally->step_bits)
  85. && range > ((size_t)tally->buckets << tally->step_bits)) {
  86. /* Collapse down. */
  87. for (i = 1; i < tally->buckets; i++) {
  88. tally->counts[i/2] += tally->counts[i];
  89. tally->counts[i] = 0;
  90. }
  91. tally->step_bits++;
  92. }
  93. /* Now if minimum has dropped, move buckets up. */
  94. old_min = bucket_of(new_min, tally->step_bits, tally->min);
  95. memmove(tally->counts + old_min,
  96. tally->counts,
  97. sizeof(tally->counts[0]) * (tally->buckets - old_min));
  98. memset(tally->counts, 0, sizeof(tally->counts[0]) * old_min);
  99. /* If we moved boundaries, adjust buckets to that ratio. */
  100. spill = (tally->min - new_min) % (1 << tally->step_bits);
  101. for (i = 0; i < tally->buckets-1; i++) {
  102. size_t adjust = (tally->counts[i] >> tally->step_bits) * spill;
  103. tally->counts[i] -= adjust;
  104. tally->counts[i+1] += adjust;
  105. }
  106. update:
  107. tally->min = new_min;
  108. tally->max = new_max;
  109. }
/* Record one sample: widen the range if needed, update the running
 * double-word total, and bump the matching frequency bucket. */
void tally_add(struct tally *tally, ssize_t val)
{
	ssize_t new_min = tally->min, new_max = tally->max;
	bool need_renormalize = false;

	if (val < tally->min) {
		new_min = val;
		need_renormalize = true;
	}
	if (val > tally->max) {
		new_max = val;
		need_renormalize = true;
	}
	if (need_renormalize) {
		renormalize(tally, new_min, new_max);
	}

	/* 128-bit arithmetic!  If we didn't want exact mean, we could just
	 * pull it out of counts.  Detect carry/borrow on the low word by
	 * unsigned wrap-around: adding a positive val must not shrink it,
	 * adding a negative val must not grow it. */
	if (val > 0 && tally->total[0] + val < tally->total[0]) {
		tally->total[1]++;
	} else if (val < 0 && tally->total[0] + val > tally->total[0]) {
		tally->total[1]--;
	}
	tally->total[0] += val;
	/* Bucket lookup uses the (possibly just-updated) min/step. */
	tally->counts[bucket_of(tally->min, tally->step_bits, val)]++;
}
  135. size_t tally_num(const struct tally *tally)
  136. {
  137. size_t i, num = 0;
  138. for (i = 0; i < tally->buckets; i++) {
  139. num += tally->counts[i];
  140. }
  141. return num;
  142. }
/* Smallest value ever added (before any tally_add this is the sentinel
 * "most positive" value set by tally_new). */
ssize_t tally_min(const struct tally *tally)
{
	return tally->min;
}
/* Largest value ever added (before any tally_add this is the sentinel
 * "most negative" value set by tally_new). */
ssize_t tally_max(const struct tally *tally)
{
	return tally->max;
}
/* FIXME: Own ccan module please! */
/* Find last (most significant) set bit, 1-based: fls64(1) == 1,
 * fls64(1ULL << 63) == 64, fls64(0) == 0. */
static unsigned fls64(uint64_t val)
{
#if HAVE_BUILTIN_CLZL
	if (val <= ULONG_MAX) {
		/* This is significantly faster! */
		return val ? sizeof(long) * CHAR_BIT - __builtin_clzl(val) : 0;
	} else {
#endif
		/* Portable fallback: binary search by shifting val left
		 * until its top bit is set, reducing r each time by the
		 * same amount. */
		uint64_t r = 64;
		if (!val) {
			return 0;
		}
		if (!(val & 0xffffffff00000000ull)) {
			val <<= 32;
			r -= 32;
		}
		if (!(val & 0xffff000000000000ull)) {
			val <<= 16;
			r -= 16;
		}
		if (!(val & 0xff00000000000000ull)) {
			val <<= 8;
			r -= 8;
		}
		if (!(val & 0xf000000000000000ull)) {
			val <<= 4;
			r -= 4;
		}
		if (!(val & 0xc000000000000000ull)) {
			val <<= 2;
			r -= 2;
		}
		if (!(val & 0x8000000000000000ull)) {
			val <<= 1;
			r -= 1;
		}
		return r;
#if HAVE_BUILTIN_CLZL
	}
#endif
}
/* This is stolen straight from Hacker's Delight. */
/* 128/64 unsigned long division: returns (u1:u0) / v where u1 holds the
 * high 64 bits of the dividend.  Returns all-ones if the quotient would
 * not fit (u1 >= v).  Classic schoolbook division in base 2^32. */
static uint64_t divlu64(uint64_t u1, uint64_t u0, uint64_t v)
{
	const uint64_t b = 4294967296ULL; /* Number base (32 bits). */
	uint32_t un[4],		/* Dividend and divisor */
		vn[2];		/* normalized and broken */
				/* up into halfwords. */
	uint32_t q[2];		/* Quotient as halfwords. */
	uint64_t un1, un0,	/* Dividend and divisor */
		vn0;		/* as fullwords. */
	uint64_t qhat;		/* Estimated quotient digit. */
	uint64_t rhat;		/* A remainder. */
	uint64_t p;		/* Product of two digits. */
	int64_t s, i, j, t, k;

	if (u1 >= v) {		/* If overflow, return the largest */
		return (uint64_t)-1; /* possible quotient. */
	}

	s = 64 - fls64(v);	/* 0 <= s <= 63. */
	vn0 = v << s;		/* Normalize divisor. */
	vn[1] = vn0 >> 32;	/* Break divisor up into */
	vn[0] = vn0 & 0xFFFFFFFF; /* two 32-bit halves. */

	/* Shift dividend left by the same amount.
	 * NOTE(review): when s == 0, "u0 >> (64 - s)" shifts by the full
	 * type width, formally undefined; the (-s >> 63) mask zeroes the
	 * result as in the original Hacker's Delight code, but this
	 * relies on common hardware shift behavior — confirm for exotic
	 * targets. */
	un1 = ((u1 << s) | (u0 >> (64 - s))) & (-s >> 63);
	un0 = u0 << s;
	un[3] = un1 >> 32;	/* Break dividend up into */
	un[2] = un1;		/* four 32-bit halfwords */
	un[1] = un0 >> 32;	/* Note: storing into */
	un[0] = un0;		/* halfwords truncates. */

	for (j = 1; j >= 0; j--) {
		/* Compute estimate qhat of q[j]; it may be at most 2 too
		 * large, corrected by the "again" loop below. */
		qhat = (un[j+2]*b + un[j+1])/vn[1];
		rhat = (un[j+2]*b + un[j+1]) - qhat*vn[1];
	again:
		if (qhat >= b || qhat*vn[0] > b*rhat + un[j]) {
			qhat = qhat - 1;
			rhat = rhat + vn[1];
			if (rhat < b) {
				goto again;
			}
		}

		/* Multiply and subtract (long-division step), tracking
		 * the borrow in k. */
		k = 0;
		for (i = 0; i < 2; i++) {
			p = qhat*vn[i];
			t = un[i+j] - k - (p & 0xFFFFFFFF);
			un[i+j] = t;
			k = (p >> 32) - (t >> 32);
		}
		t = un[j+2] - k;
		un[j+2] = t;

		q[j] = qhat;	/* Store quotient digit. */
		if (t < 0) {	/* If we subtracted too */
			q[j] = q[j] - 1;	/* much, add back. */
			k = 0;
			for (i = 0; i < 2; i++) {
				t = un[i+j] + vn[i] + k;
				un[i+j] = t;
				k = t >> 32;
			}
			un[j+2] = un[j+2] + k;
		}
	}			/* End j. */

	return q[1]*b + q[0];
}
/* 128/64 signed division built on divlu64: returns (u1:u0) / v with the
 * usual sign rules, or the most negative quotient on overflow.
 * Also from Hacker's Delight. */
static int64_t divls64(int64_t u1, uint64_t u0, int64_t v)
{
	int64_t q, uneg, vneg, diff, borrow;

	uneg = u1 >> 63;	/* -1 if u < 0. */
	if (uneg) {		/* Compute the absolute */
		u0 = -u0;	/* value of the dividend u. */
		borrow = (u0 != 0);
		u1 = -u1 - borrow;
	}

	vneg = v >> 63;		/* -1 if v < 0. */
	v = (v ^ vneg) - vneg;	/* Absolute value of v. */

	if ((uint64_t)u1 >= (uint64_t)v) {
		goto overflow;
	}

	q = divlu64(u1, u0, v);

	diff = uneg ^ vneg;	/* Negate q if signs of */
	q = (q ^ diff) - diff;	/* u and v differed. */

	/* NOTE(review): the sign tricks above (>> 63 on negative values,
	 * negate-by-xor) assume two's-complement arithmetic-shift
	 * behavior, as the original Hacker's Delight code does. */
	if ((diff ^ q) < 0 && q != 0) {	/* If overflow, return the
					   largest */
overflow:			/* possible neg. quotient. */
		q = 0x8000000000000000ULL;
	}
	return q;
}
  281. ssize_t tally_mean(const struct tally *tally)
  282. {
  283. size_t count = tally_num(tally);
  284. if (!count) {
  285. return 0;
  286. }
  287. if (sizeof(tally->total[0]) == sizeof(uint32_t)) {
  288. /* Use standard 64-bit arithmetic. */
  289. int64_t total = tally->total[0]
  290. | (((uint64_t)tally->total[1]) << 32);
  291. return total / count;
  292. }
  293. return divls64(tally->total[1], tally->total[0], count);
  294. }
/* Sum of all values added.  If overflow is non-NULL, the caller gets the
 * exact double-word result (high word in *overflow, low word returned).
 * Otherwise the result saturates to the ssize_t range. */
ssize_t tally_total(const struct tally *tally, ssize_t *overflow)
{
	if (overflow) {
		*overflow = tally->total[1];
		return tally->total[0];
	}

	/* If result is negative, make sure we can represent it. */
	if (tally->total[1] & ((size_t)1 << (SIZET_BITS-1))) {
		/* Must have only underflowed once, and must be able to
		 * represent result at ssize_t.
		 * "(~total[1])+1 != 0" tests total[1] != all-ones (i.e.
		 * not exactly -1); a non-negative low word would also not
		 * fit a negative ssize_t. */
		if ((~tally->total[1])+1 != 0
		    || (ssize_t)tally->total[0] >= 0) {
			/* Underflow, return minimum. */
			return (ssize_t)((size_t)1 << (SIZET_BITS - 1));
		}
	} else {
		/* Result is positive, must not have overflowed, and must be
		 * able to represent as ssize_t. */
		if (tally->total[1] || (ssize_t)tally->total[0] < 0) {
			/* Overflow. Return maximum. */
			return (ssize_t)~((size_t)1 << (SIZET_BITS - 1));
		}
	}
	return tally->total[0];
}
  320. static ssize_t bucket_range(const struct tally *tally, unsigned b, size_t *err)
  321. {
  322. ssize_t min, max;
  323. min = bucket_min(tally->min, tally->step_bits, b);
  324. if (b == tally->buckets - 1) {
  325. max = tally->max;
  326. } else {
  327. max = bucket_min(tally->min, tally->step_bits, b+1) - 1;
  328. }
  329. /* FIXME: Think harder about cumulative error; is this enough?. */
  330. *err = (max - min + 1) / 2;
  331. /* Avoid overflow. */
  332. return min + (max - min) / 2;
  333. }
  334. ssize_t tally_approx_median(const struct tally *tally, size_t *err)
  335. {
  336. size_t count = tally_num(tally), total = 0;
  337. unsigned int i;
  338. for (i = 0; i < tally->buckets; i++) {
  339. total += tally->counts[i];
  340. if (total * 2 >= count) {
  341. break;
  342. }
  343. }
  344. return bucket_range(tally, i, err);
  345. }
  346. ssize_t tally_approx_mode(const struct tally *tally, size_t *err)
  347. {
  348. unsigned int i, min_best = 0, max_best = 0;
  349. for (i = 0; i < tally->buckets; i++) {
  350. if (tally->counts[i] > tally->counts[min_best]) {
  351. min_best = max_best = i;
  352. } else if (tally->counts[i] == tally->counts[min_best]) {
  353. max_best = i;
  354. }
  355. }
  356. /* We can have more than one best, making our error huge. */
  357. if (min_best != max_best) {
  358. ssize_t min, max;
  359. min = bucket_range(tally, min_best, err);
  360. max = bucket_range(tally, max_best, err);
  361. max += *err;
  362. *err += (size_t)(max - min);
  363. return min + (max - min) / 2;
  364. }
  365. return bucket_range(tally, min_best, err);
  366. }
  367. static unsigned get_max_bucket(const struct tally *tally)
  368. {
  369. unsigned int i;
  370. for (i = tally->buckets; i > 0; i--) {
  371. if (tally->counts[i-1]) {
  372. break;
  373. }
  374. }
  375. return i;
  376. }
/* Render the tally as an ASCII bar graph, height rows of up to width
 * characters each (newline-terminated, NUL-terminated overall).  The
 * bottom row is labelled with min, the top with max, and the bucket
 * containing zero is marked '+'.  Returns a malloc'd string the caller
 * frees, or NULL on allocation failure. */
char *tally_histogram(const struct tally *tally,
		      unsigned width, unsigned height)
{
	unsigned int i, count, max_bucket, largest_bucket;
	struct tally *tmp;
	char *graph, *p;

	assert(width >= TALLY_MIN_HISTO_WIDTH);
	assert(height >= TALLY_MIN_HISTO_HEIGHT);

	/* Ignore unused buckets. */
	max_bucket = get_max_bucket(tally);

	/* FIXME: It'd be nice to smooth here... */
	if (height >= max_bucket) {
		height = max_bucket;
		tmp = NULL;
	} else {
		/* We create a temporary then renormalize so < height. */
		/* FIXME: Antialias properly! */
		tmp = tally_new(tally->buckets);
		if (!tmp) {
			return NULL;
		}
		tmp->min = tally->min;
		tmp->max = tally->max;
		tmp->step_bits = tally->step_bits;
		memcpy(tmp->counts, tally->counts,
		       sizeof(tally->counts[0]) * tmp->buckets);
		/* Doubling max widens the bucket step, so fewer buckets
		 * are occupied each pass until they fit in height rows. */
		while ((max_bucket = get_max_bucket(tmp)) >= height) {
			renormalize(tmp, tmp->min, tmp->max * 2);
		}
		/* Restore max */
		tmp->max = tally->max;
		tally = tmp;
		height = max_bucket;
	}

	/* Figure out longest line, for scale. */
	largest_bucket = 0;
	for (i = 0; i < tally->buckets; i++) {
		if (tally->counts[i] > largest_bucket) {
			largest_bucket = tally->counts[i];
		}
	}

	/* height rows of (width chars + '\n'), plus final NUL. */
	p = graph = (char *)malloc(height * (width + 1) + 1);
	if (!graph) {
		free(tmp);
		return NULL;
	}

	for (i = 0; i < height; i++) {
		unsigned covered = 1, row;

		/* People expect minimum at the bottom. */
		row = height - i - 1;
		/* Columns for this row, scaled to [1, width].  Safe
		 * division: height > 0 implies max_bucket > 0, which
		 * implies some count is non-zero, so largest_bucket > 0. */
		count = (double)tally->counts[row] / largest_bucket * (width-1)+1;

		/* Left margin: numeric labels on the extreme rows, '+'
		 * on the bucket containing zero, '|' elsewhere. */
		if (row == 0) {
			covered = snprintf(p, width, "%zi", tally->min);
		} else if (row == height - 1) {
			covered = snprintf(p, width, "%zi", tally->max);
		} else if (row == bucket_of(tally->min, tally->step_bits, 0)) {
			*p = '+';
		} else {
			*p = '|';
		}
		/* snprintf returns the would-be length; clamp truncation. */
		if (covered > width) {
			covered = width;
		}
		p += covered;

		/* The label eats into the bar's columns. */
		if (count > covered) {
			count -= covered;
		} else {
			count = 0;
		}
		memset(p, '*', count);
		p += count;
		*p = '\n';
		p++;
	}
	*p = '\0';

	free(tmp);
	return graph;
}