/* tally.c */
#include "config.h"
#include <ccan/tally/tally.h>
#include <ccan/build_assert/build_assert.h>
#include <ccan/likely/likely.h>
#include <assert.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>	/* malloc/free — was missing */
#include <string.h>

#define SIZET_BITS (sizeof(size_t)*CHAR_BIT)
/* We use power of 2 steps. I tried being tricky, but it got buggy. */
struct tally {
	/* Extremes of all values ever added; max < min marks "empty"
	 * (see tally_new initialization). */
	ssize_t min, max;
	/* Running sum as a two-word value: total[0] is the low size_t,
	 * total[1] absorbs carries/borrows (see tally_add, tally_total). */
	size_t total[2];
	/* This allows limited frequency analysis. */
	/* Number of count buckets; always at least 1. */
	size_t buckets;
	/* Each bucket spans (1 << step_bits) consecutive values from min. */
	size_t step_bits;
	/* Trailing array: tally_new() over-allocates so this actually holds
	 * `buckets` entries; the [1] supplies the always-present bucket. */
	size_t counts[1 /* [buckets] */ ];
};
/* Allocate a tally with @buckets frequency buckets (the struct's counts[1]
 * contributes one more, so there is always at least one bucket).
 * Returns NULL on multiplication overflow or allocation failure.
 * min/max start inverted (max < min) to mark the tally as empty. */
struct tally *tally_new(size_t buckets)
{
	struct tally *tally;

	/* Check for overflow. */
	if (buckets && SIZE_MAX / buckets < sizeof(tally->counts[0]))
		return NULL;
	/* counts[1] inside the struct supplies the extra bucket, so only
	 * @buckets additional entries are allocated here. */
	tally = malloc(sizeof(*tally) + sizeof(tally->counts[0])*buckets);
	if (tally) {
		/* SSIZE_MAX isn't portable, so make it one of these types. */
		BUILD_ASSERT(sizeof(tally->min) == sizeof(int)
			     || sizeof(tally->min) == sizeof(long)
			     || sizeof(tally->min) == sizeof(long long));
		/* Pick the {MAX,MIN} limits matching ssize_t's width. */
		if (sizeof(tally->min) == sizeof(int)) {
			tally->min = INT_MAX;
			tally->max = INT_MIN;
		} else if (sizeof(tally->min) == sizeof(long)) {
			tally->min = LONG_MAX;
			tally->max = LONG_MIN;
		} else if (sizeof(tally->min) == sizeof(long long)) {
			tally->min = (ssize_t)LLONG_MAX;
			tally->max = (ssize_t)LLONG_MIN;
		}
		tally->total[0] = tally->total[1] = 0;
		/* There is always 1 bucket. */
		tally->buckets = buckets+1;
		tally->step_bits = 0;
		memset(tally->counts, 0, sizeof(tally->counts[0])*(buckets+1));
	}
	return tally;
}
  50. static unsigned bucket_of(ssize_t min, unsigned step_bits, ssize_t val)
  51. {
  52. /* Don't over-shift. */
  53. if (step_bits == SIZET_BITS)
  54. return 0;
  55. assert(step_bits < SIZET_BITS);
  56. return (size_t)(val - min) >> step_bits;
  57. }
  58. /* Return the min value in bucket b. */
  59. static ssize_t bucket_min(ssize_t min, unsigned step_bits, unsigned b)
  60. {
  61. /* Don't over-shift. */
  62. if (step_bits == SIZET_BITS)
  63. return min;
  64. assert(step_bits < SIZET_BITS);
  65. return min + ((ssize_t)b << step_bits);
  66. }
  67. /* Does shifting by this many bits truncate the number? */
  68. static bool shift_overflows(size_t num, unsigned bits)
  69. {
  70. if (bits == 0)
  71. return false;
  72. return ((num << bits) >> 1) != (num << (bits - 1));
  73. }
  74. /* When min or max change, we may need to shuffle the frequency counts. */
  75. static void renormalize(struct tally *tally,
  76. ssize_t new_min, ssize_t new_max)
  77. {
  78. size_t range, spill;
  79. unsigned int i, old_min;
  80. /* Uninitialized? Don't do anything... */
  81. if (tally->max < tally->min)
  82. goto update;
  83. /* If we don't have sufficient range, increase step bits until
  84. * buckets cover entire range of ssize_t anyway. */
  85. range = (new_max - new_min) + 1;
  86. while (!shift_overflows(tally->buckets, tally->step_bits)
  87. && range > ((size_t)tally->buckets << tally->step_bits)) {
  88. /* Collapse down. */
  89. for (i = 1; i < tally->buckets; i++) {
  90. tally->counts[i/2] += tally->counts[i];
  91. tally->counts[i] = 0;
  92. }
  93. tally->step_bits++;
  94. }
  95. /* Now if minimum has dropped, move buckets up. */
  96. old_min = bucket_of(new_min, tally->step_bits, tally->min);
  97. memmove(tally->counts + old_min,
  98. tally->counts,
  99. sizeof(tally->counts[0]) * (tally->buckets - old_min));
  100. memset(tally->counts, 0, sizeof(tally->counts[0]) * old_min);
  101. /* If we moved boundaries, adjust buckets to that ratio. */
  102. spill = (tally->min - new_min) % (1 << tally->step_bits);
  103. for (i = 0; i < tally->buckets-1; i++) {
  104. size_t adjust = (tally->counts[i] >> tally->step_bits) * spill;
  105. tally->counts[i] -= adjust;
  106. tally->counts[i+1] += adjust;
  107. }
  108. update:
  109. tally->min = new_min;
  110. tally->max = new_max;
  111. }
  112. void tally_add(struct tally *tally, ssize_t val)
  113. {
  114. ssize_t new_min = tally->min, new_max = tally->max;
  115. bool need_renormalize = false;
  116. if (val < tally->min) {
  117. new_min = val;
  118. need_renormalize = true;
  119. }
  120. if (val > tally->max) {
  121. new_max = val;
  122. need_renormalize = true;
  123. }
  124. if (need_renormalize)
  125. renormalize(tally, new_min, new_max);
  126. /* 128-bit arithmetic! If we didn't want exact mean, we could just
  127. * pull it out of counts. */
  128. if (val > 0 && tally->total[0] + val < tally->total[0])
  129. tally->total[1]++;
  130. else if (val < 0 && tally->total[0] + val > tally->total[0])
  131. tally->total[1]--;
  132. tally->total[0] += val;
  133. tally->counts[bucket_of(tally->min, tally->step_bits, val)]++;
  134. }
  135. size_t tally_num(const struct tally *tally)
  136. {
  137. size_t i, num = 0;
  138. for (i = 0; i < tally->buckets; i++)
  139. num += tally->counts[i];
  140. return num;
  141. }
/* Smallest value ever added (the SSIZE maximum before any add). */
ssize_t tally_min(const struct tally *tally)
{
	return tally->min;
}
/* Largest value ever added (the SSIZE minimum before any add). */
ssize_t tally_max(const struct tally *tally)
{
	return tally->max;
}
  150. /* FIXME: Own ccan module please! */
  151. static unsigned fls64(uint64_t val)
  152. {
  153. #if HAVE_BUILTIN_CLZL
  154. if (val <= ULONG_MAX) {
  155. /* This is significantly faster! */
  156. return val ? sizeof(long) * CHAR_BIT - __builtin_clzl(val) : 0;
  157. } else {
  158. #endif
  159. uint64_t r = 64;
  160. if (!val)
  161. return 0;
  162. if (!(val & 0xffffffff00000000ull)) {
  163. val <<= 32;
  164. r -= 32;
  165. }
  166. if (!(val & 0xffff000000000000ull)) {
  167. val <<= 16;
  168. r -= 16;
  169. }
  170. if (!(val & 0xff00000000000000ull)) {
  171. val <<= 8;
  172. r -= 8;
  173. }
  174. if (!(val & 0xf000000000000000ull)) {
  175. val <<= 4;
  176. r -= 4;
  177. }
  178. if (!(val & 0xc000000000000000ull)) {
  179. val <<= 2;
  180. r -= 2;
  181. }
  182. if (!(val & 0x8000000000000000ull)) {
  183. val <<= 1;
  184. r -= 1;
  185. }
  186. return r;
  187. #if HAVE_BUILTIN_CLZL
  188. }
  189. #endif
  190. }
  191. /* This is stolen straight from Hacker's Delight. */
  192. static uint64_t divlu64(uint64_t u1, uint64_t u0, uint64_t v)
  193. {
  194. const uint64_t b = 4294967296ULL; // Number base (32 bits).
  195. uint32_t un[4], // Dividend and divisor
  196. vn[2]; // normalized and broken
  197. // up into halfwords.
  198. uint32_t q[2]; // Quotient as halfwords.
  199. uint64_t un1, un0, // Dividend and divisor
  200. vn0; // as fullwords.
  201. uint64_t qhat; // Estimated quotient digit.
  202. uint64_t rhat; // A remainder.
  203. uint64_t p; // Product of two digits.
  204. int64_t s, i, j, t, k;
  205. if (u1 >= v) // If overflow, return the largest
  206. return (uint64_t)-1; // possible quotient.
  207. s = 64 - fls64(v); // 0 <= s <= 63.
  208. vn0 = v << s; // Normalize divisor.
  209. vn[1] = vn0 >> 32; // Break divisor up into
  210. vn[0] = vn0 & 0xFFFFFFFF; // two 32-bit halves.
  211. // Shift dividend left.
  212. un1 = ((u1 << s) | (u0 >> (64 - s))) & (-s >> 63);
  213. un0 = u0 << s;
  214. un[3] = un1 >> 32; // Break dividend up into
  215. un[2] = un1; // four 32-bit halfwords
  216. un[1] = un0 >> 32; // Note: storing into
  217. un[0] = un0; // halfwords truncates.
  218. for (j = 1; j >= 0; j--) {
  219. // Compute estimate qhat of q[j].
  220. qhat = (un[j+2]*b + un[j+1])/vn[1];
  221. rhat = (un[j+2]*b + un[j+1]) - qhat*vn[1];
  222. again:
  223. if (qhat >= b || qhat*vn[0] > b*rhat + un[j]) {
  224. qhat = qhat - 1;
  225. rhat = rhat + vn[1];
  226. if (rhat < b) goto again;
  227. }
  228. // Multiply and subtract.
  229. k = 0;
  230. for (i = 0; i < 2; i++) {
  231. p = qhat*vn[i];
  232. t = un[i+j] - k - (p & 0xFFFFFFFF);
  233. un[i+j] = t;
  234. k = (p >> 32) - (t >> 32);
  235. }
  236. t = un[j+2] - k;
  237. un[j+2] = t;
  238. q[j] = qhat; // Store quotient digit.
  239. if (t < 0) { // If we subtracted too
  240. q[j] = q[j] - 1; // much, add back.
  241. k = 0;
  242. for (i = 0; i < 2; i++) {
  243. t = un[i+j] + vn[i] + k;
  244. un[i+j] = t;
  245. k = t >> 32;
  246. }
  247. un[j+2] = un[j+2] + k;
  248. }
  249. } // End j.
  250. return q[1]*b + q[0];
  251. }
/* Signed 128/64-bit divide (also from Hacker's Delight): (u1:u0) / v,
 * where u1:u0 is a two's-complement 128-bit value.  Returns the most
 * negative 64-bit value on overflow. */
static int64_t divls64(int64_t u1, uint64_t u0, int64_t v)
{
	int64_t q, uneg, vneg, diff, borrow;

	uneg = u1 >> 63;	// -1 if u < 0.
	if (uneg) {		// Compute the absolute
		u0 = -u0;	// value of the dividend u.
		borrow = (u0 != 0);
		u1 = -u1 - borrow;
	}

	vneg = v >> 63;		// -1 if v < 0.
	v = (v ^ vneg) - vneg;	// Absolute value of v.

	/* Unsigned compare: |quotient| must fit in 64 bits. */
	if ((uint64_t)u1 >= (uint64_t)v)
		goto overflow;

	q = divlu64(u1, u0, v);

	diff = uneg ^ vneg;	// Negate q if signs of
	q = (q ^ diff) - diff;	// u and v differed.

	if ((diff ^ q) < 0 && q != 0) {	// If overflow, return the largest
	overflow:			// possible neg. quotient.
		q = 0x8000000000000000ULL;
	}
	return q;
}
  274. ssize_t tally_mean(const struct tally *tally)
  275. {
  276. size_t count = tally_num(tally);
  277. if (!count)
  278. return 0;
  279. if (sizeof(tally->total[0]) == sizeof(uint32_t)) {
  280. /* Use standard 64-bit arithmetic. */
  281. int64_t total = tally->total[0]
  282. | (((uint64_t)tally->total[1]) << 32);
  283. return total / count;
  284. }
  285. return divls64(tally->total[1], tally->total[0], count);
  286. }
  287. ssize_t tally_total(const struct tally *tally, ssize_t *overflow)
  288. {
  289. if (overflow) {
  290. *overflow = tally->total[1];
  291. return tally->total[0];
  292. }
  293. /* If result is negative, make sure we can represent it. */
  294. if (tally->total[1] & (1 << (SIZET_BITS-1))) {
  295. /* Must have only underflowed once, and must be able to
  296. * represent result at ssize_t. */
  297. if ((~tally->total[1])+1 != 0
  298. || (ssize_t)tally->total[0] >= 0) {
  299. /* Underflow, return minimum. */
  300. return (ssize_t)((size_t)1 << (SIZET_BITS - 1));
  301. }
  302. } else {
  303. /* Result is positive, must not have overflowed, and must be
  304. * able to represent as ssize_t. */
  305. if (tally->total[1] || (ssize_t)tally->total[0] < 0) {
  306. /* Overflow. Return maximum. */
  307. return (ssize_t)~((size_t)1 << (SIZET_BITS - 1));
  308. }
  309. }
  310. return tally->total[0];
  311. }
  312. static ssize_t bucket_range(const struct tally *tally, unsigned b, size_t *err)
  313. {
  314. ssize_t min, max;
  315. min = bucket_min(tally->min, tally->step_bits, b);
  316. if (b == tally->buckets - 1)
  317. max = tally->max;
  318. else
  319. max = bucket_min(tally->min, tally->step_bits, b+1) - 1;
  320. /* FIXME: Think harder about cumulative error; is this enough?. */
  321. *err = (max - min + 1) / 2;
  322. /* Avoid overflow. */
  323. return min + (max - min) / 2;
  324. }
  325. ssize_t tally_approx_median(const struct tally *tally, size_t *err)
  326. {
  327. size_t count = tally_num(tally), total = 0;
  328. unsigned int i;
  329. for (i = 0; i < tally->buckets; i++) {
  330. total += tally->counts[i];
  331. if (total * 2 >= count)
  332. break;
  333. }
  334. return bucket_range(tally, i, err);
  335. }
/* Approximate mode: midpoint of the fullest bucket, *err half its width.
 * If several buckets tie for fullest, the error widens to span them all. */
ssize_t tally_approx_mode(const struct tally *tally, size_t *err)
{
	unsigned int i, min_best = 0, max_best = 0;

	/* Track the first (min_best) and last (max_best) fullest buckets. */
	for (i = 0; i < tally->buckets; i++) {
		if (tally->counts[i] > tally->counts[min_best]) {
			min_best = max_best = i;
		} else if (tally->counts[i] == tally->counts[min_best]) {
			max_best = i;
		}
	}

	/* We can have more than one best, making our error huge. */
	if (min_best != max_best) {
		ssize_t min, max;
		min = bucket_range(tally, min_best, err);
		max = bucket_range(tally, max_best, err);
		/* Stretch to the far edge of the last tied bucket, then
		 * report the midpoint of the whole tied span. */
		max += *err;
		*err += (size_t)(max - min);
		return min + (max - min) / 2;
	}
	return bucket_range(tally, min_best, err);
}
  357. static unsigned get_max_bucket(const struct tally *tally)
  358. {
  359. unsigned int i;
  360. for (i = tally->buckets; i > 0; i--)
  361. if (tally->counts[i-1])
  362. break;
  363. return i;
  364. }
/* Render the tally as an ASCII bar graph, one '*'-bar row per bucket,
 * `height` rows of at most `width` characters each plus '\n'.
 * The first row is prefixed with the min value, the last with the max.
 * Returns a malloc'd NUL-terminated string (caller frees), or NULL on
 * allocation failure. */
char *tally_histogram(const struct tally *tally,
		      unsigned width, unsigned height)
{
	unsigned int i, count, max_bucket, largest_bucket;
	struct tally *tmp;
	char *graph, *p;

	assert(width >= TALLY_MIN_HISTO_WIDTH);
	assert(height >= TALLY_MIN_HISTO_HEIGHT);

	/* Ignore unused buckets. */
	max_bucket = get_max_bucket(tally);

	/* FIXME: It'd be nice to smooth here... */
	if (height >= max_bucket) {
		height = max_bucket;
		tmp = NULL;
	} else {
		/* We create a temporary then renormalize so < height. */
		/* FIXME: Antialias properly! */
		tmp = tally_new(tally->buckets-1);
		if (!tmp)
			return NULL;
		tmp->min = tally->min;
		tmp->max = tally->max;
		tmp->step_bits = tally->step_bits;
		memcpy(tmp->counts, tally->counts,
		       sizeof(tally->counts[0]) * tmp->buckets);
		/* Doubling max each round makes renormalize() collapse
		 * buckets until few enough are occupied to fit the height. */
		while ((max_bucket = get_max_bucket(tmp)) >= height)
			renormalize(tmp, tmp->min, tmp->max *= 2);
		/* Restore max */
		tmp->max = tally->max;
		/* From here on, read from the coarsened copy. */
		tally = tmp;
		height = max_bucket;
	}

	/* Figure out longest line, for scale. */
	largest_bucket = 0;
	for (i = 0; i < tally->buckets; i++) {
		if (tally->counts[i] > largest_bucket)
			largest_bucket = tally->counts[i];
	}

	/* height rows of (width chars + '\n'), plus the final NUL. */
	p = graph = malloc(height * (width + 1) + 1);
	if (!graph) {
		free(tmp);
		return NULL;
	}

	for (i = 0; i < height; i++) {
		unsigned covered = 0;

		/* Bar length proportional to this bucket's share. */
		count = (double)tally->counts[i] / largest_bucket * width;

		/* Label the first row with min, the last with max. */
		if (i == 0)
			covered = snprintf(p, width, "%zi", tally->min);
		else if (i == height - 1)
			covered = snprintf(p, width, "%zi", tally->max);
		if (covered) {
			/* snprintf returns the would-be length, which can
			 * exceed the buffer; clamp before advancing. */
			if (covered > width)
				covered = width;
			p += covered;
			/* The label eats into this row's bar budget. */
			if (count > covered)
				count -= covered;
			else
				count = 0;
		}
		memset(p, '*', count);
		p += count;
		*p = '\n';
		p++;
	}
	*p = '\0';

	free(tmp);
	return graph;
}