alloc.c (18 KB)
  1. #include <unistd.h>
  2. #include <stdint.h>
  3. #include <string.h>
  4. #include <limits.h>
  5. #include <assert.h>
  6. #include "alloc.h"
  7. #include "build_assert/build_assert.h"
  8. #include "config.h"
#if HAVE_ALIGNOF
#define ALIGNOF(t) __alignof__(t)
#else
/* Alignment by measuring structure padding. */
#define ALIGNOF(t) (sizeof(struct { char c; t _h; }) - 1 - sizeof(t))
#endif

/* FIXME: Doesn't handle non-page-aligned poolsize. */
/* FIXME: Reduce. */
/* Smallest pool we manage: below this, alloc_init/alloc_get refuse. */
#define MIN_SIZE (getpagesize() * 2)

/* What's the granularity of sub-page allocs? */
#define BITMAP_GRANULARITY 4

/* File layout:
 *
 *  file := pagestates pad metadata
 *  pagestates := pages * 2-bits-per-page
 *  pad := pad to next ALIGNOF(metadata)
 *
 *  metadata := metalen next-ptr metabits
 *  metabits := freeblock | bitblock
 *  freeblock := 0+
 *  bitblock := 2-bits-per-bit-in-page 1
 */
/* Header for one region of metadata bits inside the pool.  Headers are
 * chained into a singly-linked list via pool offsets. */
struct metaheader
{
	/* Length (after this header). (FIXME: implied by page bits!). */
	unsigned long metalen;
	/* Next meta header, or 0 */
	unsigned long next;
	/* Bits start here. */
};
  39. /* Assumes a is a power of two. */
  40. static unsigned long align_up(unsigned long x, unsigned long a)
  41. {
  42. return (x + a - 1) & ~(a - 1);
  43. }
  44. static unsigned long div_up(unsigned long x, unsigned long a)
  45. {
  46. return (x + a - 1) / a;
  47. }
  48. /* It turns out that we spend a lot of time dealing with bit pairs.
  49. * These routines manipulate them.
  50. */
  51. static uint8_t get_bit_pair(const uint8_t *bits, unsigned long index)
  52. {
  53. return bits[index * 2 / CHAR_BIT] >> (index * 2 % CHAR_BIT) & 3;
  54. }
  55. static void set_bit_pair(uint8_t *bits, unsigned long index, uint8_t val)
  56. {
  57. bits[index * 2 / CHAR_BIT] &= ~(3 << (index * 2 % CHAR_BIT));
  58. bits[index * 2 / CHAR_BIT] |= (val << (index * 2 % CHAR_BIT));
  59. }
/* This is used for page states and subpage allocations */
enum alloc_state
{
	FREE,		/* Unallocated.  Must be 0 (memset relies on it). */
	TAKEN,		/* Continuation of a multi-unit allocation. */
	TAKEN_START,	/* First unit of an allocation. */
	SPECIAL,	/* Sub-page allocation for page states. */
};
/* The types for subpage metadata. */
enum sub_metadata_type
{
	/* FREE is same as alloc state */
	BITMAP = 1,	/* A subpage-page bitmap; numeric value shared with
			 * TAKEN, distinguished by context. */
};
/* Page states are represented by bitpairs, at the start of the pool. */
#define BITS_PER_PAGE 2

/* Read the alloc_state of the given page number. */
static enum alloc_state get_page_state(const void *pool, unsigned long page)
{
	return get_bit_pair(pool, page);
}

/* Record the alloc_state of the given page number. */
static void set_page_state(void *pool, unsigned long page, enum alloc_state s)
{
	set_bit_pair(pool, page, s);
}
/* The offset of metadata for a subpage allocation is found at the end
 * of the subpage */
#define SUBPAGE_METAOFF (getpagesize() - sizeof(unsigned long))

/* This is the length of metadata in bits.  It consists of two bits
 * for every BITMAP_GRANULARITY of usable bytes in the page, then two
 * bits for the tailer.. */
#define BITMAP_METABITLEN						\
	((div_up(SUBPAGE_METAOFF, BITMAP_GRANULARITY) + 1) * BITS_PER_PAGE)

/* This is the length in bytes. */
#define BITMAP_METALEN (div_up(BITMAP_METABITLEN, CHAR_BIT))
/* The first metaheader sits immediately after the page-state bitpairs,
 * rounded up to the metaheader's alignment. */
static struct metaheader *first_mheader(void *pool, unsigned long poolsize)
{
	unsigned int pagestatelen;

	pagestatelen = align_up(div_up(poolsize/getpagesize() * BITS_PER_PAGE,
				       CHAR_BIT),
				ALIGNOF(struct metaheader));
	return (struct metaheader *)((char *)pool + pagestatelen);
}
  102. static struct metaheader *next_mheader(void *pool, struct metaheader *mh)
  103. {
  104. if (!mh->next)
  105. return NULL;
  106. return (struct metaheader *)((char *)pool + mh->next);
  107. }
  108. static unsigned long pool_offset(void *pool, void *p)
  109. {
  110. return (char *)p - (char *)pool;
  111. }
/* Initialize a pool: zero the page-state bitpairs and first metaheader
 * region, then mark the pages holding that bookkeeping as allocated.
 * Pools smaller than MIN_SIZE are silently left untouched. */
void alloc_init(void *pool, unsigned long poolsize)
{
	/* FIXME: Alignment assumptions about pool. */
	unsigned long len, i;
	struct metaheader *mh;

	if (poolsize < MIN_SIZE)
		return;

	mh = first_mheader(pool, poolsize);

	/* Mark all page states FREE, and all of metaheader bitmap which takes
	 * rest of first page. */
	len = align_up(pool_offset(pool, mh + 1), getpagesize());
	BUILD_ASSERT(FREE == 0);
	memset(pool, 0, len);

	/* Set up metalen */
	mh->metalen = len - pool_offset(pool, mh + 1);

	/* Mark the pagestate and metadata page(s) allocated. */
	set_page_state(pool, 0, TAKEN_START);
	for (i = 1; i < div_up(len, getpagesize()); i++)
		set_page_state(pool, i, TAKEN);
}
/* Two bits per element, representing page states.  Returns 0 on fail.
 * off is used to allocate from subpage bitmaps, which use the first 2
 * bits as the type, so the real bitmap is offset by 1.
 *
 * Scans top-down for `want' consecutive FREE elements whose start index
 * satisfies `align' (0 means unconstrained), marks them
 * TAKEN_START + TAKEN..., and returns the start index (plus off).
 * Note: success at index 0 is indistinguishable from failure; callers
 * rely on element 0 never being allocatable (e.g. page 0 holds the
 * pool bookkeeping). */
static unsigned long alloc_from_bitmap(uint8_t *bits, unsigned long off,
				       unsigned long elems,
				       unsigned long want, unsigned long align)
{
	long i;
	unsigned long free;

	free = 0;
	/* We allocate from far end, to increase ability to expand metadata. */
	for (i = elems - 1; i >= 0; i--) {
		switch (get_bit_pair(bits, off+i)) {
		case FREE:
			if (++free >= want) {
				unsigned long j;

				/* They might ask for large alignment:
				 * keep scanning down until the run start
				 * is aligned (the FREE run only grows). */
				if (align && i % align)
					continue;

				set_bit_pair(bits, off+i, TAKEN_START);
				for (j = i+1; j < i + want; j++)
					set_bit_pair(bits, off+j, TAKEN);
				return off+i;
			}
			break;
		case SPECIAL:
		case TAKEN_START:
		case TAKEN:
			free = 0;
			break;
		}
	}
	return 0;
}
/* Allocate a run of `pages' pages with byte alignment `align'.
 * Returns the first page number, or 0 on failure (page 0 always holds
 * bookkeeping, so it can never be a valid result). */
static unsigned long alloc_get_pages(void *pool, unsigned long poolsize,
				     unsigned long pages, unsigned long align)
{
	return alloc_from_bitmap(pool, 0, poolsize / getpagesize(), pages,
				 align / getpagesize());
}
  172. /* Offset to metadata is at end of page. */
  173. static unsigned long *metadata_off(void *pool, unsigned long page)
  174. {
  175. return (unsigned long *)
  176. ((char *)pool + (page+1)*getpagesize() - sizeof(unsigned long));
  177. }
  178. static uint8_t *get_page_metadata(void *pool, unsigned long page)
  179. {
  180. return (uint8_t *)pool + *metadata_off(pool, page);
  181. }
  182. static void set_page_metadata(void *pool, unsigned long page, uint8_t *meta)
  183. {
  184. *metadata_off(pool, page) = meta - (uint8_t *)pool;
  185. }
/* Allocate `size' bytes out of an existing SPECIAL (subpage) page via
 * its metadata bitmap.  Returns the pool offset, or 0 if the page has
 * no suitable free run. */
static unsigned long sub_page_alloc(void *pool, unsigned long page,
				    unsigned long size, unsigned long align)
{
	uint8_t *bits = get_page_metadata(pool, page);
	unsigned long i;

	/* TAKEN at start means a bitwise alloc. */
	assert(get_bit_pair(bits, 0) == BITMAP);

	/* We use a standard bitmap, but offset because of that BITMAP
	 * header. */
	i = alloc_from_bitmap(bits, 1, SUBPAGE_METAOFF/BITMAP_GRANULARITY,
			      div_up(size, BITMAP_GRANULARITY),
			      align / BITMAP_GRANULARITY);

	/* Can't allocate? */
	if (i == 0)
		return 0;

	/* i-1 because of the header. */
	return page*getpagesize() + (i-1)*BITMAP_GRANULARITY;
}
/* Carve `bytes' bytes out of one metaheader's bit region: find enough
 * consecutive FREE bitpairs, tag the first with `type', and return a
 * pointer to the corresponding bytes.  NULL if this header is full.
 * Only the first pair is tagged: the BITMAP skip below assumes every
 * allocation here is exactly BITMAP_METALEN bytes. */
static uint8_t *alloc_metaspace(struct metaheader *mh, unsigned long bytes,
				enum sub_metadata_type type)
{
	uint8_t *meta = (uint8_t *)(mh + 1);
	unsigned long free = 0, len;
	unsigned long i;

	/* TAKEN tags end a subpage alloc. */
	/* i counts bitpairs; each byte of metadata corresponds to
	 * CHAR_BIT/BITS_PER_PAGE pairs. */
	for (i = 0; i < mh->metalen * CHAR_BIT / BITS_PER_PAGE; i += len) {
		switch (get_bit_pair(meta, i)) {
		case FREE:
			len = 1;
			free++;
			if (free == bytes * CHAR_BIT / BITS_PER_PAGE) {
				/* Mark this as a bitmap. */
				set_bit_pair(meta, i - free + 1, type);
				return meta + (i - free + 1)
					/ (CHAR_BIT / BITS_PER_PAGE);
			}
			break;
		case BITMAP:
			/* Skip over this allocated part. */
			len = BITMAP_METALEN * CHAR_BIT / BITS_PER_PAGE;
			free = 0;
			break;
		default:
			assert(0);
			return NULL;
		}
	}
	return NULL;
}
/* We need this many bytes of metadata.  Tries, in order: spare room in
 * an existing metaheader, growing a header into the FREE page following
 * it, and finally allocating fresh pages for a brand-new header (linked
 * in right after the first).  Returns NULL if all three fail. */
static uint8_t *new_metadata(void *pool, unsigned long poolsize,
			     unsigned long bytes, enum sub_metadata_type type)
{
	struct metaheader *mh, *newmh;
	unsigned long page;

	for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh)){
		uint8_t *meta = alloc_metaspace(mh, bytes, type);

		if (meta)
			return meta;
	}

	/* No room for metadata?  Can we expand an existing one? */
	for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh)){
		/* It should end on a page boundary. */
		unsigned long nextpage;

		nextpage = pool_offset(pool, (char *)(mh + 1) + mh->metalen);
		assert(nextpage % getpagesize() == 0);

		/* Now, can we grab that page? */
		if (get_page_state(pool, nextpage / getpagesize()) != FREE)
			continue;

		/* OK, expand metadata, do it again. */
		set_page_state(pool, nextpage / getpagesize(), TAKEN);
		BUILD_ASSERT(FREE == 0);
		memset((char *)pool + nextpage, 0, getpagesize());
		mh->metalen += getpagesize();
		return alloc_metaspace(mh, bytes, type);
	}

	/* No metadata left at all? */
	page = alloc_get_pages(pool, poolsize, div_up(bytes, getpagesize()), 1);
	if (!page)
		return NULL;

	newmh = (struct metaheader *)((char *)pool + page * getpagesize());
	/* NOTE(review): metalen assumes a single page even though the
	 * allocation above can span several when bytes > pagesize —
	 * verify callers never ask for that much. */
	newmh->metalen = getpagesize() - sizeof(*mh);
	BUILD_ASSERT(FREE == 0);
	memset(newmh + 1, 0, newmh->metalen);

	/* Sew it into linked list */
	mh = first_mheader(pool,poolsize);
	newmh->next = mh->next;
	mh->next = pool_offset(pool, newmh);

	return alloc_metaspace(newmh, bytes, type);
}
  276. static void alloc_free_pages(void *pool, unsigned long pagenum)
  277. {
  278. assert(get_page_state(pool, pagenum) == TAKEN_START);
  279. set_page_state(pool, pagenum, FREE);
  280. while (get_page_state(pool, ++pagenum) == TAKEN)
  281. set_page_state(pool, pagenum, FREE);
  282. }
/* Allocate a sub-page-sized block: first try every existing SPECIAL
 * page, otherwise turn a fresh page into a subpage page (allocating a
 * metadata bitmap for it).  Returns a pool offset, or 0 on failure. */
static unsigned long alloc_sub_page(void *pool, unsigned long poolsize,
				    unsigned long size, unsigned long align)
{
	unsigned long i;
	uint8_t *metadata;

	/* Look for partial page. */
	for (i = 0; i < poolsize / getpagesize(); i++) {
		unsigned long ret;

		if (get_page_state(pool, i) != SPECIAL)
			continue;

		ret = sub_page_alloc(pool, i, size, align);
		if (ret)
			return ret;
	}

	/* Create new SUBPAGE page. */
	i = alloc_get_pages(pool, poolsize, 1, 1);
	if (i == 0)
		return 0;

	/* Get metadata for page. */
	metadata = new_metadata(pool, poolsize, BITMAP_METALEN, BITMAP);
	if (!metadata) {
		/* Roll back the page grab so nothing leaks. */
		alloc_free_pages(pool, i);
		return 0;
	}

	/* Actually, this is a subpage page now. */
	set_page_state(pool, i, SPECIAL);

	/* Set metadata pointer for page. */
	set_page_metadata(pool, i, metadata);

	/* Do allocation like normal */
	return sub_page_alloc(pool, i, size, align);
}
/* Returns true if we cleaned any pages.
 * A SPECIAL page whose bitmap holds no allocations is returned to FREE.
 * NOTE(review): the page's metadata bitmap (with its BITMAP header)
 * is not released here — presumably reclaimed elsewhere; verify. */
static bool clean_empty_subpages(void *pool, unsigned long poolsize)
{
	unsigned long i;
	bool progress = false;

	for (i = 0; i < poolsize/getpagesize(); i++) {
		uint8_t *meta;
		unsigned int j;

		if (get_page_state(pool, i) != SPECIAL)
			continue;

		meta = get_page_metadata(pool, i);
		/* Skip the header (first bit of metadata). */
		for (j = 1; j < SUBPAGE_METAOFF/BITMAP_GRANULARITY+1; j++)
			if (get_bit_pair(meta, j) != FREE)
				break;

		/* So, is this page totally empty? */
		if (j == SUBPAGE_METAOFF/BITMAP_GRANULARITY+1) {
			set_page_state(pool, i, FREE);
			progress = true;
		}
	}
	return progress;
}
  337. /* Returns true if we cleaned any pages. */
  338. static bool clean_metadata(void *pool, unsigned long poolsize)
  339. {
  340. struct metaheader *mh, *prev_mh = NULL;
  341. bool progress = false;
  342. for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh)){
  343. uint8_t *meta;
  344. long i;
  345. meta = (uint8_t *)(mh + 1);
  346. BUILD_ASSERT(FREE == 0);
  347. for (i = mh->metalen - 1; i > 0; i--)
  348. if (meta[i] != 0)
  349. break;
  350. /* Completely empty? */
  351. if (prev_mh && i == mh->metalen) {
  352. alloc_free_pages(pool,
  353. pool_offset(pool, mh)/getpagesize());
  354. prev_mh->next = mh->next;
  355. mh = prev_mh;
  356. progress = true;
  357. } else {
  358. uint8_t *p;
  359. /* Some pages at end are free? */
  360. for (p = (uint8_t *)(mh+1)+mh->metalen - getpagesize();
  361. p > meta + i;
  362. p -= getpagesize()) {
  363. set_page_state(pool,
  364. pool_offset(pool, p)
  365. / getpagesize(),
  366. FREE);
  367. progress = true;
  368. }
  369. }
  370. }
  371. return progress;
  372. }
/* Allocate `size' bytes, `align'-aligned, from the pool.  Returns a
 * pointer into the pool, or NULL on failure.  When an attempt fails we
 * garbage-collect (empty subpages first, then metadata) and retry once
 * after each collection that made progress. */
void *alloc_get(void *pool, unsigned long poolsize,
		unsigned long size, unsigned long align)
{
	bool subpage_clean = false, metadata_clean = false;
	unsigned long ret;

	if (poolsize < MIN_SIZE)
		return NULL;

again:
	/* Sub-page allocations have an overhead of ~12%. */
	if (size + size/8 >= getpagesize() || align >= getpagesize()) {
		unsigned long pages = div_up(size, getpagesize());

		ret = alloc_get_pages(pool, poolsize, pages, align)
			* getpagesize();
	} else
		ret = alloc_sub_page(pool, poolsize, size, align);

	if (ret != 0)
		return (char *)pool + ret;

	/* Allocation failed: garbage collection. */
	if (!subpage_clean) {
		subpage_clean = true;
		if (clean_empty_subpages(pool, poolsize))
			goto again;
	}

	if (!metadata_clean) {
		metadata_clean = true;
		if (clean_metadata(pool, poolsize))
			goto again;
	}

	/* FIXME: Compact metadata? */
	return NULL;
}
/* Free one sub-page allocation: clear its TAKEN_START bitpair and every
 * TAKEN pair continuing it.  `free' must lie within the usable part of
 * the page, on a BITMAP_GRANULARITY boundary. */
static void subpage_free(void *pool, unsigned long pagenum, void *free)
{
	unsigned long off = (unsigned long)free % getpagesize();
	uint8_t *metadata;

	assert(off < SUBPAGE_METAOFF);
	assert(off % BITMAP_GRANULARITY == 0);

	metadata = get_page_metadata(pool, pagenum);

	off /= BITMAP_GRANULARITY;

	/* Offset by one because first bit is used for header. */
	off++;
	set_bit_pair(metadata, off++, FREE);
	while (off < SUBPAGE_METAOFF / BITMAP_GRANULARITY
	       && get_bit_pair(metadata, off) == TAKEN)
		set_bit_pair(metadata, off++, FREE);
}
/* Free a pointer returned by alloc_get; NULL is a no-op.  Dispatches on
 * the page state: SPECIAL pages free via the subpage bitmap, otherwise
 * the whole page run is released. */
void alloc_free(void *pool, unsigned long poolsize, void *free)
{
	unsigned long pagenum;
	struct metaheader *mh;

	if (!free)
		return;

	assert(poolsize >= MIN_SIZE);

	mh = first_mheader(pool, poolsize);
	/* Sanity: pointer must be past the bookkeeping, inside the pool. */
	assert((char *)free >= (char *)(mh + 1) + mh->metalen);
	assert((char *)pool + poolsize > (char *)free);

	pagenum = pool_offset(pool, free) / getpagesize();

	if (get_page_state(pool, pagenum) == SPECIAL)
		subpage_free(pool, pagenum, free);
	else {
		assert((unsigned long)free % getpagesize() == 0);
		alloc_free_pages(pool, pagenum);
	}
}
  437. static bool is_metadata_page(void *pool, unsigned long poolsize,
  438. unsigned long page)
  439. {
  440. struct metaheader *mh;
  441. for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh)){
  442. unsigned long start, end;
  443. start = pool_offset(pool, mh);
  444. end = pool_offset(pool, (char *)(mh+1) + mh->metalen);
  445. if (page >= start/getpagesize() && page < end/getpagesize())
  446. return true;
  447. }
  448. return false;
  449. }
/* Validate one SPECIAL page: its metadata offset must be in range,
 * aligned, point at a metadata page, start with the BITMAP tag, and the
 * bitmap itself must be well-formed (no SPECIAL pairs, TAKEN only ever
 * continuing a previous entry). */
static bool check_subpage(void *pool, unsigned long poolsize,
			  unsigned long page)
{
	unsigned long *mhoff = metadata_off(pool, page);
	unsigned int i;
	enum alloc_state last_state = FREE;

	if (*mhoff + sizeof(struct metaheader) > poolsize)
		return false;

	if (*mhoff % ALIGNOF(struct metaheader) != 0)
		return false;

	/* It must point to a metadata page. */
	if (!is_metadata_page(pool, poolsize, *mhoff / getpagesize()))
		return false;

	/* Header at start of subpage allocation */
	if (get_bit_pair((uint8_t *)pool + *mhoff, 0) != BITMAP)
		return false;

	for (i = 0; i < SUBPAGE_METAOFF / BITMAP_GRANULARITY; i++) {
		enum alloc_state state;

		/* +1 because header is the first byte. */
		state = get_bit_pair((uint8_t *)pool + *mhoff, i+1);
		switch (state) {
		case SPECIAL:
			return false;
		case TAKEN:
			if (last_state == FREE)
				return false;
			break;
		default:
			break;
		}
		last_state = state;
	}
	return true;
}
/* Whole-pool consistency check: metaheader chain bounds/alignment, then
 * every page state (runs well-formed, metadata pages never FREE,
 * SPECIAL pages internally valid).  Returns true if all is well. */
bool alloc_check(void *pool, unsigned long poolsize)
{
	unsigned long i;
	struct metaheader *mh;
	enum alloc_state last_state = FREE;
	bool was_metadata = false;

	if (poolsize < MIN_SIZE)
		return true;

	if (get_page_state(pool, 0) != TAKEN_START)
		return false;

	/* First check metadata pages. */
	/* Metadata pages will be marked TAKEN. */
	for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh)){
		unsigned long start, end;

		start = pool_offset(pool, mh);
		if (start + sizeof(*mh) > poolsize)
			return false;

		end = pool_offset(pool, (char *)(mh+1) + mh->metalen);
		if (end > poolsize)
			return false;

		/* Non-first pages should start on a page boundary. */
		if (mh != first_mheader(pool, poolsize)
		    && start % getpagesize() != 0)
			return false;

		/* It should end on a page boundary. */
		if (end % getpagesize() != 0)
			return false;
	}

	for (i = 0; i < poolsize / getpagesize(); i++) {
		enum alloc_state state = get_page_state(pool, i);
		bool is_metadata = is_metadata_page(pool, poolsize,i);

		switch (state) {
		case FREE:
			/* metadata pages are never free. */
			if (is_metadata)
				return false;
			/* fallthrough */
		case TAKEN_START:
			break;
		case TAKEN:
			/* This should continue a previous block. */
			if (last_state == FREE)
				return false;
			if (is_metadata != was_metadata)
				return false;
			break;
		case SPECIAL:
			/* Check metadata pointer etc. */
			if (!check_subpage(pool, poolsize, i))
				return false;
		}
		last_state = state;
		was_metadata = is_metadata;
	}
	return true;
}
  539. void alloc_visualize(FILE *out, void *pool, unsigned long poolsize)
  540. {
  541. struct metaheader *mh;
  542. unsigned long pagebitlen, metadata_pages, count[1<<BITS_PER_PAGE], tot;
  543. long i;
  544. if (poolsize < MIN_SIZE) {
  545. fprintf(out, "Pool smaller than %u: no content\n", MIN_SIZE);
  546. return;
  547. }
  548. memset(count, 0, sizeof(count));
  549. for (i = 0; i < poolsize / getpagesize(); i++)
  550. count[get_page_state(pool, i)]++;
  551. mh = first_mheader(pool, poolsize);
  552. pagebitlen = (char *)mh - (char *)pool;
  553. fprintf(out, "%lu bytes of page bits: FREE/TAKEN/TAKEN_START/SUBPAGE = %lu/%lu/%lu/%lu\n",
  554. pagebitlen, count[0], count[1], count[2], count[3]);
  555. /* One metadata page for every page of page bits. */
  556. metadata_pages = div_up(pagebitlen, getpagesize());
  557. /* Now do each metadata page. */
  558. for (; mh; mh = next_mheader(pool,mh)) {
  559. unsigned long free = 0, subpageblocks = 0, len = 0;
  560. uint8_t *meta = (uint8_t *)(mh + 1);
  561. metadata_pages += (sizeof(*mh) + mh->metalen) / getpagesize();
  562. for (i = 0;
  563. i < mh->metalen * CHAR_BIT / BITS_PER_PAGE;
  564. i += len) {
  565. switch (get_page_state(meta, i)) {
  566. case FREE:
  567. len = 1;
  568. free++;
  569. break;
  570. case BITMAP:
  571. /* Skip over this allocated part. */
  572. len = BITMAP_METALEN * CHAR_BIT;
  573. subpageblocks++;
  574. break;
  575. default:
  576. assert(0);
  577. }
  578. }
  579. fprintf(out, "Metadata %lu-%lu: %lu free, %lu subpageblocks, %lu%% density\n",
  580. pool_offset(pool, mh),
  581. pool_offset(pool, (char *)(mh+1) + mh->metalen),
  582. free, subpageblocks,
  583. subpageblocks * BITMAP_METALEN * 100
  584. / (free + subpageblocks * BITMAP_METALEN));
  585. }
  586. /* Account for total pages allocated. */
  587. tot = (count[1] + count[2] - metadata_pages) * getpagesize();
  588. fprintf(out, "Total metadata bytes = %lu\n",
  589. metadata_pages * getpagesize());
  590. /* Now do every subpage. */
  591. for (i = 0; i < poolsize / getpagesize(); i++) {
  592. uint8_t *meta;
  593. unsigned int j;
  594. if (get_page_state(pool, i) != SPECIAL)
  595. continue;
  596. memset(count, 0, sizeof(count));
  597. meta = get_page_metadata(pool, i);
  598. for (j = 0; j < SUBPAGE_METAOFF/BITMAP_GRANULARITY; j++)
  599. count[get_page_state(meta, j)]++;
  600. fprintf(out, "Subpage %lu: "
  601. "FREE/TAKEN/TAKEN_START = %lu/%lu/%lu %lu%% density\n",
  602. i, count[0], count[1], count[2],
  603. ((count[1] + count[2]) * BITMAP_GRANULARITY) * 100
  604. / getpagesize());
  605. tot += (count[1] + count[2]) * BITMAP_GRANULARITY;
  606. }
  607. /* This is optimistic, since we overalloc in several cases. */
  608. fprintf(out, "Best possible allocation density = %lu%%\n",
  609. tot * 100 / poolsize);
  610. }