/* alloc.c — pool allocator: page-granularity allocations with
 * sub-page bitmap allocations for small requests. */
  1. #include <unistd.h>
  2. #include <stdint.h>
  3. #include <string.h>
  4. #include <limits.h>
  5. #include <assert.h>
  6. #include "alloc.h"
  7. #include "build_assert/build_assert.h"
  8. #include "config.h"
  9. #if HAVE_ALIGNOF
  10. #define ALIGNOF(t) __alignof__(t)
  11. #else
  12. /* Alignment by measuring structure padding. */
  13. #define ALIGNOF(t) (sizeof(struct { char c; t _h; }) - 1 - sizeof(t))
  14. #endif
  15. /* FIXME: Doesn't handle non-page-aligned poolsize. */
  16. /* FIXME: Reduce. */
  17. #define MIN_SIZE (getpagesize() * 2)
  18. /* What's the granularity of sub-page allocs? */
  19. #define BITMAP_GRANULARITY 4
  20. /* File layout:
  21. *
  22. * file := pagestates pad metadata
  23. * pagestates := pages * 2-bits-per-page
  24. * pad := pad to next ALIGNOF(metadata)
  25. *
  26. * metadata := metalen next-ptr metabits
  27. * metabits := freeblock | bitblock
  28. * freeblock := 0+
  29. * bitblock := 2-bits-per-bit-in-page 1
  30. */
/* Header for one metadata area.  The first one sits right after the
 * page-state bitmap; further areas are chained through "next". */
struct metaheader
{
	/* Length (after this header). (FIXME: implied by page bits!). */
	unsigned long metalen;
	/* Next meta header, or 0.  Stored as a byte offset from the pool
	 * start so the pool is position-independent. */
	unsigned long next;
	/* Bits start here. */
};
  39. /* Assumes a is a power of two. */
  40. static unsigned long align_up(unsigned long x, unsigned long a)
  41. {
  42. return (x + a - 1) & ~(a - 1);
  43. }
  44. static unsigned long div_up(unsigned long x, unsigned long a)
  45. {
  46. return (x + a - 1) / a;
  47. }
  48. /* It turns out that we spend a lot of time dealing with bit pairs.
  49. * These routines manipulate them.
  50. */
  51. static uint8_t get_bit_pair(const uint8_t *bits, unsigned long index)
  52. {
  53. return bits[index * 2 / CHAR_BIT] >> (index * 2 % CHAR_BIT) & 3;
  54. }
  55. static void set_bit_pair(uint8_t *bits, unsigned long index, uint8_t val)
  56. {
  57. bits[index * 2 / CHAR_BIT] &= ~(3 << (index * 2 % CHAR_BIT));
  58. bits[index * 2 / CHAR_BIT] |= (val << (index * 2 % CHAR_BIT));
  59. }
/* This is used for page states and subpage allocations */
enum alloc_state
{
	FREE,		/* must stay 0: freshly-zeroed areas read as FREE */
	TAKEN,		/* continuation of an allocation started below */
	TAKEN_START,	/* first unit of an allocation */
	SPECIAL, /* Sub-page allocation for page states. */
};
/* The types for subpage metadata. */
enum sub_metadata_type
{
	/* FREE is same as alloc state */
	BITMAP = 1,	/* same value as TAKEN; tags a subpage-bitmap block */
};
  74. /* Page states are represented by bitpairs, at the start of the pool. */
  75. #define BITS_PER_PAGE 2
  76. static enum alloc_state get_page_state(const void *pool, unsigned long page)
  77. {
  78. return get_bit_pair(pool, page);
  79. }
  80. static void set_page_state(void *pool, unsigned long page, enum alloc_state s)
  81. {
  82. set_bit_pair(pool, page, s);
  83. }
/* The offset of metadata for a subpage allocation is found at the end
 * of the subpage */
#define SUBPAGE_METAOFF (getpagesize() - sizeof(unsigned long))

/* This is the length of metadata in bits.  It consists of two bits
 * for every BITMAP_GRANULARITY of usable bytes in the page, then two
 * bits for the tailer.. */
#define BITMAP_METABITLEN						\
	((div_up(SUBPAGE_METAOFF, BITMAP_GRANULARITY) + 1) * BITS_PER_PAGE)

/* This is the length in bytes. */
#define BITMAP_METALEN (div_up(BITMAP_METABITLEN, CHAR_BIT))
  94. static struct metaheader *first_mheader(void *pool, unsigned long poolsize)
  95. {
  96. unsigned int pagestatelen;
  97. pagestatelen = align_up(div_up(poolsize/getpagesize() * BITS_PER_PAGE,
  98. CHAR_BIT),
  99. ALIGNOF(struct metaheader));
  100. return (struct metaheader *)((char *)pool + pagestatelen);
  101. }
  102. static struct metaheader *next_mheader(void *pool, struct metaheader *mh)
  103. {
  104. if (!mh->next)
  105. return NULL;
  106. return (struct metaheader *)((char *)pool + mh->next);
  107. }
  108. static unsigned long pool_offset(void *pool, void *p)
  109. {
  110. return (char *)p - (char *)pool;
  111. }
/* Initialize a pool: zero the page-state bitmap, set up the first
 * metaheader, and mark the pages they occupy as allocated.  Pools
 * smaller than MIN_SIZE are silently left uninitialized. */
void alloc_init(void *pool, unsigned long poolsize)
{
	/* FIXME: Alignment assumptions about pool. */
	unsigned long len, i;
	struct metaheader *mh;

	if (poolsize < MIN_SIZE)
		return;

	mh = first_mheader(pool, poolsize);

	/* len covers all page states, plus the metaheader. */
	len = (char *)(mh + 1) - (char *)pool;

	/* Mark all page states FREE */
	BUILD_ASSERT(FREE == 0);
	memset(pool, 0, len);

	/* metaheader len takes us up to next page boundary. */
	mh->metalen = align_up(len, getpagesize()) - len;

	/* Mark the pagestate and metadata page(s) allocated. */
	set_page_state(pool, 0, TAKEN_START);
	for (i = 1; i < div_up(len, getpagesize()); i++)
		set_page_state(pool, i, TAKEN);
}
/* Two bits per element, representing page states. Returns -1 on fail.
 * Finds "want" consecutive FREE pairs whose first index is a multiple
 * of "align" (0 = no constraint), marks them TAKEN_START + TAKEN, and
 * returns the first index. */
static long alloc_from_bitmap(uint8_t *bits, unsigned long elems,
			      unsigned long want, unsigned long align)
{
	long i;
	unsigned long free;

	free = 0;
	/* We allocate from far end, to increase ability to expand metadata. */
	for (i = elems - 1; i >= 0; i--) {
		switch (get_bit_pair(bits, i)) {
		case FREE:
			/* The free run grows downward: [i, i+free-1]. */
			if (++free >= want) {
				unsigned long j;

				/* They might ask for large alignment: keep
				 * sliding down; the run above stays free. */
				if (align && i % align)
					continue;

				for (j = i+1; j < i + want; j++)
					set_bit_pair(bits, j, TAKEN);
				set_bit_pair(bits, i, TAKEN_START);
				return i;
			}
			break;
		case SPECIAL:
		case TAKEN_START:
		case TAKEN:
			/* Any non-free pair breaks the run. */
			free = 0;
			break;
		}
	}

	return -1;
}
  163. static unsigned long alloc_get_pages(void *pool, unsigned long poolsize,
  164. unsigned long pages, unsigned long align)
  165. {
  166. long i;
  167. unsigned long free;
  168. free = 0;
  169. /* We allocate from far end, to increase ability to expand metadata. */
  170. for (i = poolsize / getpagesize() - 1; i >= 0; i--) {
  171. switch (get_page_state(pool, i)) {
  172. case FREE:
  173. if (++free >= pages) {
  174. unsigned long j, addr;
  175. addr = (unsigned long)pool + i * getpagesize();
  176. /* They might ask for multi-page alignment. */
  177. if (addr % align)
  178. continue;
  179. for (j = i+1; j < i + pages; j++)
  180. set_page_state(pool, j, TAKEN);
  181. set_page_state(pool, i, TAKEN_START);
  182. return i;
  183. }
  184. break;
  185. case SPECIAL:
  186. case TAKEN_START:
  187. case TAKEN:
  188. free = 0;
  189. break;
  190. }
  191. }
  192. return 0;
  193. }
  194. /* Offset to metadata is at end of page. */
  195. static unsigned long *metadata_off(void *pool, unsigned long page)
  196. {
  197. return (unsigned long *)
  198. ((char *)pool + (page+1)*getpagesize() - sizeof(unsigned long));
  199. }
  200. static uint8_t *get_page_metadata(void *pool, unsigned long page)
  201. {
  202. return (uint8_t *)pool + *metadata_off(pool, page);
  203. }
  204. static void set_page_metadata(void *pool, unsigned long page, uint8_t *meta)
  205. {
  206. *metadata_off(pool, page) = meta - (uint8_t *)pool;
  207. }
/* Try to allocate "size" bytes from SPECIAL page "page"'s bitmap.
 * Returns byte offset into the pool, or 0 on failure. */
static unsigned long sub_page_alloc(void *pool, unsigned long page,
				    unsigned long size, unsigned long align)
{
	uint8_t *bits = get_page_metadata(pool, page);
	long i;

	/* TAKEN at end means a bitwise alloc. */
	assert(get_bit_pair(bits, getpagesize()/BITMAP_GRANULARITY-1) == TAKEN);

	/* Our bits are the same as the page bits: reuse the allocator,
	 * in BITMAP_GRANULARITY-sized units. */
	i = alloc_from_bitmap(bits, SUBPAGE_METAOFF/BITMAP_GRANULARITY,
			      div_up(size, BITMAP_GRANULARITY),
			      align / BITMAP_GRANULARITY);

	/* Can't allocate? */
	if (i < 0)
		return 0;

	return page*getpagesize() + i*BITMAP_GRANULARITY;
}
/* Carve "bytes" bytes out of metaheader mh's metadata area, tracked as
 * bit pairs (CHAR_BIT/BITS_PER_PAGE pairs per byte).  Returns pointer
 * to the bytes, or NULL if mh has no room. */
static uint8_t *alloc_metaspace(struct metaheader *mh, unsigned long bytes)
{
	uint8_t *meta = (uint8_t *)(mh + 1);
	unsigned long free = 0, len;
	long i;

	/* TAKEN tags end a subpage alloc. */
	for (i = mh->metalen * CHAR_BIT / BITS_PER_PAGE - 1; i >= 0; i -= len) {
		switch (get_bit_pair(meta, i)) {
		case FREE:
			len = 1;
			free++;
			/* Need bytes * CHAR_BIT / BITS_PER_PAGE pairs. */
			if (free == bytes * CHAR_BIT / BITS_PER_PAGE) {
				/* TAKEN marks end of metablock. */
				set_page_state(meta, i + free - 1, TAKEN);
				/* Convert pair index back to a byte offset. */
				return meta + i / (CHAR_BIT / BITS_PER_PAGE);
			}
			break;
		case BITMAP:
			/* Skip over this allocated part. */
			len = BITMAP_METALEN * CHAR_BIT / BITS_PER_PAGE;
			free = 0;
			break;
		default:
			/* Unknown tag: metadata corrupt. */
			assert(0);
			return NULL;
		}
	}
	return NULL;
}
/* We need this many bytes of metadata.  Try existing metaheaders,
 * then expanding one into an adjacent free page, then allocating a
 * brand-new metadata area.  Returns NULL when all three fail. */
static uint8_t *new_metadata(void *pool, unsigned long poolsize,
			     unsigned long bytes)
{
	struct metaheader *mh, *newmh;
	unsigned long page;

	for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh)){
		uint8_t *meta = alloc_metaspace(mh, bytes);

		if (meta)
			return meta;
	}

	/* No room for metadata?  Can we expand an existing one? */
	for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh)){
		/* It should end on a page boundary. */
		unsigned long nextpage;

		nextpage = pool_offset(pool, (char *)(mh + 1) + mh->metalen);
		assert(nextpage % getpagesize() == 0);

		/* Now, can we grab that page? */
		if (get_page_state(pool, nextpage / getpagesize()) != FREE)
			continue;

		/* OK, expand metadata, do it again. */
		set_page_state(pool, nextpage / getpagesize(), TAKEN);
		BUILD_ASSERT(FREE == 0);
		memset((char *)pool + nextpage, 0, getpagesize());
		mh->metalen += getpagesize();
		return alloc_metaspace(mh, bytes);
	}

	/* No metadata left at all? */
	page = alloc_get_pages(pool, poolsize, div_up(bytes, getpagesize()), 1);
	if (!page)
		return NULL;

	newmh = (struct metaheader *)((char *)pool + page * getpagesize());
	/* NOTE(review): metalen assumes one page even though more than one
	 * may have been allocated above — confirm bytes <= getpagesize(). */
	newmh->metalen = getpagesize() - sizeof(*mh);
	BUILD_ASSERT(FREE == 0);
	memset(newmh + 1, 0, newmh->metalen);

	/* Sew it into linked list */
	mh = first_mheader(pool,poolsize);
	newmh->next = mh->next;
	mh->next = pool_offset(pool, newmh);

	return alloc_metaspace(newmh, bytes);
}
  294. static void alloc_free_pages(void *pool, unsigned long pagenum)
  295. {
  296. assert(get_page_state(pool, pagenum) == TAKEN_START);
  297. set_page_state(pool, pagenum, FREE);
  298. while (get_page_state(pool, ++pagenum) == TAKEN)
  299. set_page_state(pool, pagenum, FREE);
  300. }
/* Allocate a small (sub-page) request.  Tries every existing SPECIAL
 * page first, then converts a fresh page.  Returns byte offset into
 * the pool, or 0 on failure. */
static unsigned long alloc_sub_page(void *pool, unsigned long poolsize,
				    unsigned long size, unsigned long align)
{
	unsigned long i;
	uint8_t *metadata;

	/* Look for partial page. */
	for (i = 0; i < poolsize / getpagesize(); i++) {
		unsigned long ret;

		if (get_page_state(pool, i) != SPECIAL)
			continue;

		ret = sub_page_alloc(pool, i, size, align);
		if (ret)
			return ret;
	}

	/* Create new SUBPAGE page. */
	i = alloc_get_pages(pool, poolsize, 1, 1);
	if (i == 0)
		return 0;

	/* Get metadata for page. */
	metadata = new_metadata(pool, poolsize, BITMAP_METALEN);
	if (!metadata) {
		/* Roll back the page grab. */
		alloc_free_pages(pool, i);
		return 0;
	}

	/* Actually, this is a subpage page now. */
	set_page_state(pool, i, SPECIAL);

	/* Set metadata pointer for page. */
	set_page_metadata(pool, i, metadata);

	/* Do allocation like normal */
	return sub_page_alloc(pool, i, size, align);
}
/* Returns true if we cleaned any pages.
 * GC pass 1: a SPECIAL page whose bitmap shows no allocations is
 * returned to the FREE pool (its metadata block stays allocated). */
static bool clean_empty_subpages(void *pool, unsigned long poolsize)
{
	unsigned long i;
	bool progress = false;

	for (i = 0; i < poolsize/getpagesize(); i++) {
		uint8_t *meta;
		unsigned int j;

		if (get_page_state(pool, i) != SPECIAL)
			continue;

		/* get_page_state() is just the bitpair accessor, so it
		 * reads the subpage bitmap too. */
		meta = get_page_metadata(pool, i);
		for (j = 0; j < SUBPAGE_METAOFF/BITMAP_GRANULARITY; j++)
			if (get_page_state(meta, j) != FREE)
				break;

		/* So, is this page totally empty? */
		if (j == SUBPAGE_METAOFF/BITMAP_GRANULARITY) {
			set_page_state(pool, i, FREE);
			progress = true;
		}
	}
	return progress;
}
  354. /* Returns true if we cleaned any pages. */
  355. static bool clean_metadata(void *pool, unsigned long poolsize)
  356. {
  357. struct metaheader *mh, *prev_mh = NULL;
  358. bool progress = false;
  359. for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh)){
  360. uint8_t *meta;
  361. long i;
  362. meta = (uint8_t *)(mh + 1);
  363. BUILD_ASSERT(FREE == 0);
  364. for (i = mh->metalen - 1; i > 0; i--)
  365. if (meta[i] != 0)
  366. break;
  367. /* Completely empty? */
  368. if (prev_mh && i == mh->metalen) {
  369. alloc_free_pages(pool,
  370. pool_offset(pool, mh)/getpagesize());
  371. prev_mh->next = mh->next;
  372. mh = prev_mh;
  373. progress = true;
  374. } else {
  375. uint8_t *p;
  376. /* Some pages at end are free? */
  377. for (p = (uint8_t *)(mh+1)+mh->metalen - getpagesize();
  378. p > meta + i;
  379. p -= getpagesize()) {
  380. set_page_state(pool,
  381. pool_offset(pool, p)
  382. / getpagesize(),
  383. FREE);
  384. progress = true;
  385. }
  386. }
  387. }
  388. return progress;
  389. }
/* Allocate "size" bytes from the pool with the given alignment.
 * Returns a pointer inside the pool, or NULL on failure.  On failure
 * it retries after two increasingly thorough garbage-collection
 * passes: empty subpages first, then unused metadata. */
void *alloc_get(void *pool, unsigned long poolsize,
		unsigned long size, unsigned long align)
{
	bool subpage_clean = false, metadata_clean = false;
	unsigned long ret;

	if (poolsize < MIN_SIZE)
		return NULL;

again:
	/* Sub-page allocations have an overhead of ~12%. */
	if (size + size/8 >= getpagesize() || align >= getpagesize()) {
		unsigned long pages = div_up(size, getpagesize());

		ret = alloc_get_pages(pool, poolsize, pages, align)
			* getpagesize();
	} else
		ret = alloc_sub_page(pool, poolsize, size, align);

	/* Offset 0 doubles as the failure sentinel: real offset 0 always
	 * holds the page-state bitmap, so it is never a valid result. */
	if (ret != 0)
		return (char *)pool + ret;

	/* Allocation failed: garbage collection. */
	if (!subpage_clean) {
		subpage_clean = true;
		if (clean_empty_subpages(pool, poolsize))
			goto again;
	}

	if (!metadata_clean) {
		metadata_clean = true;
		if (clean_metadata(pool, poolsize))
			goto again;
	}

	/* FIXME: Compact metadata? */
	return NULL;
}
  421. static void subpage_free(void *pool, unsigned long pagenum, void *free)
  422. {
  423. unsigned long off = (unsigned long)free % getpagesize();
  424. uint8_t *metadata;
  425. assert(off < SUBPAGE_METAOFF);
  426. assert(off % BITMAP_GRANULARITY == 0);
  427. metadata = get_page_metadata(pool, pagenum);
  428. off /= BITMAP_GRANULARITY;
  429. set_page_state(metadata, off++, FREE);
  430. while (off < SUBPAGE_METAOFF / BITMAP_GRANULARITY
  431. && get_page_state(metadata, off) == TAKEN)
  432. set_page_state(metadata, off++, FREE);
  433. }
/* Return an allocation obtained from alloc_get() to the pool.
 * free may be NULL (no-op); otherwise it must point inside the pool,
 * past the first metaheader. */
void alloc_free(void *pool, unsigned long poolsize, void *free)
{
	unsigned long pagenum;
	struct metaheader *mh;

	if (!free)
		return;

	assert(poolsize >= MIN_SIZE);

	mh = first_mheader(pool, poolsize);
	assert((char *)free >= (char *)(mh + 1) + mh->metalen);
	assert((char *)pool + poolsize > (char *)free);

	pagenum = pool_offset(pool, free) / getpagesize();

	/* SPECIAL pages hold sub-page allocations; anything else must be
	 * a page-aligned whole-page allocation. */
	if (get_page_state(pool, pagenum) == SPECIAL)
		subpage_free(pool, pagenum, free);
	else {
		assert((unsigned long)free % getpagesize() == 0);
		alloc_free_pages(pool, pagenum);
	}
}
  452. static bool is_metadata_page(void *pool, unsigned long poolsize,
  453. unsigned long page)
  454. {
  455. struct metaheader *mh;
  456. for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh)){
  457. unsigned long start, end;
  458. start = pool_offset(pool, mh);
  459. end = pool_offset(pool, (char *)(mh+1) + mh->metalen);
  460. if (page >= start/getpagesize() && page < end/getpagesize())
  461. return true;
  462. }
  463. return false;
  464. }
/* Validate the metadata of SPECIAL page "page": its stored offset must
 * be in bounds, aligned, inside a metadata area, carry the TAKEN end
 * marker, and describe well-formed allocation runs. */
static bool check_subpage(void *pool, unsigned long poolsize,
			  unsigned long page)
{
	unsigned long *mhoff = metadata_off(pool, page);
	unsigned int i;
	enum alloc_state last_state = FREE;

	if (*mhoff + sizeof(struct metaheader) > poolsize)
		return false;

	if (*mhoff % ALIGNOF(struct metaheader) != 0)
		return false;

	/* It must point to a metadata page. */
	if (!is_metadata_page(pool, poolsize, *mhoff / getpagesize()))
		return false;

	/* Marker at end of subpage allocation is "taken" */
	if (get_bit_pair((uint8_t *)pool + *mhoff,
			 getpagesize()/BITMAP_GRANULARITY - 1) != TAKEN)
		return false;

	for (i = 0; i < SUBPAGE_METAOFF / BITMAP_GRANULARITY; i++) {
		enum alloc_state state;

		state = get_bit_pair((uint8_t *)pool + *mhoff, i);
		switch (state) {
		case SPECIAL:
			/* SPECIAL never appears inside subpage bitmaps. */
			return false;
		case TAKEN:
			/* A continuation needs a preceding start. */
			if (last_state == FREE)
				return false;
			break;
		default:
			break;
		}
		last_state = state;
	}
	return true;
}
/* Consistency-check the whole pool: metaheader chain in bounds and
 * page-aligned, page states forming well-formed runs, every SPECIAL
 * page's subpage metadata sane.  Returns false on any violation. */
bool alloc_check(void *pool, unsigned long poolsize)
{
	unsigned long i;
	struct metaheader *mh;
	enum alloc_state last_state = FREE;
	bool was_metadata = false;

	/* Pools below MIN_SIZE are never initialized: vacuously OK. */
	if (poolsize < MIN_SIZE)
		return true;

	/* Page 0 always holds the page-state bitmap + first header. */
	if (get_page_state(pool, 0) != TAKEN_START)
		return false;

	/* First check metadata pages. */
	/* Metadata pages will be marked TAKEN. */
	for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh)){
		unsigned long start, end;

		start = pool_offset(pool, mh);
		if (start + sizeof(*mh) > poolsize)
			return false;

		end = pool_offset(pool, (char *)(mh+1) + mh->metalen);
		if (end > poolsize)
			return false;

		/* Non-first pages should start on a page boundary. */
		if (mh != first_mheader(pool, poolsize)
		    && start % getpagesize() != 0)
			return false;

		/* It should end on a page boundary. */
		if (end % getpagesize() != 0)
			return false;
	}

	for (i = 0; i < poolsize / getpagesize(); i++) {
		enum alloc_state state = get_page_state(pool, i);
		bool is_metadata = is_metadata_page(pool, poolsize,i);

		switch (state) {
		case FREE:
			/* metadata pages are never free. */
			if (is_metadata)
				return false;
			/* fallthrough: FREE, like TAKEN_START, is a valid
			 * run boundary. */
		case TAKEN_START:
			break;
		case TAKEN:
			/* This should continue a previous block. */
			if (last_state == FREE)
				return false;
			/* A run can't change between metadata and data. */
			if (is_metadata != was_metadata)
				return false;
			break;
		case SPECIAL:
			/* Check metadata pointer etc. */
			if (!check_subpage(pool, poolsize, i))
				return false;
		}
		last_state = state;
		was_metadata = is_metadata;
	}
	return true;
}
  554. void alloc_visualize(FILE *out, void *pool, unsigned long poolsize)
  555. {
  556. struct metaheader *mh;
  557. unsigned long pagebitlen, metadata_pages, count[1<<BITS_PER_PAGE], tot;
  558. long i;
  559. if (poolsize < MIN_SIZE) {
  560. fprintf(out, "Pool smaller than %u: no content\n", MIN_SIZE);
  561. return;
  562. }
  563. memset(count, 0, sizeof(count));
  564. for (i = 0; i < poolsize / getpagesize(); i++)
  565. count[get_page_state(pool, i)]++;
  566. mh = first_mheader(pool, poolsize);
  567. pagebitlen = (char *)mh - (char *)pool;
  568. fprintf(out, "%lu bytes of page bits: FREE/TAKEN/TAKEN_START/SUBPAGE = %lu/%lu/%lu/%lu\n",
  569. pagebitlen, count[0], count[1], count[2], count[3]);
  570. /* One metadata page for every page of page bits. */
  571. metadata_pages = div_up(pagebitlen, getpagesize());
  572. /* Now do each metadata page. */
  573. for (; mh; mh = next_mheader(pool,mh)) {
  574. unsigned long free = 0, subpageblocks = 0, len = 0;
  575. uint8_t *meta = (uint8_t *)(mh + 1);
  576. metadata_pages += (sizeof(*mh) + mh->metalen) / getpagesize();
  577. /* TAKEN tags end a subpage alloc. */
  578. for (i = mh->metalen * CHAR_BIT/BITS_PER_PAGE - 1;
  579. i >= 0;
  580. i -= len) {
  581. switch (get_page_state(meta, i)) {
  582. case FREE:
  583. len = 1;
  584. free++;
  585. break;
  586. case TAKEN:
  587. /* Skip over this allocated part. */
  588. len = BITMAP_METALEN * CHAR_BIT;
  589. subpageblocks++;
  590. break;
  591. default:
  592. assert(0);
  593. }
  594. }
  595. fprintf(out, "Metadata %lu-%lu: %lu free, %lu subpageblocks, %lu%% density\n",
  596. pool_offset(pool, mh),
  597. pool_offset(pool, (char *)(mh+1) + mh->metalen),
  598. free, subpageblocks,
  599. subpageblocks * BITMAP_METALEN * 100
  600. / (free + subpageblocks * BITMAP_METALEN));
  601. }
  602. /* Account for total pages allocated. */
  603. tot = (count[1] + count[2] - metadata_pages) * getpagesize();
  604. fprintf(out, "Total metadata bytes = %lu\n",
  605. metadata_pages * getpagesize());
  606. /* Now do every subpage. */
  607. for (i = 0; i < poolsize / getpagesize(); i++) {
  608. uint8_t *meta;
  609. unsigned int j;
  610. if (get_page_state(pool, i) != SPECIAL)
  611. continue;
  612. memset(count, 0, sizeof(count));
  613. meta = get_page_metadata(pool, i);
  614. for (j = 0; j < SUBPAGE_METAOFF/BITMAP_GRANULARITY; j++)
  615. count[get_page_state(meta, j)]++;
  616. fprintf(out, "Subpage %lu: "
  617. "FREE/TAKEN/TAKEN_START = %lu/%lu/%lu %lu%% density\n",
  618. i, count[0], count[1], count[2],
  619. ((count[1] + count[2]) * BITMAP_GRANULARITY) * 100
  620. / getpagesize());
  621. tot += (count[1] + count[2]) * BITMAP_GRANULARITY;
  622. }
  623. /* This is optimistic, since we overalloc in several cases. */
  624. fprintf(out, "Best possible allocation density = %lu%%\n",
  625. tot * 100 / poolsize);
  626. }