/* alloc.c - simple pool allocator with page-state bitpairs and
 * subpage bitmaps. */
  1. #include <unistd.h>
  2. #include <stdint.h>
  3. #include <string.h>
  4. #include <limits.h>
  5. #include <assert.h>
  6. #include "alloc.h"
  7. #include "build_assert/build_assert.h"
  8. #include "config.h"
  9. #if HAVE_ALIGNOF
  10. #define ALIGNOF(t) __alignof__(t)
  11. #else
  12. /* Alignment by measuring structure padding. */
  13. #define ALIGNOF(t) (sizeof(struct { char c; t _h; }) - 1 - sizeof(t))
  14. #endif
  15. /* FIXME: Doesn't handle non-page-aligned poolsize. */
  16. /* FIXME: Reduce. */
  17. #define MIN_SIZE (getpagesize() * 2)
  18. /* What's the granularity of sub-page allocs? */
  19. #define BITMAP_GRANULARITY 4
  20. /* File layout:
  21. *
  22. * file := pagestates pad metadata
  23. * pagestates := pages * 2-bits-per-page
  24. * pad := pad to next ALIGNOF(metadata)
  25. *
  26. * metadata := metalen next-ptr metabits
  27. * metabits := freeblock | bitblock
  28. * freeblock := 0+
  29. * bitblock := 2-bits-per-bit-in-page 1
  30. */
/* Each metadata area starts with this header; the metadata bit pairs
 * follow immediately after it in memory. */
struct metaheader
{
	/* Pool offset of the next meta header, or 0 if this is the last. */
	unsigned long next;
	/* Bits start here. */
};
  37. /* Assumes a is a power of two. */
  38. static unsigned long align_up(unsigned long x, unsigned long a)
  39. {
  40. return (x + a - 1) & ~(a - 1);
  41. }
  42. static unsigned long div_up(unsigned long x, unsigned long a)
  43. {
  44. return (x + a - 1) / a;
  45. }
  46. /* It turns out that we spend a lot of time dealing with bit pairs.
  47. * These routines manipulate them.
  48. */
  49. static uint8_t get_bit_pair(const uint8_t *bits, unsigned long index)
  50. {
  51. return bits[index * 2 / CHAR_BIT] >> (index * 2 % CHAR_BIT) & 3;
  52. }
  53. static void set_bit_pair(uint8_t *bits, unsigned long index, uint8_t val)
  54. {
  55. bits[index * 2 / CHAR_BIT] &= ~(3 << (index * 2 % CHAR_BIT));
  56. bits[index * 2 / CHAR_BIT] |= (val << (index * 2 % CHAR_BIT));
  57. }
/* This is used for page states and subpage allocations */
enum alloc_state
{
	FREE,		/* Unallocated.  Must be 0: init memset()s to FREE. */
	TAKEN,		/* Continuation of an allocation started earlier. */
	TAKEN_START,	/* First page/unit of an allocation. */
	SPECIAL, /* Sub-page allocation for page states. */
};
/* The types for subpage metadata. */
enum sub_metadata_type
{
	/* FREE is same as alloc state */
	BITMAP = 1,	/* Tag pair marking a bitmap metadata block. */
};
  72. /* Page states are represented by bitpairs, at the start of the pool. */
  73. #define BITS_PER_PAGE 2
  74. static enum alloc_state get_page_state(const void *pool, unsigned long page)
  75. {
  76. return get_bit_pair(pool, page);
  77. }
  78. static void set_page_state(void *pool, unsigned long page, enum alloc_state s)
  79. {
  80. set_bit_pair(pool, page, s);
  81. }
  82. /* The offset of metadata for a subpage allocation is found at the end
  83. * of the subpage */
  84. #define SUBPAGE_METAOFF (getpagesize() - sizeof(unsigned long))
  85. /* This is the length of metadata in bits. It consists of two bits
  86. * for every BITMAP_GRANULARITY of usable bytes in the page, then two
  87. * bits for the tailer.. */
  88. #define BITMAP_METABITLEN \
  89. ((div_up(SUBPAGE_METAOFF, BITMAP_GRANULARITY) + 1) * BITS_PER_PAGE)
  90. /* This is the length in bytes. */
  91. #define BITMAP_METALEN (div_up(BITMAP_METABITLEN, CHAR_BIT))
  92. static struct metaheader *first_mheader(void *pool, unsigned long poolsize)
  93. {
  94. unsigned int pagestatelen;
  95. pagestatelen = align_up(div_up(poolsize/getpagesize() * BITS_PER_PAGE,
  96. CHAR_BIT),
  97. ALIGNOF(struct metaheader));
  98. return (struct metaheader *)((char *)pool + pagestatelen);
  99. }
  100. static struct metaheader *next_mheader(void *pool, struct metaheader *mh)
  101. {
  102. if (!mh->next)
  103. return NULL;
  104. return (struct metaheader *)((char *)pool + mh->next);
  105. }
  106. static unsigned long pool_offset(void *pool, void *p)
  107. {
  108. return (char *)p - (char *)pool;
  109. }
/* Initialise a pool: zero the page-state bitpairs and the first
 * metadata area, then mark the page(s) holding them as allocated.
 * Pools smaller than MIN_SIZE are silently left untouched. */
void alloc_init(void *pool, unsigned long poolsize)
{
	/* FIXME: Alignment assumptions about pool. */
	unsigned long len, i;
	struct metaheader *mh;

	if (poolsize < MIN_SIZE)
		return;

	mh = first_mheader(pool, poolsize);

	/* Mark all page states FREE, and all of metaheader bitmap which takes
	 * rest of first page. */
	len = align_up(pool_offset(pool, mh + 1), getpagesize());
	BUILD_ASSERT(FREE == 0);	/* so memset(0) means "all FREE" */
	memset(pool, 0, len);

	/* Mark the pagestate and metadata page(s) allocated. */
	set_page_state(pool, 0, TAKEN_START);
	for (i = 1; i < div_up(len, getpagesize()); i++)
		set_page_state(pool, i, TAKEN);
}
/* Two bits per element, representing page states. Returns 0 on fail.
 * off is used to allocate from subpage bitmaps, which use the first 2
 * bits as the type, so the real bitmap is offset by 1. */
/* NOTE(review): a successful allocation at element 0 with off == 0
 * would also return 0; callers appear to rely on element 0 never being
 * free (page 0 holds the page states) -- confirm. */
static unsigned long alloc_from_bitmap(uint8_t *bits, unsigned long off,
				       unsigned long elems,
				       unsigned long want, unsigned long align)
{
	long i;
	unsigned long free;

	free = 0;
	/* We allocate from far end, to increase ability to expand metadata. */
	for (i = elems - 1; i >= 0; i--) {
		switch (get_bit_pair(bits, off+i)) {
		case FREE:
			if (++free >= want) {
				unsigned long j;

				/* They might ask for large alignment.
				 * `continue` keeps the free count and
				 * keeps scanning toward lower indices. */
				if (align && i % align)
					continue;

				/* Claim run [i, i+want): start marker
				 * first, then continuations. */
				set_bit_pair(bits, off+i, TAKEN_START);
				for (j = i+1; j < i + want; j++)
					set_bit_pair(bits, off+j, TAKEN);
				return off+i;
			}
			break;
		case SPECIAL:
		case TAKEN_START:
		case TAKEN:
			/* Any non-free pair breaks the run. */
			free = 0;
			break;
		}
	}
	return 0;
}
  162. static unsigned long alloc_get_pages(void *pool, unsigned long poolsize,
  163. unsigned long pages, unsigned long align)
  164. {
  165. return alloc_from_bitmap(pool, 0, poolsize / getpagesize(), pages,
  166. align / getpagesize());
  167. }
  168. /* Offset to metadata is at end of page. */
  169. static unsigned long *metadata_off(void *pool, unsigned long page)
  170. {
  171. return (unsigned long *)
  172. ((char *)pool + (page+1)*getpagesize() - sizeof(unsigned long));
  173. }
  174. static uint8_t *get_page_metadata(void *pool, unsigned long page)
  175. {
  176. return (uint8_t *)pool + *metadata_off(pool, page);
  177. }
  178. static void set_page_metadata(void *pool, unsigned long page, uint8_t *meta)
  179. {
  180. *metadata_off(pool, page) = meta - (uint8_t *)pool;
  181. }
/* Allocate `size` bytes (aligned to `align`) from an existing SPECIAL
 * page using its bitmap metadata.  Returns the pool offset of the
 * allocation, or 0 if the page has no suitable run. */
static unsigned long sub_page_alloc(void *pool, unsigned long page,
				    unsigned long size, unsigned long align)
{
	uint8_t *bits = get_page_metadata(pool, page);
	unsigned long i;

	/* TAKEN at start means a bitwise alloc. */
	assert(get_bit_pair(bits, 0) == BITMAP);

	/* We use a standard bitmap, but offset because of that BITMAP
	 * header. */
	i = alloc_from_bitmap(bits, 1, SUBPAGE_METAOFF/BITMAP_GRANULARITY,
			      div_up(size, BITMAP_GRANULARITY),
			      align / BITMAP_GRANULARITY);

	/* Can't allocate? */
	if (i == 0)
		return 0;

	/* i-1 because of the header. */
	return page*getpagesize() + (i-1)*BITMAP_GRANULARITY;
}
  200. /* We look at the page states to figure out where the allocation for this
  201. * metadata ends. */
  202. static unsigned long get_metalen(void *pool, unsigned long poolsize,
  203. struct metaheader *mh)
  204. {
  205. unsigned long i, first, pages = poolsize / getpagesize();
  206. first = pool_offset(pool, mh + 1)/getpagesize();
  207. for (i = first + 1; i < pages && get_page_state(pool,i) == TAKEN; i++);
  208. return i * getpagesize() - pool_offset(pool, mh + 1);
  209. }
/* Claim `bytes` bytes within mh's metadata area, or return NULL.
 * The area is self-describing: its own bytes are scanned as bit pairs,
 * a block of `bytes` bytes spans bytes * CHAR_BIT / BITS_PER_PAGE
 * pairs, and the block's first pair doubles as its `type` tag (see the
 * file-layout comment at the top: "bitblock := 2-bits-per-bit-in-page
 * 1"). */
static uint8_t *alloc_metaspace(void *pool, unsigned long poolsize,
				struct metaheader *mh, unsigned long bytes,
				enum sub_metadata_type type)
{
	uint8_t *meta = (uint8_t *)(mh + 1);
	unsigned long free = 0, len, i, metalen;

	metalen = get_metalen(pool, poolsize, mh);

	/* TAKEN tags end a subpage alloc. */
	for (i = 0; i < metalen * CHAR_BIT / BITS_PER_PAGE; i += len) {
		switch (get_bit_pair(meta, i)) {
		case FREE:
			len = 1;
			free++;
			if (free == bytes * CHAR_BIT / BITS_PER_PAGE) {
				/* Mark this as a bitmap.  The run found
				 * is [i - free + 1, i]; the tag goes on
				 * its first pair. */
				set_bit_pair(meta, i - free + 1, type);
				/* Convert pair index to byte offset
				 * (CHAR_BIT / BITS_PER_PAGE pairs per
				 * byte). */
				return meta + (i - free + 1)
					/ (CHAR_BIT / BITS_PER_PAGE);
			}
			break;
		case BITMAP:
			/* Skip over this allocated part. */
			len = BITMAP_METALEN * CHAR_BIT / BITS_PER_PAGE;
			free = 0;
			break;
		default:
			/* Corrupt metadata tag. */
			assert(0);
			return NULL;
		}
	}
	return NULL;
}
  242. /* We need this many bytes of metadata. */
  243. static uint8_t *new_metadata(void *pool, unsigned long poolsize,
  244. unsigned long bytes, enum sub_metadata_type type)
  245. {
  246. struct metaheader *mh, *newmh;
  247. unsigned long page;
  248. for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh)){
  249. uint8_t *meta = alloc_metaspace(pool, poolsize, mh, bytes,type);
  250. if (meta)
  251. return meta;
  252. }
  253. /* No room for metadata? Can we expand an existing one? */
  254. for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh)){
  255. unsigned long nextpage;
  256. /* We start on this page. */
  257. nextpage = pool_offset(pool, (char *)(mh+1))/getpagesize();
  258. /* Iterate through any other pages we own. */
  259. while (get_page_state(pool, ++nextpage) == TAKEN)
  260. /* Now, can we grab that page? */
  261. if (get_page_state(pool, nextpage) != FREE)
  262. continue;
  263. /* OK, expand metadata, do it again. */
  264. set_page_state(pool, nextpage, TAKEN);
  265. BUILD_ASSERT(FREE == 0);
  266. memset((char *)pool + nextpage*getpagesize(), 0, getpagesize());
  267. return alloc_metaspace(pool, poolsize, mh, bytes, type);
  268. }
  269. /* No metadata left at all? */
  270. page = alloc_get_pages(pool, poolsize, div_up(bytes, getpagesize()), 1);
  271. if (!page)
  272. return NULL;
  273. newmh = (struct metaheader *)((char *)pool + page * getpagesize());
  274. BUILD_ASSERT(FREE == 0);
  275. memset(newmh + 1, 0, getpagesize() - sizeof(*mh));
  276. /* Sew it into linked list */
  277. mh = first_mheader(pool,poolsize);
  278. newmh->next = mh->next;
  279. mh->next = pool_offset(pool, newmh);
  280. return alloc_metaspace(pool, poolsize, newmh, bytes, type);
  281. }
  282. static void alloc_free_pages(void *pool, unsigned long pagenum)
  283. {
  284. assert(get_page_state(pool, pagenum) == TAKEN_START);
  285. set_page_state(pool, pagenum, FREE);
  286. while (get_page_state(pool, ++pagenum) == TAKEN)
  287. set_page_state(pool, pagenum, FREE);
  288. }
/* Allocate `size` bytes (aligned to `align`) from some subpage-bitmap
 * page, creating a new SPECIAL page if no existing one has room.
 * Returns the pool offset of the allocation, or 0 on failure. */
static unsigned long alloc_sub_page(void *pool, unsigned long poolsize,
				    unsigned long size, unsigned long align)
{
	unsigned long i;
	uint8_t *metadata;

	/* Look for partial page. */
	for (i = 0; i < poolsize / getpagesize(); i++) {
		unsigned long ret;

		if (get_page_state(pool, i) != SPECIAL)
			continue;

		ret = sub_page_alloc(pool, i, size, align);
		if (ret)
			return ret;
	}

	/* Create new SUBPAGE page. */
	i = alloc_get_pages(pool, poolsize, 1, 1);
	if (i == 0)
		return 0;

	/* Get metadata for page. */
	metadata = new_metadata(pool, poolsize, BITMAP_METALEN, BITMAP);
	if (!metadata) {
		/* Roll back the page grab on metadata failure. */
		alloc_free_pages(pool, i);
		return 0;
	}

	/* Actually, this is a subpage page now. */
	set_page_state(pool, i, SPECIAL);

	/* Set metadata pointer for page. */
	set_page_metadata(pool, i, metadata);

	/* Do allocation like normal */
	return sub_page_alloc(pool, i, size, align);
}
/* Returns true if we cleaned any pages. */
/* Scan SPECIAL pages; any whose bitmap shows no allocations is
 * returned to the FREE pool.  The page's bitmap metadata itself is not
 * released here -- presumably clean_metadata reclaims it; confirm. */
static bool clean_empty_subpages(void *pool, unsigned long poolsize)
{
	unsigned long i;
	bool progress = false;

	for (i = 0; i < poolsize/getpagesize(); i++) {
		uint8_t *meta;
		unsigned int j;

		if (get_page_state(pool, i) != SPECIAL)
			continue;

		meta = get_page_metadata(pool, i);
		/* Skip the header (first bit of metadata). */
		for (j = 1; j < SUBPAGE_METAOFF/BITMAP_GRANULARITY+1; j++)
			if (get_bit_pair(meta, j) != FREE)
				break;

		/* So, is this page totally empty? */
		if (j == SUBPAGE_METAOFF/BITMAP_GRANULARITY+1) {
			set_page_state(pool, i, FREE);
			progress = true;
		}
	}
	return progress;
}
/* Returns true if we cleaned any pages. */
static bool clean_metadata(void *pool, unsigned long poolsize)
{
	struct metaheader *mh, *prev_mh = NULL;
	bool progress = false;

	for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh)){
		uint8_t *meta;
		long i;
		unsigned long metalen = get_metalen(pool, poolsize, mh);

		meta = (uint8_t *)(mh + 1);
		BUILD_ASSERT(FREE == 0);
		/* Find the last non-zero metadata byte (meta[0] is never
		 * examined by this loop). */
		for (i = metalen - 1; i > 0; i--)
			if (meta[i] != 0)
				break;

		/* Completely empty? */
		/* NOTE(review): after the loop above, i <= metalen - 1,
		 * so `i == metalen` can never hold and this branch is
		 * dead code; `prev_mh` is also never advanced in this
		 * loop, so it stays NULL.  Presumably the intent was to
		 * unlink an all-zero area (i == 0 && meta[0] == 0) --
		 * confirm against upstream before changing. */
		if (prev_mh && i == metalen) {
			alloc_free_pages(pool,
					 pool_offset(pool, mh)/getpagesize());
			prev_mh->next = mh->next;
			mh = prev_mh;
			progress = true;
		} else {
			uint8_t *p;

			/* Some pages at end are free? */
			for (p = (uint8_t *)(mh+1) + metalen - getpagesize();
			     p > meta + i;
			     p -= getpagesize()) {
				set_page_state(pool,
					       pool_offset(pool, p)
					       / getpagesize(),
					       FREE);
				progress = true;
			}
		}
	}
	return progress;
}
/* Allocate `size` bytes aligned to `align` from the pool.  Returns a
 * pointer inside the pool, or NULL if no space remains even after
 * garbage-collecting empty subpages and metadata. */
void *alloc_get(void *pool, unsigned long poolsize,
		unsigned long size, unsigned long align)
{
	bool subpage_clean = false, metadata_clean = false;
	unsigned long ret;

	if (poolsize < MIN_SIZE)
		return NULL;

again:
	/* Sub-page allocations have an overhead of ~12%. */
	if (size + size/8 >= getpagesize() || align >= getpagesize()) {
		unsigned long pages = div_up(size, getpagesize());

		/* alloc_get_pages returns 0 on failure; page 0 holds the
		 * page states (marked TAKEN_START in alloc_init), so a
		 * zero result is unambiguous. */
		ret = alloc_get_pages(pool, poolsize, pages, align)
			* getpagesize();
	} else
		ret = alloc_sub_page(pool, poolsize, size, align);

	if (ret != 0)
		return (char *)pool + ret;

	/* Allocation failed: garbage collection. */
	if (!subpage_clean) {
		subpage_clean = true;
		if (clean_empty_subpages(pool, poolsize))
			goto again;
	}

	if (!metadata_clean) {
		metadata_clean = true;
		if (clean_metadata(pool, poolsize))
			goto again;
	}

	/* FIXME: Compact metadata? */
	return NULL;
}
  411. static void subpage_free(void *pool, unsigned long pagenum, void *free)
  412. {
  413. unsigned long off = (unsigned long)free % getpagesize();
  414. uint8_t *metadata;
  415. assert(off < SUBPAGE_METAOFF);
  416. assert(off % BITMAP_GRANULARITY == 0);
  417. metadata = get_page_metadata(pool, pagenum);
  418. off /= BITMAP_GRANULARITY;
  419. /* Offset by one because first bit is used for header. */
  420. off++;
  421. set_bit_pair(metadata, off++, FREE);
  422. while (off < SUBPAGE_METAOFF / BITMAP_GRANULARITY
  423. && get_bit_pair(metadata, off) == TAKEN)
  424. set_bit_pair(metadata, off++, FREE);
  425. }
/* Return an allocation obtained from alloc_get to the pool.
 * Freeing NULL is a no-op. */
void alloc_free(void *pool, unsigned long poolsize, void *free)
{
	unsigned long pagenum;
	struct metaheader *mh;

	if (!free)
		return;

	assert(poolsize >= MIN_SIZE);

	mh = first_mheader(pool, poolsize);
	/* Sanity-check that the pointer lies inside the allocatable area. */
	assert((char *)free >= (char *)(mh + 1));
	assert((char *)pool + poolsize > (char *)free);

	pagenum = pool_offset(pool, free) / getpagesize();

	if (get_page_state(pool, pagenum) == SPECIAL)
		subpage_free(pool, pagenum, free);
	else {
		/* Whole-page allocations always start page-aligned. */
		assert((unsigned long)free % getpagesize() == 0);
		alloc_free_pages(pool, pagenum);
	}
}
  444. static bool is_metadata_page(void *pool, unsigned long poolsize,
  445. unsigned long page)
  446. {
  447. struct metaheader *mh;
  448. for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh)){
  449. unsigned long start, end;
  450. start = pool_offset(pool, mh);
  451. end = pool_offset(pool, (char *)(mh+1)
  452. + get_metalen(pool, poolsize, mh));
  453. if (page >= start/getpagesize() && page < end/getpagesize())
  454. return true;
  455. }
  456. return false;
  457. }
/* Validate a SPECIAL page: its metadata offset must be in range and
 * aligned, point into a metadata area, start with a BITMAP tag, and
 * its bitmap must contain no SPECIAL pairs or orphaned TAKEN pairs. */
static bool check_subpage(void *pool, unsigned long poolsize,
			  unsigned long page)
{
	unsigned long *mhoff = metadata_off(pool, page);
	unsigned int i;
	enum alloc_state last_state = FREE;

	if (*mhoff + sizeof(struct metaheader) > poolsize)
		return false;

	if (*mhoff % ALIGNOF(struct metaheader) != 0)
		return false;

	/* It must point to a metadata page. */
	if (!is_metadata_page(pool, poolsize, *mhoff / getpagesize()))
		return false;

	/* Header at start of subpage allocation */
	if (get_bit_pair((uint8_t *)pool + *mhoff, 0) != BITMAP)
		return false;

	for (i = 0; i < SUBPAGE_METAOFF / BITMAP_GRANULARITY; i++) {
		enum alloc_state state;

		/* +1 because header is the first byte. */
		state = get_bit_pair((uint8_t *)pool + *mhoff, i+1);
		switch (state) {
		case SPECIAL:
			/* SPECIAL only appears in page states, not here. */
			return false;
		case TAKEN:
			/* A continuation must follow a non-FREE pair. */
			if (last_state == FREE)
				return false;
			break;
		default:
			break;
		}
		last_state = state;
	}
	return true;
}
/* Consistency-check the whole pool; returns true if it looks sane. */
bool alloc_check(void *pool, unsigned long poolsize)
{
	unsigned long i;
	struct metaheader *mh;
	enum alloc_state last_state = FREE;
	bool was_metadata = false;

	/* Pools below MIN_SIZE are never initialised, so trivially OK. */
	if (poolsize < MIN_SIZE)
		return true;

	/* alloc_init marks page 0 (the page states) TAKEN_START. */
	if (get_page_state(pool, 0) != TAKEN_START)
		return false;

	/* First check metadata pages. */
	/* Metadata pages will be marked TAKEN. */
	for (mh = first_mheader(pool,poolsize); mh; mh = next_mheader(pool,mh)){
		unsigned long start, end;

		start = pool_offset(pool, mh);
		if (start + sizeof(*mh) > poolsize)
			return false;

		end = pool_offset(pool, (char *)(mh+1)
				  + get_metalen(pool, poolsize, mh));
		if (end > poolsize)
			return false;

		/* Non-first pages should start on a page boundary. */
		if (mh != first_mheader(pool, poolsize)
		    && start % getpagesize() != 0)
			return false;

		/* It should end on a page boundary. */
		if (end % getpagesize() != 0)
			return false;
	}

	for (i = 0; i < poolsize / getpagesize(); i++) {
		enum alloc_state state = get_page_state(pool, i);
		bool is_metadata = is_metadata_page(pool, poolsize,i);

		switch (state) {
		case FREE:
			/* metadata pages are never free. */
			if (is_metadata)
				return false;
			/* fallthrough */
		case TAKEN_START:
			break;
		case TAKEN:
			/* This should continue a previous block. */
			if (last_state == FREE)
				return false;
			if (is_metadata != was_metadata)
				return false;
			break;
		case SPECIAL:
			/* Check metadata pointer etc. */
			if (!check_subpage(pool, poolsize, i))
				return false;
		}
		last_state = state;
		was_metadata = is_metadata;
	}
	return true;
}
  548. void alloc_visualize(FILE *out, void *pool, unsigned long poolsize)
  549. {
  550. struct metaheader *mh;
  551. unsigned long pagebitlen, metadata_pages, count[1<<BITS_PER_PAGE], tot;
  552. long i;
  553. if (poolsize < MIN_SIZE) {
  554. fprintf(out, "Pool smaller than %u: no content\n", MIN_SIZE);
  555. return;
  556. }
  557. memset(count, 0, sizeof(count));
  558. for (i = 0; i < poolsize / getpagesize(); i++)
  559. count[get_page_state(pool, i)]++;
  560. mh = first_mheader(pool, poolsize);
  561. pagebitlen = (char *)mh - (char *)pool;
  562. fprintf(out, "%lu bytes of page bits: FREE/TAKEN/TAKEN_START/SUBPAGE = %lu/%lu/%lu/%lu\n",
  563. pagebitlen, count[0], count[1], count[2], count[3]);
  564. /* One metadata page for every page of page bits. */
  565. metadata_pages = div_up(pagebitlen, getpagesize());
  566. /* Now do each metadata page. */
  567. for (; mh; mh = next_mheader(pool,mh)) {
  568. unsigned long free = 0, subpageblocks = 0, len = 0, metalen;
  569. uint8_t *meta = (uint8_t *)(mh + 1);
  570. metalen = get_metalen(pool, poolsize, mh);
  571. metadata_pages += (sizeof(*mh) + metalen) / getpagesize();
  572. for (i = 0; i < metalen * CHAR_BIT / BITS_PER_PAGE; i += len) {
  573. switch (get_page_state(meta, i)) {
  574. case FREE:
  575. len = 1;
  576. free++;
  577. break;
  578. case BITMAP:
  579. /* Skip over this allocated part. */
  580. len = BITMAP_METALEN * CHAR_BIT;
  581. subpageblocks++;
  582. break;
  583. default:
  584. assert(0);
  585. }
  586. }
  587. fprintf(out, "Metadata %lu-%lu: %lu free, %lu subpageblocks, %lu%% density\n",
  588. pool_offset(pool, mh),
  589. pool_offset(pool, (char *)(mh+1) + metalen),
  590. free, subpageblocks,
  591. subpageblocks * BITMAP_METALEN * 100
  592. / (free + subpageblocks * BITMAP_METALEN));
  593. }
  594. /* Account for total pages allocated. */
  595. tot = (count[1] + count[2] - metadata_pages) * getpagesize();
  596. fprintf(out, "Total metadata bytes = %lu\n",
  597. metadata_pages * getpagesize());
  598. /* Now do every subpage. */
  599. for (i = 0; i < poolsize / getpagesize(); i++) {
  600. uint8_t *meta;
  601. unsigned int j;
  602. if (get_page_state(pool, i) != SPECIAL)
  603. continue;
  604. memset(count, 0, sizeof(count));
  605. meta = get_page_metadata(pool, i);
  606. for (j = 0; j < SUBPAGE_METAOFF/BITMAP_GRANULARITY; j++)
  607. count[get_page_state(meta, j)]++;
  608. fprintf(out, "Subpage %lu: "
  609. "FREE/TAKEN/TAKEN_START = %lu/%lu/%lu %lu%% density\n",
  610. i, count[0], count[1], count[2],
  611. ((count[1] + count[2]) * BITMAP_GRANULARITY) * 100
  612. / getpagesize());
  613. tot += (count[1] + count[2]) * BITMAP_GRANULARITY;
  614. }
  615. /* This is optimistic, since we overalloc in several cases. */
  616. fprintf(out, "Best possible allocation density = %lu%%\n",
  617. tot * 100 / poolsize);
  618. }