Browse Source

Minor cleanups, a little new testing and a "visualize" routine which
found one bug.

Rusty Russell 18 years ago
parent
commit
4f5988a8db
3 changed files with 141 additions and 6 deletions
  1. 96 5
      alloc/alloc.c
  2. 2 0
      alloc/alloc.h
  3. 43 1
      alloc/test/run.c

+ 96 - 5
alloc/alloc.c

@@ -67,10 +67,11 @@ static unsigned long div_up(unsigned long x, unsigned long a)
  * of the subpage */
  * of the subpage */
 #define SUBPAGE_METAOFF (getpagesize() - sizeof(unsigned long))
 #define SUBPAGE_METAOFF (getpagesize() - sizeof(unsigned long))
 
 
-/* This is the length of metadata in bits.  It consists of a TAKEN header,
- * then two bits for every BITMAP_GRANULARITY of usable bytes in the. */
+/* This is the length of metadata in bits.  It consists of two bits
+ * for every BITMAP_GRANULARITY of usable bytes in the page, then two
+ * bits for the TAKEN tailer. */
 #define BITMAP_METABITLEN						\
 #define BITMAP_METABITLEN						\
-    ((1 + div_up(SUBPAGE_METAOFF, BITMAP_GRANULARITY)) * BITS_PER_PAGE)
+    ((div_up(SUBPAGE_METAOFF, BITMAP_GRANULARITY) + 1) * BITS_PER_PAGE)
 
 
 /* This is the length in bytes. */
 /* This is the length in bytes. */
 #define BITMAP_METALEN (div_up(BITMAP_METABITLEN, CHAR_BIT))
 #define BITMAP_METALEN (div_up(BITMAP_METABITLEN, CHAR_BIT))
@@ -236,7 +237,7 @@ static void *sub_page_alloc(void *pool, unsigned long page,
 	       == TAKEN);
 	       == TAKEN);
 
 
 	/* Our bits are the same as the page bits. */
 	/* Our bits are the same as the page bits. */
-	i = alloc_from_bitmap(bits, getpagesize()/BITMAP_GRANULARITY,
+	i = alloc_from_bitmap(bits, SUBPAGE_METAOFF/BITMAP_GRANULARITY,
 			      div_up(size, BITMAP_GRANULARITY),
 			      div_up(size, BITMAP_GRANULARITY),
 			      align / BITMAP_GRANULARITY);
 			      align / BITMAP_GRANULARITY);
 
 
@@ -267,7 +268,7 @@ static uint8_t *alloc_metaspace(struct metaheader *mh, unsigned long bytes)
 			break;
 			break;
 		case TAKEN:
 		case TAKEN:
 			/* Skip over this allocated part. */
 			/* Skip over this allocated part. */
-			len = BITMAP_METALEN * CHAR_BIT;
+			len = BITMAP_METALEN * CHAR_BIT / BITS_PER_PAGE;
 			free = 0;
 			free = 0;
 			break;
 			break;
 		default:
 		default:
@@ -306,6 +307,7 @@ static uint8_t *new_metadata(void *pool, unsigned long poolsize,
 
 
 		/* OK, expand metadata, do it again. */
 		/* OK, expand metadata, do it again. */
 		set_page_state(pool, nextpage / getpagesize(), TAKEN);
 		set_page_state(pool, nextpage / getpagesize(), TAKEN);
+		BUILD_ASSERT(FREE == 0);
 		memset((char *)pool + nextpage, 0, getpagesize());
 		memset((char *)pool + nextpage, 0, getpagesize());
 		mh->metalen += getpagesize();
 		mh->metalen += getpagesize();
 		return alloc_metaspace(mh, bytes);
 		return alloc_metaspace(mh, bytes);
@@ -318,6 +320,7 @@ static uint8_t *new_metadata(void *pool, unsigned long poolsize,
 
 
 	newmh = (struct metaheader *)((char *)pool + page * getpagesize());
 	newmh = (struct metaheader *)((char *)pool + page * getpagesize());
 	newmh->metalen = getpagesize() - sizeof(*mh);
 	newmh->metalen = getpagesize() - sizeof(*mh);
+	BUILD_ASSERT(FREE == 0);
 	memset(newmh + 1, 0, newmh->metalen);
 	memset(newmh + 1, 0, newmh->metalen);
 
 
 	/* Sew it into linked list */
 	/* Sew it into linked list */
@@ -559,3 +562,91 @@ bool alloc_check(void *pool, unsigned long poolsize)
 	}
 	}
 	return true;
 	return true;
 }
 }
+
+void alloc_visualize(FILE *out, void *pool, unsigned long poolsize)
+{
+	struct metaheader *mh;
+	unsigned long pagebitlen, metadata_pages, count[1<<BITS_PER_PAGE], tot;
+	long i;
+
+	if (poolsize < MIN_SIZE) {
+		fprintf(out, "Pool smaller than %u: no content\n", MIN_SIZE);
+		return;
+	}
+
+	memset(count, 0, sizeof(count));
+	for (i = 0; i < poolsize / getpagesize(); i++)
+		count[get_page_state(pool, i)]++;
+
+	mh = first_mheader(pool, poolsize);
+	pagebitlen = (char *)mh - (char *)pool;
+	fprintf(out, "%lu bytes of page bits: FREE/TAKEN/TAKEN_START/SUBPAGE = %lu/%lu/%lu/%lu\n",
+		pagebitlen, count[0], count[1], count[2], count[3]);
+
+	/* One metadata page for every page of page bits. */
+	metadata_pages = div_up(pagebitlen, getpagesize());
+
+	/* Now do each metadata page. */
+	for (; mh; mh = next_mheader(pool,mh)) {
+		unsigned long free = 0, subpageblocks = 0, len = 0;
+		uint8_t *meta = (uint8_t *)(mh + 1);
+
+		metadata_pages += (sizeof(*mh) + mh->metalen) / getpagesize();
+
+		/* TAKEN tags end a subpage alloc. */
+		for (i = mh->metalen * CHAR_BIT/BITS_PER_PAGE - 1;
+		     i >= 0;
+		     i -= len) {
+			switch (get_page_state(meta, i)) {
+			case FREE:
+				len = 1;
+				free++;
+				break;
+			case TAKEN:
+				/* Skip over this allocated part. */
+				len = BITMAP_METALEN * CHAR_BIT;
+				subpageblocks++;
+				break;
+			default:
+				assert(0);
+			}
+		}
+
+		fprintf(out, "Metadata %lu-%lu: %lu free, %lu subpageblocks, %lu%% density\n",
+			pool_offset(pool, mh),
+			pool_offset(pool, (char *)(mh+1) + mh->metalen),
+			free, subpageblocks,
+			subpageblocks * BITMAP_METALEN * 100
+			/ (free + subpageblocks * BITMAP_METALEN));
+	}
+
+	/* Account for total pages allocated. */
+	tot = (count[1] + count[2] - metadata_pages) * getpagesize();
+
+	fprintf(out, "Total metadata bytes = %lu\n",
+		metadata_pages * getpagesize());
+
+	/* Now do every subpage. */
+	for (i = 0; i < poolsize / getpagesize(); i++) {
+		uint8_t *meta;
+		unsigned int j;
+		if (get_page_state(pool, i) != SUBPAGE)
+			continue;
+
+		memset(count, 0, sizeof(count));
+		meta = get_page_metadata(pool, i);
+		for (j = 0; j < SUBPAGE_METAOFF/BITMAP_GRANULARITY; j++)
+			count[get_page_state(meta, j)]++;
+
+		fprintf(out, "Subpage %lu: "
+			"FREE/TAKEN/TAKEN_START = %lu/%lu/%lu %lu%% density\n",
+			i, count[0], count[1], count[2],
+			((count[1] + count[2]) * BITMAP_GRANULARITY) * 100
+			/ getpagesize());
+		tot += (count[1] + count[2]) * BITMAP_GRANULARITY;
+	}
+
+	/* This is optimistic, since we overalloc in several cases. */
+	fprintf(out, "Best possible allocation density = %lu%%\n",
+		tot * 100 / poolsize);
+}

+ 2 - 0
alloc/alloc.h

@@ -1,5 +1,6 @@
 #ifndef ALLOC_H
 #ifndef ALLOC_H
 #define ALLOC_H
 #define ALLOC_H
+#include <stdio.h>
 #include <stdbool.h>
 #include <stdbool.h>
 
 
 void alloc_init(void *pool, unsigned long poolsize);
 void alloc_init(void *pool, unsigned long poolsize);
@@ -8,4 +9,5 @@ void *alloc_get(void *pool, unsigned long poolsize,
 void alloc_free(void *pool, unsigned long poolsize, void *free);
 void alloc_free(void *pool, unsigned long poolsize, void *free);
 bool alloc_check(void *pool, unsigned long poolsize);
 bool alloc_check(void *pool, unsigned long poolsize);
 
 
+void alloc_visualize(FILE *out, void *pool, unsigned long poolsize);
 #endif /* ALLOC_H */
 #endif /* ALLOC_H */

+ 43 - 1
alloc/test/run.c

@@ -49,7 +49,7 @@ int main(int argc, char *argv[])
 	unsigned int i, num, max_size;
 	unsigned int i, num, max_size;
 	void *p[POOL_SIZE];
 	void *p[POOL_SIZE];
 
 
-	plan_tests(133);
+	plan_tests(139);
 
 
 	/* FIXME: Needs to be page aligned for now. */
 	/* FIXME: Needs to be page aligned for now. */
 	posix_memalign(&mem, getpagesize(), POOL_SIZE);
 	posix_memalign(&mem, getpagesize(), POOL_SIZE);
@@ -93,6 +93,12 @@ int main(int argc, char *argv[])
 			break;
 			break;
 	}
 	}
 
 
+	/* Uncomment this for a more intuitive view of what the
+	 * allocator looks like after all these 1 byte allocs. */
+#if 0
+	alloc_visualize(stderr, mem, POOL_SIZE);
+#endif
+
 	num = i;
 	num = i;
 	/* Can't allocate this many. */
 	/* Can't allocate this many. */
 	ok1(num != POOL_SIZE);
 	ok1(num != POOL_SIZE);
@@ -149,5 +155,41 @@ int main(int argc, char *argv[])
 	alloc_free(mem, POOL_SIZE, p[0]);
 	alloc_free(mem, POOL_SIZE, p[0]);
 	ok1(alloc_check(mem, POOL_SIZE));
 	ok1(alloc_check(mem, POOL_SIZE));
 
 
+	/* Force the testing of split metadata. */
+	alloc_init(mem, POOL_SIZE);
+	for (i = 0; i < POOL_SIZE; i++) {
+		p[i] = alloc_get(mem, POOL_SIZE, getpagesize(), getpagesize());
+		if (!p[i])
+			break;
+	}
+	ok1(alloc_check(mem, POOL_SIZE));
+
+	/* Sort them. */
+	sort(p, i-1, addr_cmp);
+
+	/* Free all but the one next to the metadata. */
+	for (i = 1; p[i]; i++)
+		alloc_free(mem, POOL_SIZE, p[i]);
+	ok1(alloc_check(mem, POOL_SIZE));
+
+	/* Now do a whole heap of subpage allocs. */
+	for (i = 1; i < POOL_SIZE; i++) {
+		p[i] = alloc_get(mem, POOL_SIZE, 1, 1);
+		if (!p[i])
+			break;
+	}
+	ok1(alloc_check(mem, POOL_SIZE));
+
+	/* Free up our page next to metadata, and should be able to alloc */
+	alloc_free(mem, POOL_SIZE, p[0]);
+	ok1(alloc_check(mem, POOL_SIZE));
+	p[0] = alloc_get(mem, POOL_SIZE, 1, 1);
+	ok1(p[0]);
+
+	/* Clean up. */
+	for (i = 0; p[i]; i++)
+		alloc_free(mem, POOL_SIZE, p[i]);
+	ok1(alloc_check(mem, POOL_SIZE));
+
 	return exit_status();
 	return exit_status();
 }
 }