/* htable.c - hash table implementation storing pointers, with metadata
 * packed into the pointers' common bits. */
  1. #include <ccan/htable/htable.h>
  2. #include <ccan/compiler/compiler.h>
  3. #include <stdint.h>
  4. #include <stdlib.h>
  5. #include <stdbool.h>
  6. #include <assert.h>
  7. /* This means a struct htable takes at least 512 bytes / 1k (32/64 bits). */
  8. #define HTABLE_BASE_BITS 7
  9. /* We use 0x1 as deleted marker. */
  10. #define HTABLE_DELETED (0x1)
struct htable {
	/* Caller-supplied hash function; priv is passed through on each call. */
	size_t (*rehash)(const void *elem, void *priv);
	void *priv;
	/* The table has (1 << bits) buckets. */
	unsigned int bits;
	/* Live entry count, and the load threshold that triggers doubling. */
	size_t elems, max;
	/* These are the bits which are the same in all pointers. */
	uintptr_t common_mask, common_bits;
	/* Bucket array: 0 = empty, HTABLE_DELETED = tombstone, else an
	 * encoded pointer with hash bits stashed in the common positions. */
	uintptr_t *table;
};
  20. /* We clear out the bits which are always the same, and put metadata there. */
  21. static inline uintptr_t get_extra_ptr_bits(const struct htable *ht,
  22. uintptr_t e)
  23. {
  24. return e & ht->common_mask;
  25. }
  26. static inline void *get_raw_ptr(const struct htable *ht, uintptr_t e)
  27. {
  28. return (void *)((e & ~ht->common_mask) | ht->common_bits);
  29. }
  30. static inline uintptr_t make_hval(const struct htable *ht,
  31. const void *p, uintptr_t bits)
  32. {
  33. return ((uintptr_t)p & ~ht->common_mask) | bits;
  34. }
  35. static inline bool entry_is_valid(uintptr_t e)
  36. {
  37. return e > HTABLE_DELETED;
  38. }
  39. static inline uintptr_t get_hash_ptr_bits(const struct htable *ht,
  40. size_t hash)
  41. {
  42. /* Shuffling the extra bits (as specified in mask) down the
  43. * end is quite expensive. But the lower bits are redundant, so
  44. * we fold the value first. */
  45. return (hash ^ (hash >> ht->bits)) & ht->common_mask;
  46. }
  47. struct htable *htable_new(size_t (*rehash)(const void *elem, void *priv),
  48. void *priv)
  49. {
  50. struct htable *ht = malloc(sizeof(struct htable));
  51. if (ht) {
  52. ht->bits = HTABLE_BASE_BITS;
  53. ht->rehash = rehash;
  54. ht->priv = priv;
  55. ht->elems = 0;
  56. ht->max = (1 << ht->bits) * 3 / 4;
  57. /* This guarantees we enter update_common first add. */
  58. ht->common_mask = -1;
  59. ht->common_bits = 0;
  60. ht->table = calloc(1 << ht->bits, sizeof(uintptr_t));
  61. if (!ht->table) {
  62. free(ht);
  63. ht = NULL;
  64. }
  65. }
  66. return ht;
  67. }
  68. void htable_free(const struct htable *ht)
  69. {
  70. free((void *)ht->table);
  71. free((void *)ht);
  72. }
  73. static size_t hash_bucket(const struct htable *ht, size_t h)
  74. {
  75. return h & ((1 << ht->bits)-1);
  76. }
  77. static void *htable_val(const struct htable *ht,
  78. struct htable_iter *i, size_t hash)
  79. {
  80. uintptr_t h2 = get_hash_ptr_bits(ht, hash);
  81. while (ht->table[i->off]) {
  82. if (ht->table[i->off] != HTABLE_DELETED) {
  83. if (get_extra_ptr_bits(ht, ht->table[i->off]) == h2)
  84. return get_raw_ptr(ht, ht->table[i->off]);
  85. }
  86. i->off = (i->off + 1) & ((1 << ht->bits)-1);
  87. }
  88. return NULL;
  89. }
  90. void *htable_firstval(const struct htable *ht,
  91. struct htable_iter *i, size_t hash)
  92. {
  93. i->off = hash_bucket(ht, hash);
  94. return htable_val(ht, i, hash);
  95. }
  96. void *htable_nextval(const struct htable *ht,
  97. struct htable_iter *i, size_t hash)
  98. {
  99. i->off = (i->off + 1) & ((1 << ht->bits)-1);
  100. return htable_val(ht, i, hash);
  101. }
  102. void *htable_first(const struct htable *ht, struct htable_iter *i)
  103. {
  104. for (i->off = 0; i->off < (size_t)1 << ht->bits; i->off++) {
  105. if (entry_is_valid(ht->table[i->off]))
  106. return get_raw_ptr(ht, ht->table[i->off]);
  107. }
  108. return NULL;
  109. }
  110. void *htable_next(const struct htable *ht, struct htable_iter *i)
  111. {
  112. for (i->off++; i->off < (size_t)1 << ht->bits; i->off++) {
  113. if (entry_is_valid(ht->table[i->off]))
  114. return get_raw_ptr(ht, ht->table[i->off]);
  115. }
  116. return NULL;
  117. }
  118. /* This does not expand the hash table, that's up to caller. */
  119. static void ht_add(struct htable *ht, const void *new, size_t h)
  120. {
  121. size_t i;
  122. i = hash_bucket(ht, h);
  123. while (entry_is_valid(ht->table[i]))
  124. i = (i + 1) & ((1 << ht->bits)-1);
  125. ht->table[i] = make_hval(ht, new, get_hash_ptr_bits(ht, h));
  126. }
  127. static COLD_ATTRIBUTE bool double_table(struct htable *ht)
  128. {
  129. unsigned int i;
  130. size_t oldnum = (size_t)1 << ht->bits;
  131. size_t *oldtable, e;
  132. oldtable = ht->table;
  133. ht->table = calloc(1 << (ht->bits+1), sizeof(size_t));
  134. if (!ht->table) {
  135. ht->table = oldtable;
  136. return false;
  137. }
  138. ht->bits++;
  139. ht->max *= 2;
  140. for (i = 0; i < oldnum; i++) {
  141. if (entry_is_valid(e = oldtable[i])) {
  142. void *p = get_raw_ptr(ht, e);
  143. ht_add(ht, p, ht->rehash(p, ht->priv));
  144. }
  145. }
  146. free(oldtable);
  147. return true;
  148. }
  149. /* We stole some bits, now we need to put them back... */
  150. static COLD_ATTRIBUTE void update_common(struct htable *ht, const void *p)
  151. {
  152. unsigned int i;
  153. uintptr_t maskdiff, bitsdiff;
  154. if (ht->elems == 0) {
  155. ht->common_mask = -1;
  156. ht->common_bits = (uintptr_t)p;
  157. return;
  158. }
  159. /* Find bits which are unequal to old common set. */
  160. maskdiff = ht->common_bits ^ ((uintptr_t)p & ht->common_mask);
  161. /* These are the bits which go there in existing entries. */
  162. bitsdiff = ht->common_bits & maskdiff;
  163. for (i = 0; i < (size_t)1 << ht->bits; i++) {
  164. if (!entry_is_valid(ht->table[i]))
  165. continue;
  166. /* Clear the bits no longer in the mask, set them as
  167. * expected. */
  168. ht->table[i] &= ~maskdiff;
  169. ht->table[i] |= bitsdiff;
  170. }
  171. /* Take away those bits from our mask and set. */
  172. ht->common_mask &= ~maskdiff;
  173. ht->common_bits &= ~maskdiff;
  174. }
  175. bool htable_add(struct htable *ht, size_t hash, const void *p)
  176. {
  177. if (ht->elems+1 > ht->max && !double_table(ht))
  178. return false;
  179. assert(p);
  180. if (((uintptr_t)p & ht->common_mask) != ht->common_bits)
  181. update_common(ht, p);
  182. ht_add(ht, p, hash);
  183. ht->elems++;
  184. return true;
  185. }
  186. /* If every one of the following buckets are DELETED (up to the next unused
  187. one), we can actually mark them all unused. */
  188. static void delete_run(struct htable *ht, unsigned int num)
  189. {
  190. unsigned int i, last = num + 1;
  191. size_t mask = (((size_t)1 << ht->bits)-1);
  192. while (ht->table[last & mask]) {
  193. if (entry_is_valid(ht->table[last & mask]))
  194. return;
  195. last++;
  196. }
  197. /* Now see if we can step backwards to find previous deleted ones. */
  198. for (i = num-1; ht->table[i & mask] == HTABLE_DELETED; i--);
  199. for (i++; i < last; i++)
  200. ht->table[i & ((1 << ht->bits)-1)] = 0;
  201. }
  202. bool htable_del(struct htable *ht, size_t h, const void *p)
  203. {
  204. struct htable_iter i;
  205. void *c;
  206. for (c = htable_firstval(ht,&i,h); c; c = htable_nextval(ht,&i,h)) {
  207. if (c == p) {
  208. htable_delval(ht, &i);
  209. return true;
  210. }
  211. }
  212. return false;
  213. }
/* Delete the entry the iterator currently points at.  The iterator must
 * reference a valid (non-empty, non-tombstone) slot, e.g. as returned by
 * htable_firstval/htable_nextval. */
void htable_delval(struct htable *ht, struct htable_iter *i)
{
	assert(i->off < (size_t)1 << ht->bits);
	assert(entry_is_valid(ht->table[i->off]));

	ht->elems--;
	/* Leave a tombstone so other probe chains stay intact... */
	ht->table[i->off] = HTABLE_DELETED;
	/* ...then reclaim the whole run of tombstones if possible. */
	delete_run(ht, i->off);
}