PostGIS  2.5.7dev-r@@SVN_REVISION@@
uthash.h
1 /*
2 Copyright (c) 2003-2017, Troy D. Hanson http://troydhanson.github.com/uthash/
3 All rights reserved.
4 
5 Redistribution and use in source and binary forms, with or without
6 modification, are permitted provided that the following conditions are met:
7 
8  * Redistributions of source code must retain the above copyright
9  notice, this list of conditions and the following disclaimer.
10 
11 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
12 IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
13 TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
14 PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
15 OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
16 EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
17 PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
18 PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
19 LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
20 NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
21 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
22 */
23 
24 #ifndef UTHASH_H
25 #define UTHASH_H
26 
27 #define UTHASH_VERSION 2.0.2
28 
29 #include <string.h> /* memcmp,strlen */
30 #include <stddef.h> /* ptrdiff_t */
31 #include <stdlib.h> /* exit() */
32 
33 /* These macros use decltype or the earlier __typeof GNU extension.
34  As decltype is only available in newer compilers (VS2010 or gcc 4.3+
35  when compiling c++ source) this code uses whichever is available,
36  or, for VS2008 where neither is available, falls back to casting workarounds. */
37 #if !defined(DECLTYPE) && !defined(NO_DECLTYPE)
38 #if defined(_MSC_VER) /* MS compiler */
39 #if _MSC_VER >= 1600 && defined(__cplusplus) /* VS2010 or newer in C++ mode */
40 #define DECLTYPE(x) (decltype(x))
41 #else /* VS2008 or older (or VS2010 in C mode) */
42 #define NO_DECLTYPE
43 #endif
44 #elif defined(__BORLANDC__) || defined(__ICCARM__) || defined(__LCC__) || defined(__WATCOMC__)
45 #define NO_DECLTYPE
46 #else /* GNU, Sun and other compilers */
47 #define DECLTYPE(x) (__typeof(x))
48 #endif
49 #endif
50 
51 #ifdef NO_DECLTYPE
52 #define DECLTYPE(x)
53 #define DECLTYPE_ASSIGN(dst,src) \
54 do { \
55  char **_da_dst = (char**)(&(dst)); \
56  *_da_dst = (char*)(src); \
57 } while (0)
58 #else
59 #define DECLTYPE_ASSIGN(dst,src) \
60 do { \
61  (dst) = DECLTYPE(dst)(src); \
62 } while (0)
63 #endif
64 
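As a rough sketch of the two paths above: DECLTYPE_ASSIGN either casts the source through decltype/__typeof, or, under NO_DECLTYPE, writes it through a char** alias. The struct and names below are illustrative only, not part of uthash.

    #include "uthash.h"

    struct item { int id; UT_hash_handle hh; };   /* illustrative element type */

    static void assign_example(void *found)
    {
        struct item *out;
        /* With DECLTYPE available this expands to: out = (__typeof(out))(found);
         * under NO_DECLTYPE it becomes:
         *   char **_da_dst = (char**)(&(out)); *_da_dst = (char*)(found);      */
        DECLTYPE_ASSIGN(out, found);
        (void)out;
    }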
65 /* a number of the hash functions use uint32_t, which isn't defined on pre-VS2010 compilers */
66 #if defined(_WIN32)
67 #if defined(_MSC_VER) && _MSC_VER >= 1600
68 #include <stdint.h>
69 #elif defined(__WATCOMC__) || defined(__MINGW32__) || defined(__CYGWIN__)
70 #include <stdint.h>
71 #else
72 typedef unsigned int uint32_t;
73 typedef unsigned char uint8_t;
74 #endif
75 #elif defined(__GNUC__) && !defined(__VXWORKS__)
76 #include <stdint.h>
77 #else
78 typedef unsigned int uint32_t;
79 typedef unsigned char uint8_t;
80 #endif
81 
82 #ifndef uthash_fatal
83 #define uthash_fatal(msg) lwerror("uthash: fatal error (out of memory,etc)")
84 #endif
85 #ifndef uthash_malloc
86 #define uthash_malloc(sz) palloc(sz) /* malloc fcn */
87 #endif
88 #ifndef uthash_free
89 #define uthash_free(ptr,sz) pfree(ptr) /* free fcn */
90 #endif
91 #ifndef uthash_strlen
92 #define uthash_strlen(s) strlen(s)
93 #endif
94 #ifndef uthash_memcmp
95 #define uthash_memcmp(a,b,n) memcmp(a,b,n)
96 #endif
97 
98 #ifndef uthash_noexpand_fyi
99 #define uthash_noexpand_fyi(tbl) /* can be defined to log noexpand */
100 #endif
101 #ifndef uthash_expand_fyi
102 #define uthash_expand_fyi(tbl) /* can be defined to log expands */
103 #endif
104 
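In this PostGIS copy the hooks above are pre-wired to PostgreSQL/liblwgeom facilities (palloc, pfree, lwerror). Because each hook is wrapped in #ifndef, a standalone build can substitute its own versions by defining them before the header is included; a hedged sketch using the C standard library (these definitions are stand-ins, not what PostGIS uses):

    #include <stdio.h>
    #include <stdlib.h>

    /* Define the hooks first so the #ifndef guards in uthash.h keep them. */
    #define uthash_malloc(sz)    malloc(sz)
    #define uthash_free(ptr,sz)  free(ptr)
    #define uthash_fatal(msg)    do { fprintf(stderr, "uthash: %s\n", msg); exit(1); } while (0)

    #include "uthash.h"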
105 /* initial number of buckets */
106 #define HASH_INITIAL_NUM_BUCKETS 32U /* initial number of buckets */
107 #define HASH_INITIAL_NUM_BUCKETS_LOG2 5U /* lg2 of initial number of buckets */
108 #define HASH_BKT_CAPACITY_THRESH 10U /* expand when a bucket's item count reaches this threshold */
109 
110 /* calculate the element whose hash handle address is hhp */
111 #define ELMT_FROM_HH(tbl,hhp) ((void*)(((char*)(hhp)) - ((tbl)->hho)))
112 /* calculate the hash handle from element address elp */
113 #define HH_FROM_ELMT(tbl,elp) ((UT_hash_handle *)(((char*)(elp)) + ((tbl)->hho)))
114 
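The hho field stored in the table is the byte offset of the hash handle within the user's structure, so the two macros above convert between an element pointer and its handle by adding or subtracting that offset. A small sketch of the equivalence, with a hypothetical element type:

    #include <stddef.h>   /* offsetof */
    #include "uthash.h"

    struct item {
        int id;
        UT_hash_handle hh;   /* handle embedded in every hashable element */
    };

    /* For this layout, tbl->hho holds offsetof(struct item, hh), so
     *   HH_FROM_ELMT(tbl, el)  == (UT_hash_handle *)((char *)el  + offsetof(struct item, hh))
     *   ELMT_FROM_HH(tbl, hhp) == (struct item *)   ((char *)hhp - offsetof(struct item, hh)) */
    static UT_hash_handle *handle_of(struct item *el)
    {
        return (UT_hash_handle *)((char *)el + offsetof(struct item, hh));
    }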
115 #define HASH_VALUE(keyptr,keylen,hashv) \
116 do { \
117  HASH_FCN(keyptr, keylen, hashv); \
118 } while (0)
119 
120 #define HASH_FIND_BYHASHVALUE(hh,head,keyptr,keylen,hashval,out) \
121 do { \
122  (out) = NULL; \
123  if (head) { \
124  unsigned _hf_bkt; \
125  HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _hf_bkt); \
126  if (HASH_BLOOM_TEST((head)->hh.tbl, hashval) != 0) { \
127  HASH_FIND_IN_BKT((head)->hh.tbl, hh, (head)->hh.tbl->buckets[ _hf_bkt ], keyptr, keylen, hashval, out); \
128  } \
129  } \
130 } while (0)
131 
132 #define HASH_FIND(hh,head,keyptr,keylen,out) \
133 do { \
134  unsigned _hf_hashv; \
135  HASH_VALUE(keyptr, keylen, _hf_hashv); \
136  HASH_FIND_BYHASHVALUE(hh, head, keyptr, keylen, _hf_hashv, out); \
137 } while (0)
138 
139 #ifdef HASH_BLOOM
140 #define HASH_BLOOM_BITLEN (1UL << HASH_BLOOM)
141 #define HASH_BLOOM_BYTELEN (HASH_BLOOM_BITLEN/8UL) + (((HASH_BLOOM_BITLEN%8UL)!=0UL) ? 1UL : 0UL)
142 #define HASH_BLOOM_MAKE(tbl) \
143 do { \
144  (tbl)->bloom_nbits = HASH_BLOOM; \
145  (tbl)->bloom_bv = (uint8_t*)uthash_malloc(HASH_BLOOM_BYTELEN); \
146  if (!((tbl)->bloom_bv)) { uthash_fatal( "out of memory"); } \
147  memset((tbl)->bloom_bv, 0, HASH_BLOOM_BYTELEN); \
148  (tbl)->bloom_sig = HASH_BLOOM_SIGNATURE; \
149 } while (0)
150 
151 #define HASH_BLOOM_FREE(tbl) \
152 do { \
153  uthash_free((tbl)->bloom_bv, HASH_BLOOM_BYTELEN); \
154 } while (0)
155 
156 #define HASH_BLOOM_BITSET(bv,idx) (bv[(idx)/8U] |= (1U << ((idx)%8U)))
157 #define HASH_BLOOM_BITTEST(bv,idx) (bv[(idx)/8U] & (1U << ((idx)%8U)))
158 
159 #define HASH_BLOOM_ADD(tbl,hashv) \
160  HASH_BLOOM_BITSET((tbl)->bloom_bv, (hashv & (uint32_t)((1ULL << (tbl)->bloom_nbits) - 1U)))
161 
162 #define HASH_BLOOM_TEST(tbl,hashv) \
163  HASH_BLOOM_BITTEST((tbl)->bloom_bv, (hashv & (uint32_t)((1ULL << (tbl)->bloom_nbits) - 1U)))
164 
165 #else
166 #define HASH_BLOOM_MAKE(tbl)
167 #define HASH_BLOOM_FREE(tbl)
168 #define HASH_BLOOM_ADD(tbl,hashv)
169 #define HASH_BLOOM_TEST(tbl,hashv) (1)
170 #define HASH_BLOOM_BYTELEN 0U
171 #endif
172 
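The bloom filter is a compile-time option: defining HASH_BLOOM to n attaches a 2^n-bit filter to every table, letting HASH_FIND_BYHASHVALUE reject most lookups of absent keys without walking a bucket chain, at the cost of the extra bytes per table. A minimal sketch (16 is just an example value):

    /* Either on the compiler command line:   cc -DHASH_BLOOM=16 app.c
     * or before the header is included: */
    #define HASH_BLOOM 16          /* 2^16 bits = 8 KiB of filter per table */
    #include "uthash.h"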
173 #define HASH_MAKE_TABLE(hh,head) \
174 do { \
175  (head)->hh.tbl = (UT_hash_table*)uthash_malloc( \
176  sizeof(UT_hash_table)); \
177  if (!((head)->hh.tbl)) { uthash_fatal( "out of memory"); } \
178  memset((head)->hh.tbl, 0, sizeof(UT_hash_table)); \
179  (head)->hh.tbl->tail = &((head)->hh); \
180  (head)->hh.tbl->num_buckets = HASH_INITIAL_NUM_BUCKETS; \
181  (head)->hh.tbl->log2_num_buckets = HASH_INITIAL_NUM_BUCKETS_LOG2; \
182  (head)->hh.tbl->hho = (char*)(&(head)->hh) - (char*)(head); \
183  (head)->hh.tbl->buckets = (UT_hash_bucket*)uthash_malloc( \
184  HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket)); \
185  if (! (head)->hh.tbl->buckets) { uthash_fatal( "out of memory"); } \
186  memset((head)->hh.tbl->buckets, 0, \
187  HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket)); \
188  HASH_BLOOM_MAKE((head)->hh.tbl); \
189  (head)->hh.tbl->signature = HASH_SIGNATURE; \
190 } while (0)
191 
192 #define HASH_REPLACE_BYHASHVALUE_INORDER(hh,head,fieldname,keylen_in,hashval,add,replaced,cmpfcn) \
193 do { \
194  (replaced) = NULL; \
195  HASH_FIND_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, replaced); \
196  if (replaced) { \
197  HASH_DELETE(hh, head, replaced); \
198  } \
199  HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, &((add)->fieldname), keylen_in, hashval, add, cmpfcn); \
200 } while (0)
201 
202 #define HASH_REPLACE_BYHASHVALUE(hh,head,fieldname,keylen_in,hashval,add,replaced) \
203 do { \
204  (replaced) = NULL; \
205  HASH_FIND_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, replaced); \
206  if (replaced) { \
207  HASH_DELETE(hh, head, replaced); \
208  } \
209  HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, add); \
210 } while (0)
211 
212 #define HASH_REPLACE(hh,head,fieldname,keylen_in,add,replaced) \
213 do { \
214  unsigned _hr_hashv; \
215  HASH_VALUE(&((add)->fieldname), keylen_in, _hr_hashv); \
216  HASH_REPLACE_BYHASHVALUE(hh, head, fieldname, keylen_in, _hr_hashv, add, replaced); \
217 } while (0)
218 
219 #define HASH_REPLACE_INORDER(hh,head,fieldname,keylen_in,add,replaced,cmpfcn) \
220 do { \
221  unsigned _hr_hashv; \
222  HASH_VALUE(&((add)->fieldname), keylen_in, _hr_hashv); \
223  HASH_REPLACE_BYHASHVALUE_INORDER(hh, head, fieldname, keylen_in, _hr_hashv, add, replaced, cmpfcn); \
224 } while (0)
225 
226 #define HASH_APPEND_LIST(hh, head, add) \
227 do { \
228  (add)->hh.next = NULL; \
229  (add)->hh.prev = ELMT_FROM_HH((head)->hh.tbl, (head)->hh.tbl->tail); \
230  (head)->hh.tbl->tail->next = (add); \
231  (head)->hh.tbl->tail = &((add)->hh); \
232 } while (0)
233 
234 #define HASH_AKBI_INNER_LOOP(hh,head,add,cmpfcn) \
235 do { \
236  do { \
237  if (cmpfcn(DECLTYPE(head)(_hs_iter), add) > 0) \
238  break; \
239  } while ((_hs_iter = HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->next)); \
240 } while (0)
241 
242 #ifdef NO_DECLTYPE
243 #undef HASH_AKBI_INNER_LOOP
244 #define HASH_AKBI_INNER_LOOP(hh,head,add,cmpfcn) \
245 do { \
246  char *_hs_saved_head = (char*)(head); \
247  do { \
248  DECLTYPE_ASSIGN(head, _hs_iter); \
249  if (cmpfcn(head, add) > 0) { \
250  DECLTYPE_ASSIGN(head, _hs_saved_head); \
251  break; \
252  } \
253  DECLTYPE_ASSIGN(head, _hs_saved_head); \
254  } while ((_hs_iter = HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->next)); \
255 } while (0)
256 #endif
257 
258 #define HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh,head,keyptr,keylen_in,hashval,add,cmpfcn) \
259 do { \
260  unsigned _ha_bkt; \
261  (add)->hh.hashv = (hashval); \
262  (add)->hh.key = (char*) (keyptr); \
263  (add)->hh.keylen = (unsigned) (keylen_in); \
264  if (!(head)) { \
265  (add)->hh.next = NULL; \
266  (add)->hh.prev = NULL; \
267  (head) = (add); \
268  HASH_MAKE_TABLE(hh, head); \
269  } else { \
270  void *_hs_iter = (head); \
271  (add)->hh.tbl = (head)->hh.tbl; \
272  HASH_AKBI_INNER_LOOP(hh, head, add, cmpfcn); \
273  if (_hs_iter) { \
274  (add)->hh.next = _hs_iter; \
275  if (((add)->hh.prev = HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->prev)) { \
276  HH_FROM_ELMT((head)->hh.tbl, (add)->hh.prev)->next = (add); \
277  } else { \
278  (head) = (add); \
279  } \
280  HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->prev = (add); \
281  } else { \
282  HASH_APPEND_LIST(hh, head, add); \
283  } \
284  } \
285  (head)->hh.tbl->num_items++; \
286  HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _ha_bkt); \
287  HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt], &(add)->hh); \
288  HASH_BLOOM_ADD((head)->hh.tbl, hashval); \
289  HASH_EMIT_KEY(hh, head, keyptr, keylen_in); \
290  HASH_FSCK(hh, head); \
291 } while (0)
292 
293 #define HASH_ADD_KEYPTR_INORDER(hh,head,keyptr,keylen_in,add,cmpfcn) \
294 do { \
295  unsigned _hs_hashv; \
296  HASH_VALUE(keyptr, keylen_in, _hs_hashv); \
297  HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, keyptr, keylen_in, _hs_hashv, add, cmpfcn); \
298 } while (0)
299 
300 #define HASH_ADD_BYHASHVALUE_INORDER(hh,head,fieldname,keylen_in,hashval,add,cmpfcn) \
301  HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, &((add)->fieldname), keylen_in, hashval, add, cmpfcn)
302 
303 #define HASH_ADD_INORDER(hh,head,fieldname,keylen_in,add,cmpfcn) \
304  HASH_ADD_KEYPTR_INORDER(hh, head, &((add)->fieldname), keylen_in, add, cmpfcn)
305 
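The *_INORDER variants keep the application-order list sorted: each new element is inserted before the first existing element for which cmpfcn returns a positive value. A minimal sketch with a hypothetical element type and comparator:

    #include "uthash.h"

    struct item {
        int id;               /* key */
        UT_hash_handle hh;
    };

    /* comparator over element pointers: <0, 0 or >0, as with qsort */
    static int by_id(struct item *a, struct item *b)
    {
        return (a->id > b->id) - (a->id < b->id);
    }

    static void add_sorted(struct item **head, struct item *it)
    {
        /* hash on the int field "id", keep app order sorted by by_id() */
        HASH_ADD_INORDER(hh, *head, id, sizeof(int), it, by_id);
    }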
306 #define HASH_ADD_KEYPTR_BYHASHVALUE(hh,head,keyptr,keylen_in,hashval,add) \
307 do { \
308  unsigned _ha_bkt; \
309  (add)->hh.hashv = (hashval); \
310  (add)->hh.key = (char*) (keyptr); \
311  (add)->hh.keylen = (unsigned) (keylen_in); \
312  if (!(head)) { \
313  (add)->hh.next = NULL; \
314  (add)->hh.prev = NULL; \
315  (head) = (add); \
316  HASH_MAKE_TABLE(hh, head); \
317  } else { \
318  (add)->hh.tbl = (head)->hh.tbl; \
319  HASH_APPEND_LIST(hh, head, add); \
320  } \
321  (head)->hh.tbl->num_items++; \
322  HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _ha_bkt); \
323  HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt], &(add)->hh); \
324  HASH_BLOOM_ADD((head)->hh.tbl, hashval); \
325  HASH_EMIT_KEY(hh, head, keyptr, keylen_in); \
326  HASH_FSCK(hh, head); \
327 } while (0)
328 
329 #define HASH_ADD_KEYPTR(hh,head,keyptr,keylen_in,add) \
330 do { \
331  unsigned _ha_hashv; \
332  HASH_VALUE(keyptr, keylen_in, _ha_hashv); \
333  HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, keyptr, keylen_in, _ha_hashv, add); \
334 } while (0)
335 
336 #define HASH_ADD_BYHASHVALUE(hh,head,fieldname,keylen_in,hashval,add) \
337  HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, add)
338 
339 #define HASH_ADD(hh,head,fieldname,keylen_in,add) \
340  HASH_ADD_KEYPTR(hh, head, &((add)->fieldname), keylen_in, add)
341 
342 #define HASH_TO_BKT(hashv,num_bkts,bkt) \
343 do { \
344  bkt = ((hashv) & ((num_bkts) - 1U)); \
345 } while (0)
346 
347 /* delete "delptr" from the hash table.
348  * "the usual" patch-up process for the app-order doubly-linked-list.
349  * The use of _hd_hh_del below deserves special explanation.
350  * These used to be expressed using (delptr) but that led to a bug
351  * if someone used the same symbol for the head and deletee, like
352  * HASH_DELETE(hh,users,users);
353  * We want that to work, but by changing the head (users) below
354  * we were forfeiting our ability to further refer to the deletee (users)
355  * in the patch-up process. Solution: use scratch space to
356  * copy the deletee pointer, then the latter references are via that
357  * scratch pointer rather than through the repointed (users) symbol.
358  */
359 #define HASH_DELETE(hh,head,delptr) \
360 do { \
361  struct UT_hash_handle *_hd_hh_del; \
362  if ( ((delptr)->hh.prev == NULL) && ((delptr)->hh.next == NULL) ) { \
363  uthash_free((head)->hh.tbl->buckets, \
364  (head)->hh.tbl->num_buckets*sizeof(struct UT_hash_bucket) ); \
365  HASH_BLOOM_FREE((head)->hh.tbl); \
366  uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \
367  head = NULL; \
368  } else { \
369  unsigned _hd_bkt; \
370  _hd_hh_del = &((delptr)->hh); \
371  if ((delptr) == ELMT_FROM_HH((head)->hh.tbl,(head)->hh.tbl->tail)) { \
372  (head)->hh.tbl->tail = \
373  (UT_hash_handle*)((ptrdiff_t)((delptr)->hh.prev) + \
374  (head)->hh.tbl->hho); \
375  } \
376  if ((delptr)->hh.prev != NULL) { \
377  ((UT_hash_handle*)((ptrdiff_t)((delptr)->hh.prev) + \
378  (head)->hh.tbl->hho))->next = (delptr)->hh.next; \
379  } else { \
380  DECLTYPE_ASSIGN(head,(delptr)->hh.next); \
381  } \
382  if (_hd_hh_del->next != NULL) { \
383  ((UT_hash_handle*)((ptrdiff_t)_hd_hh_del->next + \
384  (head)->hh.tbl->hho))->prev = \
385  _hd_hh_del->prev; \
386  } \
387  HASH_TO_BKT( _hd_hh_del->hashv, (head)->hh.tbl->num_buckets, _hd_bkt); \
388  HASH_DEL_IN_BKT(hh,(head)->hh.tbl->buckets[_hd_bkt], _hd_hh_del); \
389  (head)->hh.tbl->num_items--; \
390  } \
391  HASH_FSCK(hh,head); \
392 } while (0)
393 
394 
395 /* convenience forms of HASH_FIND/HASH_ADD/HASH_DEL */
396 #define HASH_FIND_STR(head,findstr,out) \
397  HASH_FIND(hh,head,findstr,(unsigned)uthash_strlen(findstr),out)
398 #define HASH_ADD_STR(head,strfield,add) \
399  HASH_ADD(hh,head,strfield[0],(unsigned)uthash_strlen(add->strfield),add)
400 #define HASH_REPLACE_STR(head,strfield,add,replaced) \
401  HASH_REPLACE(hh,head,strfield[0],(unsigned)uthash_strlen(add->strfield),add,replaced)
402 #define HASH_FIND_INT(head,findint,out) \
403  HASH_FIND(hh,head,findint,sizeof(int),out)
404 #define HASH_ADD_INT(head,intfield,add) \
405  HASH_ADD(hh,head,intfield,sizeof(int),add)
406 #define HASH_REPLACE_INT(head,intfield,add,replaced) \
407  HASH_REPLACE(hh,head,intfield,sizeof(int),add,replaced)
408 #define HASH_FIND_PTR(head,findptr,out) \
409  HASH_FIND(hh,head,findptr,sizeof(void *),out)
410 #define HASH_ADD_PTR(head,ptrfield,add) \
411  HASH_ADD(hh,head,ptrfield,sizeof(void *),add)
412 #define HASH_REPLACE_PTR(head,ptrfield,add,replaced) \
413  HASH_REPLACE(hh,head,ptrfield,sizeof(void *),add,replaced)
414 #define HASH_DEL(head,delptr) \
415  HASH_DELETE(hh,head,delptr)
416 
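Taken together these convenience forms give the usual uthash workflow: declare a structure with a UT_hash_handle member named hh, start from a NULL head pointer, then add, find and delete through that head. A hedged sketch with illustrative names; note that in this PostGIS copy the table's internal allocations go through palloc/pfree, so outside a PostgreSQL backend the uthash_malloc/uthash_free/uthash_fatal hooks would have to be redefined first (see the sketch after the hook definitions above), while the element itself is simply malloc'd here for illustration:

    #include <stdlib.h>
    #include "uthash.h"

    struct user {
        int id;               /* key */
        UT_hash_handle hh;    /* makes this structure hashable */
    };

    static struct user *users = NULL;   /* the hash head; must start out NULL */

    static void demo(void)
    {
        struct user *u, *found;
        int key = 42;

        u = (struct user *)malloc(sizeof(*u));
        u->id = 42;
        HASH_ADD_INT(users, id, u);         /* key is the int field "id" */

        HASH_FIND_INT(users, &key, found);  /* found == u on success, else NULL */
        if (found != NULL) {
            HASH_DEL(users, found);         /* unlink from the hash */
            free(found);
        }
    }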
417 /* HASH_FSCK checks hash integrity on every add/delete when HASH_DEBUG is defined.
418  * This is for uthash developers only; it compiles away if HASH_DEBUG isn't defined.
419  */
420 #ifdef HASH_DEBUG
421 #define HASH_OOPS(...) do { fprintf(stderr,__VA_ARGS__); exit(-1); } while (0)
422 #define HASH_FSCK(hh,head) \
423 do { \
424  struct UT_hash_handle *_thh; \
425  if (head) { \
426  unsigned _bkt_i; \
427  unsigned _count; \
428  char *_prev; \
429  _count = 0; \
430  for( _bkt_i = 0; _bkt_i < (head)->hh.tbl->num_buckets; _bkt_i++) { \
431  unsigned _bkt_count = 0; \
432  _thh = (head)->hh.tbl->buckets[_bkt_i].hh_head; \
433  _prev = NULL; \
434  while (_thh) { \
435  if (_prev != (char*)(_thh->hh_prev)) { \
436  HASH_OOPS("invalid hh_prev %p, actual %p\n", \
437  _thh->hh_prev, _prev ); \
438  } \
439  _bkt_count++; \
440  _prev = (char*)(_thh); \
441  _thh = _thh->hh_next; \
442  } \
443  _count += _bkt_count; \
444  if ((head)->hh.tbl->buckets[_bkt_i].count != _bkt_count) { \
445  HASH_OOPS("invalid bucket count %u, actual %u\n", \
446  (head)->hh.tbl->buckets[_bkt_i].count, _bkt_count); \
447  } \
448  } \
449  if (_count != (head)->hh.tbl->num_items) { \
450  HASH_OOPS("invalid hh item count %u, actual %u\n", \
451  (head)->hh.tbl->num_items, _count ); \
452  } \
453  /* traverse hh in app order; check next/prev integrity, count */ \
454  _count = 0; \
455  _prev = NULL; \
456  _thh = &(head)->hh; \
457  while (_thh) { \
458  _count++; \
459  if (_prev !=(char*)(_thh->prev)) { \
460  HASH_OOPS("invalid prev %p, actual %p\n", \
461  _thh->prev, _prev ); \
462  } \
463  _prev = (char*)ELMT_FROM_HH((head)->hh.tbl, _thh); \
464  _thh = ( _thh->next ? (UT_hash_handle*)((char*)(_thh->next) + \
465  (head)->hh.tbl->hho) : NULL ); \
466  } \
467  if (_count != (head)->hh.tbl->num_items) { \
468  HASH_OOPS("invalid app item count %u, actual %u\n", \
469  (head)->hh.tbl->num_items, _count ); \
470  } \
471  } \
472 } while (0)
473 #else
474 #define HASH_FSCK(hh,head)
475 #endif
476 
477 /* When compiled with -DHASH_EMIT_KEYS=fd, length-prefixed keys are emitted to
478  * the file descriptor fd, for use in tuning the hash function.
479  * The app can #include <unistd.h> to get the prototype for write(2). */
480 #ifdef HASH_EMIT_KEYS
481 #define HASH_EMIT_KEY(hh,head,keyptr,fieldlen) \
482 do { \
483  unsigned _klen = fieldlen; \
484  write(HASH_EMIT_KEYS, &_klen, sizeof(_klen)); \
485  write(HASH_EMIT_KEYS, keyptr, (unsigned long)fieldlen); \
486 } while (0)
487 #else
488 #define HASH_EMIT_KEY(hh,head,keyptr,fieldlen)
489 #endif
490 
491 /* default to Jenkins's hash */
492 
493 #define HASH_FCN HASH_JEN
494 
495 /* The Bernstein hash function, used in Perl prior to v5.6. Note that ((x<<5)+x) == x*33. */
496 #define HASH_BER(key,keylen,hashv) \
497 do { \
498  unsigned _hb_keylen=(unsigned)keylen; \
499  const unsigned char *_hb_key=(const unsigned char*)(key); \
500  (hashv) = 0; \
501  while (_hb_keylen-- != 0U) { \
502  (hashv) = (((hashv) << 5) + (hashv)) + *_hb_key++; \
503  } \
504 } while (0)
505 
506 
507 /* SAX/FNV/OAT/JEN hash functions are macro variants of those listed at
508  * http://eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx */
509 #define HASH_SAX(key,keylen,hashv) \
510 do { \
511  unsigned _sx_i; \
512  const unsigned char *_hs_key=(const unsigned char*)(key); \
513  hashv = 0; \
514  for(_sx_i=0; _sx_i < keylen; _sx_i++) { \
515  hashv ^= (hashv << 5) + (hashv >> 2) + _hs_key[_sx_i]; \
516  } \
517 } while (0)
518 /* FNV-1a variation */
519 #define HASH_FNV(key,keylen,hashv) \
520 do { \
521  unsigned _fn_i; \
522  const unsigned char *_hf_key=(const unsigned char*)(key); \
523  hashv = 2166136261U; \
524  for(_fn_i=0; _fn_i < keylen; _fn_i++) { \
525  hashv = hashv ^ _hf_key[_fn_i]; \
526  hashv = hashv * 16777619U; \
527  } \
528 } while (0)
529 
530 #define HASH_OAT(key,keylen,hashv) \
531 do { \
532  unsigned _ho_i; \
533  const unsigned char *_ho_key=(const unsigned char*)(key); \
534  hashv = 0; \
535  for(_ho_i=0; _ho_i < keylen; _ho_i++) { \
536  hashv += _ho_key[_ho_i]; \
537  hashv += (hashv << 10); \
538  hashv ^= (hashv >> 6); \
539  } \
540  hashv += (hashv << 3); \
541  hashv ^= (hashv >> 11); \
542  hashv += (hashv << 15); \
543 } while (0)
544 
545 #define HASH_JEN_MIX(a,b,c) \
546 do { \
547  a -= b; a -= c; a ^= ( c >> 13 ); \
548  b -= c; b -= a; b ^= ( a << 8 ); \
549  c -= a; c -= b; c ^= ( b >> 13 ); \
550  a -= b; a -= c; a ^= ( c >> 12 ); \
551  b -= c; b -= a; b ^= ( a << 16 ); \
552  c -= a; c -= b; c ^= ( b >> 5 ); \
553  a -= b; a -= c; a ^= ( c >> 3 ); \
554  b -= c; b -= a; b ^= ( a << 10 ); \
555  c -= a; c -= b; c ^= ( b >> 15 ); \
556 } while (0)
557 
558 #define HASH_JEN(key,keylen,hashv) \
559 do { \
560  unsigned _hj_i,_hj_j,_hj_k; \
561  unsigned const char *_hj_key=(unsigned const char*)(key); \
562  hashv = 0xfeedbeefu; \
563  _hj_i = _hj_j = 0x9e3779b9u; \
564  _hj_k = (unsigned)(keylen); \
565  while (_hj_k >= 12U) { \
566  _hj_i += (_hj_key[0] + ( (unsigned)_hj_key[1] << 8 ) \
567  + ( (unsigned)_hj_key[2] << 16 ) \
568  + ( (unsigned)_hj_key[3] << 24 ) ); \
569  _hj_j += (_hj_key[4] + ( (unsigned)_hj_key[5] << 8 ) \
570  + ( (unsigned)_hj_key[6] << 16 ) \
571  + ( (unsigned)_hj_key[7] << 24 ) ); \
572  hashv += (_hj_key[8] + ( (unsigned)_hj_key[9] << 8 ) \
573  + ( (unsigned)_hj_key[10] << 16 ) \
574  + ( (unsigned)_hj_key[11] << 24 ) ); \
575  \
576  HASH_JEN_MIX(_hj_i, _hj_j, hashv); \
577  \
578  _hj_key += 12; \
579  _hj_k -= 12U; \
580  } \
581  hashv += (unsigned)(keylen); \
582  switch ( _hj_k ) { \
583  case 11: hashv += ( (unsigned)_hj_key[10] << 24 ); /* FALLTHROUGH */ \
584  case 10: hashv += ( (unsigned)_hj_key[9] << 16 ); /* FALLTHROUGH */ \
585  case 9: hashv += ( (unsigned)_hj_key[8] << 8 ); /* FALLTHROUGH */ \
586  case 8: _hj_j += ( (unsigned)_hj_key[7] << 24 ); /* FALLTHROUGH */ \
587  case 7: _hj_j += ( (unsigned)_hj_key[6] << 16 ); /* FALLTHROUGH */ \
588  case 6: _hj_j += ( (unsigned)_hj_key[5] << 8 ); /* FALLTHROUGH */ \
589  case 5: _hj_j += _hj_key[4]; /* FALLTHROUGH */ \
590  case 4: _hj_i += ( (unsigned)_hj_key[3] << 24 ); /* FALLTHROUGH */ \
591  case 3: _hj_i += ( (unsigned)_hj_key[2] << 16 ); /* FALLTHROUGH */ \
592  case 2: _hj_i += ( (unsigned)_hj_key[1] << 8 ); /* FALLTHROUGH */ \
593  case 1: _hj_i += _hj_key[0]; \
594  } \
595  HASH_JEN_MIX(_hj_i, _hj_j, hashv); \
596 } while (0)
597 
598 /* The Paul Hsieh hash function */
599 #undef get16bits
600 #if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) \
601  || defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__)
602 #define get16bits(d) (*((const uint16_t *) (d)))
603 #endif
604 
605 #if !defined (get16bits)
606 #define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8) \
607  +(uint32_t)(((const uint8_t *)(d))[0]) )
608 #endif
609 #define HASH_SFH(key,keylen,hashv) \
610 do { \
611  unsigned const char *_sfh_key=(unsigned const char*)(key); \
612  uint32_t _sfh_tmp, _sfh_len = (uint32_t)keylen; \
613  \
614  unsigned _sfh_rem = _sfh_len & 3U; \
615  _sfh_len >>= 2; \
616  hashv = 0xcafebabeu; \
617  \
618  /* Main loop */ \
619  for (;_sfh_len > 0U; _sfh_len--) { \
620  hashv += get16bits (_sfh_key); \
621  _sfh_tmp = ((uint32_t)(get16bits (_sfh_key+2)) << 11) ^ hashv; \
622  hashv = (hashv << 16) ^ _sfh_tmp; \
623  _sfh_key += 2U*sizeof (uint16_t); \
624  hashv += hashv >> 11; \
625  } \
626  \
627  /* Handle end cases */ \
628  switch (_sfh_rem) { \
629  case 3: hashv += get16bits (_sfh_key); \
630  hashv ^= hashv << 16; \
631  hashv ^= (uint32_t)(_sfh_key[sizeof (uint16_t)]) << 18; \
632  hashv += hashv >> 11; \
633  break; \
634  case 2: hashv += get16bits (_sfh_key); \
635  hashv ^= hashv << 11; \
636  hashv += hashv >> 17; \
637  break; \
638  case 1: hashv += *_sfh_key; \
639  hashv ^= hashv << 10; \
640  hashv += hashv >> 1; \
641  } \
642  \
643  /* Force "avalanching" of final 127 bits */ \
644  hashv ^= hashv << 3; \
645  hashv += hashv >> 5; \
646  hashv ^= hashv << 4; \
647  hashv += hashv >> 17; \
648  hashv ^= hashv << 25; \
649  hashv += hashv >> 6; \
650 } while (0)
651 
652 #ifdef HASH_USING_NO_STRICT_ALIASING
653 /* MurmurHash exploits some CPUs' (x86, x86_64) tolerance for unaligned reads.
654  * On other CPU families (e.g. SPARC) an unaligned read causes a bus error.
655  * MurmurHash uses the faster approach only on CPUs where we know it's safe.
656  *
657  * Note the preprocessor built-in defines can be emitted using:
658  *
659  * gcc -m64 -dM -E - < /dev/null (on gcc)
660  * cc -## a.c (where a.c is a simple test file) (Sun Studio)
661  */
662 #if (defined(__i386__) || defined(__x86_64__) || defined(_M_IX86))
663 #define MUR_GETBLOCK(p,i) p[i]
664 #else /* non intel */
665 #define MUR_PLUS0_ALIGNED(p) (((unsigned long)p & 3UL) == 0UL)
666 #define MUR_PLUS1_ALIGNED(p) (((unsigned long)p & 3UL) == 1UL)
667 #define MUR_PLUS2_ALIGNED(p) (((unsigned long)p & 3UL) == 2UL)
668 #define MUR_PLUS3_ALIGNED(p) (((unsigned long)p & 3UL) == 3UL)
669 #define WP(p) ((uint32_t*)((unsigned long)(p) & ~3UL))
670 #if (defined(__BIG_ENDIAN__) || defined(SPARC) || defined(__ppc__) || defined(__ppc64__))
671 #define MUR_THREE_ONE(p) ((((*WP(p))&0x00ffffff) << 8) | (((*(WP(p)+1))&0xff000000) >> 24))
672 #define MUR_TWO_TWO(p) ((((*WP(p))&0x0000ffff) <<16) | (((*(WP(p)+1))&0xffff0000) >> 16))
673 #define MUR_ONE_THREE(p) ((((*WP(p))&0x000000ff) <<24) | (((*(WP(p)+1))&0xffffff00) >> 8))
674 #else /* assume little endian non-intel */
675 #define MUR_THREE_ONE(p) ((((*WP(p))&0xffffff00) >> 8) | (((*(WP(p)+1))&0x000000ff) << 24))
676 #define MUR_TWO_TWO(p) ((((*WP(p))&0xffff0000) >>16) | (((*(WP(p)+1))&0x0000ffff) << 16))
677 #define MUR_ONE_THREE(p) ((((*WP(p))&0xff000000) >>24) | (((*(WP(p)+1))&0x00ffffff) << 8))
678 #endif
679 #define MUR_GETBLOCK(p,i) (MUR_PLUS0_ALIGNED(p) ? ((p)[i]) : \
680  (MUR_PLUS1_ALIGNED(p) ? MUR_THREE_ONE(p) : \
681  (MUR_PLUS2_ALIGNED(p) ? MUR_TWO_TWO(p) : \
682  MUR_ONE_THREE(p))))
683 #endif
684 #define MUR_ROTL32(x,r) (((x) << (r)) | ((x) >> (32 - (r))))
685 #define MUR_FMIX(_h) \
686 do { \
687  _h ^= _h >> 16; \
688  _h *= 0x85ebca6bu; \
689  _h ^= _h >> 13; \
690  _h *= 0xc2b2ae35u; \
691  _h ^= _h >> 16; \
692 } while (0)
693 
694 #define HASH_MUR(key,keylen,hashv) \
695 do { \
696  const uint8_t *_mur_data = (const uint8_t*)(key); \
697  const int _mur_nblocks = (int)(keylen) / 4; \
698  uint32_t _mur_h1 = 0xf88D5353u; \
699  uint32_t _mur_c1 = 0xcc9e2d51u; \
700  uint32_t _mur_c2 = 0x1b873593u; \
701  uint32_t _mur_k1 = 0; \
702  const uint8_t *_mur_tail; \
703  const uint32_t *_mur_blocks = (const uint32_t*)(_mur_data+(_mur_nblocks*4)); \
704  int _mur_i; \
705  for(_mur_i = -_mur_nblocks; _mur_i!=0; _mur_i++) { \
706  _mur_k1 = MUR_GETBLOCK(_mur_blocks,_mur_i); \
707  _mur_k1 *= _mur_c1; \
708  _mur_k1 = MUR_ROTL32(_mur_k1,15); \
709  _mur_k1 *= _mur_c2; \
710  \
711  _mur_h1 ^= _mur_k1; \
712  _mur_h1 = MUR_ROTL32(_mur_h1,13); \
713  _mur_h1 = (_mur_h1*5U) + 0xe6546b64u; \
714  } \
715  _mur_tail = (const uint8_t*)(_mur_data + (_mur_nblocks*4)); \
716  _mur_k1=0; \
717  switch((keylen) & 3U) { \
718  case 3: _mur_k1 ^= (uint32_t)_mur_tail[2] << 16; /* FALLTHROUGH */ \
719  case 2: _mur_k1 ^= (uint32_t)_mur_tail[1] << 8; /* FALLTHROUGH */ \
720  case 1: _mur_k1 ^= (uint32_t)_mur_tail[0]; \
721  _mur_k1 *= _mur_c1; \
722  _mur_k1 = MUR_ROTL32(_mur_k1,15); \
723  _mur_k1 *= _mur_c2; \
724  _mur_h1 ^= _mur_k1; \
725  } \
726  _mur_h1 ^= (uint32_t)(keylen); \
727  MUR_FMIX(_mur_h1); \
728  hashv = _mur_h1; \
729 } while (0)
730 #endif /* HASH_USING_NO_STRICT_ALIASING */
731 
732 /* iterate over items in a known bucket to find desired item */
733 #define HASH_FIND_IN_BKT(tbl,hh,head,keyptr,keylen_in,hashval,out) \
734 do { \
735  if ((head).hh_head != NULL) { \
736  DECLTYPE_ASSIGN(out, ELMT_FROM_HH(tbl, (head).hh_head)); \
737  } else { \
738  (out) = NULL; \
739  } \
740  while ((out) != NULL) { \
741  if ((out)->hh.hashv == (hashval) && (out)->hh.keylen == (keylen_in)) { \
742  if (uthash_memcmp((out)->hh.key, keyptr, keylen_in) == 0) { \
743  break; \
744  } \
745  } \
746  if ((out)->hh.hh_next != NULL) { \
747  DECLTYPE_ASSIGN(out, ELMT_FROM_HH(tbl, (out)->hh.hh_next)); \
748  } else { \
749  (out) = NULL; \
750  } \
751  } \
752 } while (0)
753 
754 /* add an item to a bucket */
755 #define HASH_ADD_TO_BKT(head,addhh) \
756 do { \
757  head.count++; \
758  (addhh)->hh_next = head.hh_head; \
759  (addhh)->hh_prev = NULL; \
760  if (head.hh_head != NULL) { (head).hh_head->hh_prev = (addhh); } \
761  (head).hh_head=addhh; \
762  if ((head.count >= ((head.expand_mult+1U) * HASH_BKT_CAPACITY_THRESH)) \
763  && ((addhh)->tbl->noexpand != 1U)) { \
764  HASH_EXPAND_BUCKETS((addhh)->tbl); \
765  } \
766 } while (0)
767 
768 /* remove an item from a given bucket */
769 #define HASH_DEL_IN_BKT(hh,head,hh_del) \
770  (head).count--; \
771  if ((head).hh_head == hh_del) { \
772  (head).hh_head = hh_del->hh_next; \
773  } \
774  if (hh_del->hh_prev) { \
775  hh_del->hh_prev->hh_next = hh_del->hh_next; \
776  } \
777  if (hh_del->hh_next) { \
778  hh_del->hh_next->hh_prev = hh_del->hh_prev; \
779  }
780 
781 /* Bucket expansion has the effect of doubling the number of buckets
782  * and redistributing the items into the new buckets. Ideally the
783  * items will distribute more or less evenly into the new buckets
784  * (the extent to which this is true is a measure of the quality of
785  * the hash function as it applies to the key domain).
786  *
787  * With the items distributed into more buckets, the chain length
788  * (item count) in each bucket is reduced. Thus by expanding buckets
789  * the hash keeps a bound on the chain length. This bounded chain
790  * length is the essence of how a hash provides constant time lookup.
791  *
792  * The calculation of tbl->ideal_chain_maxlen below deserves some
793  * explanation. First, keep in mind that we're calculating the ideal
794  * maximum chain length based on the *new* (doubled) bucket count.
795  * In fractions this is just n/b (n=number of items,b=new num buckets).
796  * Since the ideal chain length is an integer, we want to calculate
797  * ceil(n/b). We don't depend on floating point arithmetic in this
798  * hash, so to calculate ceil(n/b) with integers we could write
799  *
800  * ceil(n/b) = (n/b) + ((n%b)?1:0)
801  *
802  * and in fact a previous version of this hash did just that.
803  * But now we have improved things a bit by recognizing that b is
804  * always a power of two. We keep its base 2 log handy (call it lb),
805  * so now we can write this with a bit shift and logical AND:
806  *
807  * ceil(n/b) = (n>>lb) + ( (n & (b-1)) ? 1:0)
808  *
809  */
810 #define HASH_EXPAND_BUCKETS(tbl) \
811 do { \
812  unsigned _he_bkt; \
813  unsigned _he_bkt_i; \
814  struct UT_hash_handle *_he_thh, *_he_hh_nxt; \
815  UT_hash_bucket *_he_new_buckets, *_he_newbkt; \
816  _he_new_buckets = (UT_hash_bucket*)uthash_malloc( \
817  2UL * tbl->num_buckets * sizeof(struct UT_hash_bucket)); \
818  if (!_he_new_buckets) { uthash_fatal( "out of memory"); } \
819  memset(_he_new_buckets, 0, \
820  2UL * tbl->num_buckets * sizeof(struct UT_hash_bucket)); \
821  tbl->ideal_chain_maxlen = \
822  (tbl->num_items >> (tbl->log2_num_buckets+1U)) + \
823  (((tbl->num_items & ((tbl->num_buckets*2U)-1U)) != 0U) ? 1U : 0U); \
824  tbl->nonideal_items = 0; \
825  for(_he_bkt_i = 0; _he_bkt_i < tbl->num_buckets; _he_bkt_i++) \
826  { \
827  _he_thh = tbl->buckets[ _he_bkt_i ].hh_head; \
828  while (_he_thh != NULL) { \
829  _he_hh_nxt = _he_thh->hh_next; \
830  HASH_TO_BKT( _he_thh->hashv, tbl->num_buckets*2U, _he_bkt); \
831  _he_newbkt = &(_he_new_buckets[ _he_bkt ]); \
832  if (++(_he_newbkt->count) > tbl->ideal_chain_maxlen) { \
833  tbl->nonideal_items++; \
834  _he_newbkt->expand_mult = _he_newbkt->count / \
835  tbl->ideal_chain_maxlen; \
836  } \
837  _he_thh->hh_prev = NULL; \
838  _he_thh->hh_next = _he_newbkt->hh_head; \
839  if (_he_newbkt->hh_head != NULL) { _he_newbkt->hh_head->hh_prev = \
840  _he_thh; } \
841  _he_newbkt->hh_head = _he_thh; \
842  _he_thh = _he_hh_nxt; \
843  } \
844  } \
845  uthash_free( tbl->buckets, tbl->num_buckets*sizeof(struct UT_hash_bucket) ); \
846  tbl->num_buckets *= 2U; \
847  tbl->log2_num_buckets++; \
848  tbl->buckets = _he_new_buckets; \
849  tbl->ineff_expands = (tbl->nonideal_items > (tbl->num_items >> 1)) ? \
850  (tbl->ineff_expands+1U) : 0U; \
851  if (tbl->ineff_expands > 1U) { \
852  tbl->noexpand=1; \
853  uthash_noexpand_fyi(tbl); \
854  } \
855  uthash_expand_fyi(tbl); \
856 } while (0)
857 
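A quick numeric check of the ceil(n/b) identity used above: with n = 100 items and a doubled bucket count of b = 64 (lb = 6), (n>>lb) = 1 and (n & (b-1)) = 36, which is nonzero, so ideal_chain_maxlen becomes 2, matching ceil(100/64). The same identity as a tiny standalone check (names are illustrative):

    #include <assert.h>

    /* ceil(n/b) for b = 2^lb, exactly as HASH_EXPAND_BUCKETS computes
     * tbl->ideal_chain_maxlen */
    static unsigned ceil_div_pow2(unsigned n, unsigned lb)
    {
        unsigned b = 1U << lb;
        return (n >> lb) + (((n & (b - 1U)) != 0U) ? 1U : 0U);
    }

    int main(void)
    {
        assert(ceil_div_pow2(100U, 6U) == 2U);   /* ceil(100/64) */
        assert(ceil_div_pow2(128U, 6U) == 2U);   /* exact division, no +1 */
        return 0;
    }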
858 
859 /* This is an adaptation of Simon Tatham's O(n log(n)) mergesort */
860 /* Note that HASH_SORT assumes the hash handle name to be hh.
861  * HASH_SRT was added to allow the hash handle name to be passed in. */
862 #define HASH_SORT(head,cmpfcn) HASH_SRT(hh,head,cmpfcn)
863 #define HASH_SRT(hh,head,cmpfcn) \
864 do { \
865  unsigned _hs_i; \
866  unsigned _hs_looping,_hs_nmerges,_hs_insize,_hs_psize,_hs_qsize; \
867  struct UT_hash_handle *_hs_p, *_hs_q, *_hs_e, *_hs_list, *_hs_tail; \
868  if (head != NULL) { \
869  _hs_insize = 1; \
870  _hs_looping = 1; \
871  _hs_list = &((head)->hh); \
872  while (_hs_looping != 0U) { \
873  _hs_p = _hs_list; \
874  _hs_list = NULL; \
875  _hs_tail = NULL; \
876  _hs_nmerges = 0; \
877  while (_hs_p != NULL) { \
878  _hs_nmerges++; \
879  _hs_q = _hs_p; \
880  _hs_psize = 0; \
881  for ( _hs_i = 0; _hs_i < _hs_insize; _hs_i++ ) { \
882  _hs_psize++; \
883  _hs_q = (UT_hash_handle*)((_hs_q->next != NULL) ? \
884  ((void*)((char*)(_hs_q->next) + \
885  (head)->hh.tbl->hho)) : NULL); \
886  if (! (_hs_q) ) { break; } \
887  } \
888  _hs_qsize = _hs_insize; \
889  while ((_hs_psize > 0U) || ((_hs_qsize > 0U) && (_hs_q != NULL))) {\
890  if (_hs_psize == 0U) { \
891  _hs_e = _hs_q; \
892  _hs_q = (UT_hash_handle*)((_hs_q->next != NULL) ? \
893  ((void*)((char*)(_hs_q->next) + \
894  (head)->hh.tbl->hho)) : NULL); \
895  _hs_qsize--; \
896  } else if ( (_hs_qsize == 0U) || (_hs_q == NULL) ) { \
897  _hs_e = _hs_p; \
898  if (_hs_p != NULL){ \
899  _hs_p = (UT_hash_handle*)((_hs_p->next != NULL) ? \
900  ((void*)((char*)(_hs_p->next) + \
901  (head)->hh.tbl->hho)) : NULL); \
902  } \
903  _hs_psize--; \
904  } else if (( \
905  cmpfcn(DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl,_hs_p)), \
906  DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl,_hs_q))) \
907  ) <= 0) { \
908  _hs_e = _hs_p; \
909  if (_hs_p != NULL){ \
910  _hs_p = (UT_hash_handle*)((_hs_p->next != NULL) ? \
911  ((void*)((char*)(_hs_p->next) + \
912  (head)->hh.tbl->hho)) : NULL); \
913  } \
914  _hs_psize--; \
915  } else { \
916  _hs_e = _hs_q; \
917  _hs_q = (UT_hash_handle*)((_hs_q->next != NULL) ? \
918  ((void*)((char*)(_hs_q->next) + \
919  (head)->hh.tbl->hho)) : NULL); \
920  _hs_qsize--; \
921  } \
922  if ( _hs_tail != NULL ) { \
923  _hs_tail->next = ((_hs_e != NULL) ? \
924  ELMT_FROM_HH((head)->hh.tbl,_hs_e) : NULL); \
925  } else { \
926  _hs_list = _hs_e; \
927  } \
928  if (_hs_e != NULL) { \
929  _hs_e->prev = ((_hs_tail != NULL) ? \
930  ELMT_FROM_HH((head)->hh.tbl,_hs_tail) : NULL); \
931  } \
932  _hs_tail = _hs_e; \
933  } \
934  _hs_p = _hs_q; \
935  } \
936  if (_hs_tail != NULL){ \
937  _hs_tail->next = NULL; \
938  } \
939  if ( _hs_nmerges <= 1U ) { \
940  _hs_looping=0; \
941  (head)->hh.tbl->tail = _hs_tail; \
942  DECLTYPE_ASSIGN(head,ELMT_FROM_HH((head)->hh.tbl, _hs_list)); \
943  } \
944  _hs_insize *= 2U; \
945  } \
946  HASH_FSCK(hh,head); \
947  } \
948 } while (0)
949 
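HASH_SRT reorders only the application-order (prev/next) list; bucket membership, and therefore lookups, are unaffected. A minimal sketch of sorting by an integer key, with illustrative names:

    #include "uthash.h"

    struct rec {
        int id;
        UT_hash_handle hh;
    };

    /* comparator over element pointers, qsort-style sign convention */
    static int rec_cmp(struct rec *a, struct rec *b)
    {
        return (a->id > b->id) - (a->id < b->id);
    }

    static void sort_recs(struct rec **head)
    {
        HASH_SORT(*head, rec_cmp);   /* same as HASH_SRT(hh, *head, rec_cmp) */
    }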
950 /* This macro selects items from one hash into another hash.
951  * The end result is that the selected items have dual presence
952  * in both hashes. There is no copy of the items made; rather
953  * they are added into the new hash through a secondary hash
954  * handle that must be present in the structure. */
955 #define HASH_SELECT(hh_dst, dst, hh_src, src, cond) \
956 do { \
957  unsigned _src_bkt, _dst_bkt; \
958  void *_last_elt=NULL, *_elt; \
959  UT_hash_handle *_src_hh, *_dst_hh, *_last_elt_hh=NULL; \
960  ptrdiff_t _dst_hho = ((char*)(&(dst)->hh_dst) - (char*)(dst)); \
961  if (src != NULL) { \
962  for(_src_bkt=0; _src_bkt < (src)->hh_src.tbl->num_buckets; _src_bkt++) { \
963  for(_src_hh = (src)->hh_src.tbl->buckets[_src_bkt].hh_head; \
964  _src_hh != NULL; \
965  _src_hh = _src_hh->hh_next) { \
966  _elt = ELMT_FROM_HH((src)->hh_src.tbl, _src_hh); \
967  if (cond(_elt)) { \
968  _dst_hh = (UT_hash_handle*)(((char*)_elt) + _dst_hho); \
969  _dst_hh->key = _src_hh->key; \
970  _dst_hh->keylen = _src_hh->keylen; \
971  _dst_hh->hashv = _src_hh->hashv; \
972  _dst_hh->prev = _last_elt; \
973  _dst_hh->next = NULL; \
974  if (_last_elt_hh != NULL) { _last_elt_hh->next = _elt; } \
975  if (dst == NULL) { \
976  DECLTYPE_ASSIGN(dst,_elt); \
977  HASH_MAKE_TABLE(hh_dst,dst); \
978  } else { \
979  _dst_hh->tbl = (dst)->hh_dst.tbl; \
980  } \
981  HASH_TO_BKT(_dst_hh->hashv, _dst_hh->tbl->num_buckets, _dst_bkt); \
982  HASH_ADD_TO_BKT(_dst_hh->tbl->buckets[_dst_bkt],_dst_hh); \
983  (dst)->hh_dst.tbl->num_items++; \
984  _last_elt = _elt; \
985  _last_elt_hh = _dst_hh; \
986  } \
987  } \
988  } \
989  } \
990  HASH_FSCK(hh_dst,dst); \
991 } while (0)
992 
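For HASH_SELECT the structure must carry a second hash handle for the destination hash; selected elements then sit in both hashes at once without being copied. A hedged sketch (structure, handle and condition names are illustrative):

    #include "uthash.h"

    struct event {
        int id;
        int severity;
        UT_hash_handle hh;       /* handle for the source ("all events") hash */
        UT_hash_handle hh_sel;   /* second handle for the selected hash       */
    };

    static struct event *all_events = NULL;
    static struct event *critical   = NULL;

    /* cond() receives a void* to the candidate element */
    static int is_critical(void *elt)
    {
        return ((struct event *)elt)->severity >= 3;
    }

    static void select_critical(void)
    {
        /* adds every matching element of all_events into critical via hh_sel;
         * nothing is copied */
        HASH_SELECT(hh_sel, critical, hh, all_events, is_critical);
    }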
993 #define HASH_CLEAR(hh,head) \
994 do { \
995  if (head != NULL) { \
996  uthash_free((head)->hh.tbl->buckets, \
997  (head)->hh.tbl->num_buckets*sizeof(struct UT_hash_bucket)); \
998  HASH_BLOOM_FREE((head)->hh.tbl); \
999  uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \
1000  (head)=NULL; \
1001  } \
1002 } while (0)
1003 
1004 #define HASH_OVERHEAD(hh,head) \
1005  ((head != NULL) ? ( \
1006  (size_t)(((head)->hh.tbl->num_items * sizeof(UT_hash_handle)) + \
1007  ((head)->hh.tbl->num_buckets * sizeof(UT_hash_bucket)) + \
1008  sizeof(UT_hash_table) + \
1009  (HASH_BLOOM_BYTELEN))) : 0U)
1010 
1011 #ifdef NO_DECLTYPE
1012 #define HASH_ITER(hh,head,el,tmp) \
1013 for(((el)=(head)), ((*(char**)(&(tmp)))=(char*)((head!=NULL)?(head)->hh.next:NULL)); \
1014  (el) != NULL; ((el)=(tmp)), ((*(char**)(&(tmp)))=(char*)((tmp!=NULL)?(tmp)->hh.next:NULL)))
1015 #else
1016 #define HASH_ITER(hh,head,el,tmp) \
1017 for(((el)=(head)), ((tmp)=DECLTYPE(el)((head!=NULL)?(head)->hh.next:NULL)); \
1018  (el) != NULL; ((el)=(tmp)), ((tmp)=DECLTYPE(el)((tmp!=NULL)?(tmp)->hh.next:NULL)))
1019 #endif
1020 
1021 /* obtain a count of items in the hash */
1022 #define HASH_COUNT(head) HASH_CNT(hh,head)
1023 #define HASH_CNT(hh,head) ((head != NULL)?((head)->hh.tbl->num_items):0U)
1024 
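HASH_ITER keeps a scratch pointer to the following element, so the current one can be deleted (or freed) mid-traversal; HASH_COUNT simply reads the item count kept in the table. A minimal sketch that empties a hash (illustrative names; pair the free call with however the elements were allocated, which in this PostGIS copy normally means pfree for palloc'd memory):

    #include "uthash.h"

    struct item {
        int id;
        UT_hash_handle hh;
    };

    /* delete every element and return how many there were */
    static unsigned delete_all(struct item **head)
    {
        struct item *cur, *tmp;
        unsigned n = HASH_COUNT(*head);

        HASH_ITER(hh, *head, cur, tmp) {     /* tmp lets us remove cur safely */
            HASH_DEL(*head, cur);
            uthash_free(cur, sizeof(*cur));  /* pfree() in this PostGIS copy  */
        }
        return n;
    }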
1025 typedef struct UT_hash_bucket {
1026  struct UT_hash_handle *hh_head;
1027  unsigned count;
1028 
1029  /* expand_mult is normally set to 0. In this situation, the max chain length
1030  * threshold is enforced at its default value, HASH_BKT_CAPACITY_THRESH. (If
1031  * the bucket's chain exceeds this length, bucket expansion is triggered).
1032  * However, setting expand_mult to a non-zero value delays bucket expansion
1033  * (that would be triggered by additions to this particular bucket)
1034  * until its chain length reaches a *multiple* of HASH_BKT_CAPACITY_THRESH.
1035  * (The multiplier is simply expand_mult+1). The whole idea of this
1036  * multiplier is to reduce bucket expansions, since they are expensive, in
1037  * situations where we know that a particular bucket tends to be overused.
1038  * It is better to let its chain length grow to a longer yet-still-bounded
1039  * value, than to do an O(n) bucket expansion too often.
1040  */
1041  unsigned expand_mult;
1042 
1044 
1045 /* random signature used only to find hash tables in external analysis */
1046 #define HASH_SIGNATURE 0xa0111fe1u
1047 #define HASH_BLOOM_SIGNATURE 0xb12220f2u
1048 
1049 typedef struct UT_hash_table {
1050  UT_hash_bucket *buckets;
1051  unsigned num_buckets, log2_num_buckets;
1052  unsigned num_items;
1053  struct UT_hash_handle *tail; /* tail hh in app order, for fast append */
1054  ptrdiff_t hho; /* hash handle offset (byte pos of hash handle in element) */
1055 
1056  /* in an ideal situation (all buckets used equally), no bucket would have
1057  * more than ceil(#items/#buckets) items. that's the ideal chain length. */
1058  unsigned ideal_chain_maxlen;
1059
1060  /* nonideal_items is the number of items in the hash whose chain position
1061  * exceeds the ideal chain maxlen. these items pay the penalty for an uneven
1062  * hash distribution; reaching them in a chain traversal takes >ideal steps */
1063  unsigned nonideal_items;
1064 
1065  /* ineffective expands occur when a bucket doubling was performed, but
1066  * afterward, more than half the items in the hash had nonideal chain
1067  * positions. If this happens on two consecutive expansions we inhibit any
1068  * further expansion, as it's not helping; this happens when the hash
1069  * function isn't a good fit for the key domain. When expansion is inhibited
1070  * the hash will still work, albeit no longer in constant time. */
1071  unsigned ineff_expands, noexpand;
1072
1073  uint32_t signature; /* used only to find hash tables in external analysis */
1074 #ifdef HASH_BLOOM
1075  uint32_t bloom_sig; /* used only to test bloom exists in external analysis */
1076  uint8_t *bloom_bv;
1077  uint8_t bloom_nbits;
1078 #endif
1079 
1080 } UT_hash_table;
1081
1082 typedef struct UT_hash_handle {
1083  struct UT_hash_table *tbl;
1084  void *prev; /* prev element in app order */
1085  void *next; /* next element in app order */
1086  struct UT_hash_handle *hh_prev; /* previous hh in bucket order */
1087  struct UT_hash_handle *hh_next; /* next hh in bucket order */
1088  void *key; /* ptr to enclosing struct's key */
1089  unsigned keylen; /* enclosing struct's key len */
1090  unsigned hashv; /* result of hash-fcn(key) */
1091 } UT_hash_handle;
1092
1093 #endif /* UTHASH_H */