/* (Extraction artifact: this file was captured from a git blame web
 * view of src/shared/hashmap.c in the systemd tree, at commit
 * "resolve: make DnsScope::conflict_queue an OrderedHashmap".) */
/*-*- Mode: C; c-basic-offset: 8; indent-tabs-mode: nil -*-*/

/***
  This file is part of systemd.

  Copyright 2010 Lennart Poettering

  systemd is free software; you can redistribute it and/or modify it
  under the terms of the GNU Lesser General Public License as published by
  the Free Software Foundation; either version 2.1 of the License, or
  (at your option) any later version.

  systemd is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public License
  along with systemd; If not, see <http://www.gnu.org/licenses/>.
***/
21
#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "util.h"
#include "hashmap.h"
#include "macro.h"
#include "siphash24.h"
31
/* Initial (and minimum) number of hash buckets; the initial bucket
 * array is allocated inline with the Hashmap header itself. */
#define INITIAL_N_BUCKETS 31

/* One key/value mapping. Every entry is linked twice: into its hash
 * bucket chain (doubly linked, for O(1) unlink) and into a global
 * insertion-ordered iteration list. */
struct hashmap_entry {
        const void *key;
        void *value;
        struct hashmap_entry *bucket_next, *bucket_previous;
        struct hashmap_entry *iterate_next, *iterate_previous;
};
40
struct Hashmap {
        const struct hash_ops *hash_ops;   /* hash + compare callbacks; never NULL after hashmap_new() */

        /* Insertion-ordered list of all entries, walked by iteration
         * and by resize_buckets(). */
        struct hashmap_entry *iterate_list_head, *iterate_list_tail;

        struct hashmap_entry ** buckets;   /* bucket heads; initially points just past this header */
        unsigned n_buckets, n_entries;

        uint8_t hash_key[HASH_KEY_SIZE];   /* per-table siphash key, replaced on resize */
        bool from_pool:1;                  /* allocated from the main-thread tile pool? */
};
52
/* Header of one allocation pool. Tiles are handed out sequentially
 * from the area following this header; released tiles go onto a
 * per-pool-chain free list (see allocate_tile()/deallocate_tile()). */
struct pool {
        struct pool *next;      /* older, exhausted pools */
        unsigned n_tiles;       /* capacity of this pool */
        unsigned n_used;        /* tiles handed out so far */
};

/* Pool chain + free list for Hashmap headers. */
static struct pool *first_hashmap_pool = NULL;
static void *first_hashmap_tile = NULL;

/* Pool chain + free list for struct hashmap_entry tiles. */
static struct pool *first_entry_pool = NULL;
static void *first_entry_tile = NULL;
64
/* Hand out one tile of tile_size bytes. Recycled tiles from the free
 * list are preferred; otherwise the newest pool is used, growing the
 * pool chain geometrically (at least 'at_least' tiles) when it is
 * exhausted. Returns NULL on OOM.
 *
 * NOTE(review): this uses unsynchronized global state; callers are
 * expected to invoke it from the main thread only (hashmap_new()
 * checks is_main_thread()) — confirm no other call paths exist. */
static void* allocate_tile(struct pool **first_pool, void **first_tile, size_t tile_size, unsigned at_least) {
        unsigned i;

        /* When a tile is released we add it to the list and simply
         * place the next pointer at its offset 0. */

        assert(tile_size >= sizeof(void*));
        assert(at_least > 0);

        if (*first_tile) {
                void *r;

                /* Pop the head of the free list; its first word is
                 * the link to the next free tile. */
                r = *first_tile;
                *first_tile = * (void**) (*first_tile);
                return r;
        }

        if (_unlikely_(!*first_pool) || _unlikely_((*first_pool)->n_used >= (*first_pool)->n_tiles)) {
                unsigned n;
                size_t size;
                struct pool *p;

                /* Double the previous pool size (or start with
                 * at_least), round the allocation up to whole pages,
                 * then recompute how many tiles actually fit. */
                n = *first_pool ? (*first_pool)->n_tiles : 0;
                n = MAX(at_least, n * 2);
                size = PAGE_ALIGN(ALIGN(sizeof(struct pool)) + n*tile_size);
                n = (size - ALIGN(sizeof(struct pool))) / tile_size;

                p = malloc(size);
                if (!p)
                        return NULL;

                p->next = *first_pool;
                p->n_tiles = n;
                p->n_used = 0;

                *first_pool = p;
        }

        i = (*first_pool)->n_used++;

        /* Tiles live directly after the aligned pool header. */
        return ((uint8_t*) (*first_pool)) + ALIGN(sizeof(struct pool)) + i*tile_size;
}
107
/* Return a tile to its free list. The first sizeof(void*) bytes of
 * the tile are reused as the free-list link, so the memory is never
 * handed back to the OS (except under valgrind, see below). */
static void deallocate_tile(void **first_tile, void *p) {
        * (void**) p = *first_tile;
        *first_tile = p;
}
112
#ifdef VALGRIND

/* Free an entire pool chain. Only used by the cleanup destructor
 * below; during normal operation pools are never released. */
static void drop_pool(struct pool *p) {
        while (p) {
                struct pool *n;
                n = p->next;
                free(p);
                p = n;
        }
}

/* Runs at program exit (GCC destructor attribute) so valgrind does
 * not report the pool memory as leaked. */
__attribute__((destructor)) static void cleanup_pool(void) {
        /* Be nice to valgrind */

        drop_pool(first_hashmap_pool);
        drop_pool(first_entry_pool);
}

#endif
132
/* Hash a NUL-terminated string with keyed siphash24. The 64-bit
 * result is truncated to unsigned long, which may be 32 bits on some
 * platforms. */
unsigned long string_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
        uint64_t u;
        siphash24((uint8_t*) &u, p, strlen(p), hash_key);
        return (unsigned long) u;
}
138
/* strcmp() adapted to the hash_ops comparator signature: negative,
 * zero or positive for a<b, a==b, a>b respectively. */
int string_compare_func(const void *a, const void *b) {
        const char *x = a, *y = b;

        return strcmp(x, y);
}
142
/* Standard ops for maps keyed by NUL-terminated strings. */
const struct hash_ops string_hash_ops = {
        .hash = string_hash_func,
        .compare = string_compare_func
};
147
/* Hash the pointer VALUE itself, not what it points to — note that
 * &p and sizeof(p) are passed to siphash24(). Used for maps keyed by
 * object identity. */
unsigned long trivial_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
        uint64_t u;
        siphash24((uint8_t*) &u, &p, sizeof(p), hash_key);
        return (unsigned long) u;
}
153
/* Total order on the pointer values themselves (identity keys).
 *
 * Fix: relational comparison of pointers into different objects is
 * undefined behavior (C11 6.5.8p5). Compare the pointers' integer
 * representations via uintptr_t instead, which is well-defined and
 * yields the same -1/0/1 result on flat-address-space platforms. */
int trivial_compare_func(const void *a, const void *b) {
        uintptr_t x = (uintptr_t) a, y = (uintptr_t) b;

        return x < y ? -1 : (x > y ? 1 : 0);
}
157
/* Default ops (pointer identity); used when hashmap_new() is passed
 * NULL hash_ops. */
const struct hash_ops trivial_hash_ops = {
        .hash = trivial_hash_func,
        .compare = trivial_compare_func
};
162
/* Hash a uint64_t given by pointer with keyed siphash24; result
 * truncated to unsigned long. */
unsigned long uint64_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
        uint64_t u;
        siphash24((uint8_t*) &u, p, sizeof(uint64_t), hash_key);
        return (unsigned long) u;
}
168
/* Three-way comparison of two uint64_t values given by pointer:
 * -1, 0 or 1. */
int uint64_compare_func(const void *_a, const void *_b) {
        const uint64_t *a = _a, *b = _b;

        if (*a < *b)
                return -1;
        if (*a > *b)
                return 1;
        return 0;
}
175
/* Ops for maps keyed by pointers to uint64_t values. */
const struct hash_ops uint64_hash_ops = {
        .hash = uint64_hash_func,
        .compare = uint64_compare_func
};
180
/* dev_t variants, compiled only when dev_t is not 64 bits wide — in
 * which case uint64_hash_ops cannot simply be reused for it. */
#if SIZEOF_DEV_T != 8
/* Hash a dev_t given by pointer with keyed siphash24. */
unsigned long devt_hash_func(const void *p, const uint8_t hash_key[HASH_KEY_SIZE]) {
        uint64_t u;
        siphash24((uint8_t*) &u, p, sizeof(dev_t), hash_key);
        return (unsigned long) u;
}

/* Three-way comparison of two dev_t values given by pointer. */
int devt_compare_func(const void *_a, const void *_b) {
        dev_t a, b;
        a = *(const dev_t*) _a;
        b = *(const dev_t*) _b;
        return a < b ? -1 : (a > b ? 1 : 0);
}

const struct hash_ops devt_hash_ops = {
        .hash = devt_hash_func,
        .compare = devt_compare_func
};
#endif
200
/* Map key p to a bucket index of h, using the table's current
 * (randomized) hash key. */
static unsigned bucket_hash(Hashmap *h, const void *p) {
        return (unsigned) (h->hash_ops->hash(p, h->hash_key) % h->n_buckets);
}
204
/* Copy a siphash key into hash_key[]. If reuse_is_ok, the most
 * recently generated key is reused; otherwise (or on first call) a
 * fresh random key is drawn via random_bytes(). */
static void get_hash_key(uint8_t hash_key[HASH_KEY_SIZE], bool reuse_is_ok) {
        static uint8_t current[HASH_KEY_SIZE];
        static bool current_initialized = false;

        /* Returns a hash function key to use. In order to keep things
         * fast we will not generate a new key each time we allocate a
         * new hash table. Instead, we'll just reuse the most recently
         * generated one, except if we never generated one or when we
         * are rehashing an entire hash table because we reached a
         * fill level */

        /* NOTE(review): 'current' is unsynchronized static state, but
         * hashmap_new() may run off the main thread — confirm callers
         * cannot race here. */
        if (!current_initialized || !reuse_is_ok) {
                random_bytes(current, sizeof(current));
                current_initialized = true;
        }

        memcpy(hash_key, current, sizeof(current));
}
223
/* Allocate a new, empty Hashmap using the given hash_ops (pointer
 * identity via trivial_hash_ops when NULL). The header and the
 * initial bucket array share one allocation: from the tile pool when
 * called on the main thread, from the heap otherwise. Returns NULL
 * on OOM. */
Hashmap *hashmap_new(const struct hash_ops *hash_ops) {
        bool b;
        Hashmap *h;
        size_t size;

        b = is_main_thread();

        /* One allocation: aligned header + INITIAL_N_BUCKETS bucket
         * pointers. */
        size = ALIGN(sizeof(Hashmap)) + INITIAL_N_BUCKETS * sizeof(struct hashmap_entry*);

        if (b) {
                h = allocate_tile(&first_hashmap_pool, &first_hashmap_tile, size, 8);
                if (!h)
                        return NULL;

                /* Pool tiles are recycled, so clear explicitly. */
                memzero(h, size);
        } else {
                h = malloc0(size);

                if (!h)
                        return NULL;
        }

        h->hash_ops = hash_ops ? hash_ops : &trivial_hash_ops;

        h->n_buckets = INITIAL_N_BUCKETS;
        h->n_entries = 0;
        h->iterate_list_head = h->iterate_list_tail = NULL;

        /* Bucket array lives immediately after the aligned header;
         * hashmap_free()/resize_buckets() test for this address to
         * decide whether the array is a separate allocation. */
        h->buckets = (struct hashmap_entry**) ((uint8_t*) h + ALIGN(sizeof(Hashmap)));

        h->from_pool = b;

        /* Reusing the last random key is fine here; resize_buckets()
         * draws a fresh one when the table grows. */
        get_hash_key(h->hash_key, true);

        return h;
}
260
261int hashmap_ensure_allocated(Hashmap **h, const struct hash_ops *hash_ops) {
262 Hashmap *q;
263
264 assert(h);
265
266 if (*h)
267 return 0;
268
269 q = hashmap_new(hash_ops);
270 if (!q)
271 return -ENOMEM;
272
273 *h = q;
274 return 0;
275}
276
/* Link entry e (whose key hashes to bucket 'hash') into h: at the
 * head of its bucket chain and at the tail of the iteration list,
 * then bump the entry count. */
static void link_entry(Hashmap *h, struct hashmap_entry *e, unsigned hash) {
        assert(h);
        assert(e);

        /* Insert into hash table */
        e->bucket_next = h->buckets[hash];
        e->bucket_previous = NULL;
        if (h->buckets[hash])
                h->buckets[hash]->bucket_previous = e;
        h->buckets[hash] = e;

        /* Insert into iteration list (append, preserving insertion
         * order) */
        e->iterate_previous = h->iterate_list_tail;
        e->iterate_next = NULL;
        if (h->iterate_list_tail) {
                assert(h->iterate_list_head);
                h->iterate_list_tail->iterate_next = e;
        } else {
                /* List was empty: e becomes both head and tail. */
                assert(!h->iterate_list_head);
                h->iterate_list_head = e;
        }
        h->iterate_list_tail = e;

        h->n_entries++;
        assert(h->n_entries >= 1);
}
303
/* Detach entry e from both the iteration list and its bucket chain
 * (index 'hash'), and decrement the entry count. The entry itself is
 * not freed. */
static void unlink_entry(Hashmap *h, struct hashmap_entry *e, unsigned hash) {
        assert(h);
        assert(e);

        /* Remove from iteration list */
        if (e->iterate_next)
                e->iterate_next->iterate_previous = e->iterate_previous;
        else
                h->iterate_list_tail = e->iterate_previous;

        if (e->iterate_previous)
                e->iterate_previous->iterate_next = e->iterate_next;
        else
                h->iterate_list_head = e->iterate_next;

        /* Remove from hash table bucket list */
        if (e->bucket_next)
                e->bucket_next->bucket_previous = e->bucket_previous;

        if (e->bucket_previous)
                e->bucket_previous->bucket_next = e->bucket_next;
        else
                /* e was the chain head. */
                h->buckets[hash] = e->bucket_next;

        assert(h->n_entries >= 1);
        h->n_entries--;
}
331
/* Unlink e from h and release its memory — back to the entry tile
 * pool for pool-allocated maps, otherwise to the heap. The key and
 * value pointers themselves are not freed. */
static void remove_entry(Hashmap *h, struct hashmap_entry *e) {
        unsigned hash;

        assert(h);
        assert(e);

        hash = bucket_hash(h, e->key);
        unlink_entry(h, e, hash);

        if (h->from_pool)
                deallocate_tile(&first_entry_tile, e);
        else
                free(e);
}
346
void hashmap_free(Hashmap*h) {

        /* Free the hashmap, but nothing in it */

        if (!h)
                return;

        hashmap_clear(h);

        /* The bucket array is a separate allocation only if
         * resize_buckets() replaced the one embedded in the header. */
        if (h->buckets != (struct hashmap_entry**) ((uint8_t*) h + ALIGN(sizeof(Hashmap))))
                free(h->buckets);

        /* Release the header the same way it was allocated. */
        if (h->from_pool)
                deallocate_tile(&first_hashmap_tile, h);
        else
                free(h);
}
364
365void hashmap_free_free(Hashmap *h) {
366
367 /* Free the hashmap and all data objects in it, but not the
368 * keys */
369
370 if (!h)
371 return;
372
373 hashmap_clear_free(h);
374 hashmap_free(h);
375}
376
377void hashmap_free_free_free(Hashmap *h) {
378
379 /* Free the hashmap and all data and key objects in it */
380
381 if (!h)
382 return;
383
384 hashmap_clear_free_free(h);
385 hashmap_free(h);
386}
387
/* Remove (and deallocate) every entry; the keys and values pointed
 * to are not freed. Tolerates h == NULL. */
void hashmap_clear(Hashmap *h) {
        if (!h)
                return;

        while (h->iterate_list_head)
                remove_entry(h, h->iterate_list_head);
}
395
396void hashmap_clear_free(Hashmap *h) {
397 void *p;
398
399 if (!h)
400 return;
401
402 while ((p = hashmap_steal_first(h)))
403 free(p);
404}
405
/* Remove every entry, freeing both its value and its key. Tolerates
 * h == NULL. */
void hashmap_clear_free_free(Hashmap *h) {
        if (!h)
                return;

        while (h->iterate_list_head) {
                void *a, *b;

                /* Save both pointers before remove_entry() releases
                 * the entry struct they live in. */
                a = h->iterate_list_head->value;
                b = (void*) h->iterate_list_head->key;
                remove_entry(h, h->iterate_list_head);
                free(a);
                free(b);
        }
}
420
/* Linear scan of bucket chain 'hash' for an entry whose key compares
 * equal to 'key'; returns it, or NULL if absent. */
static struct hashmap_entry *hash_scan(Hashmap *h, unsigned hash, const void *key) {
        struct hashmap_entry *e;
        assert(h);
        assert(hash < h->n_buckets);

        for (e = h->buckets[hash]; e; e = e->bucket_next)
                if (h->hash_ops->compare(e->key, key) == 0)
                        return e;

        return NULL;
}
432
/* Grow the bucket array once the load factor reaches 3/4, rehashing
 * every entry with a freshly randomized hash key. Returns true if
 * the table was rehashed — bucket indices computed before the call
 * are then stale — and false otherwise, including on OOM, which is
 * tolerated and merely leaves the table more densely packed. */
static bool resize_buckets(Hashmap *h) {
        struct hashmap_entry **n, *i;
        unsigned m;
        uint8_t nkey[HASH_KEY_SIZE];

        assert(h);

        /* Load-factor check: n_entries/n_buckets < 3/4, without
         * division. */
        if (_likely_(h->n_entries*4 < h->n_buckets*3))
                return false;

        /* Increase by four */
        m = (h->n_entries+1)*4-1;

        /* If we hit OOM we simply risk packed hashmaps... */
        n = new0(struct hashmap_entry*, m);
        if (!n)
                return false;

        /* Let's use a different randomized hash key for the
         * extension, so that people cannot guess what we are using
         * here forever */
        get_hash_key(nkey, false);

        /* Walk the iteration list — which is independent of the
         * bucket structure — and move each entry into its new
         * bucket. */
        for (i = h->iterate_list_head; i; i = i->iterate_next) {
                unsigned long old_bucket, new_bucket;

                old_bucket = h->hash_ops->hash(i->key, h->hash_key) % h->n_buckets;

                /* First, drop from old bucket table */
                if (i->bucket_next)
                        i->bucket_next->bucket_previous = i->bucket_previous;

                if (i->bucket_previous)
                        i->bucket_previous->bucket_next = i->bucket_next;
                else
                        h->buckets[old_bucket] = i->bucket_next;

                /* Then, add to new bucket table */
                new_bucket = h->hash_ops->hash(i->key, nkey) % m;

                i->bucket_next = n[new_bucket];
                i->bucket_previous = NULL;
                if (n[new_bucket])
                        n[new_bucket]->bucket_previous = i;
                n[new_bucket] = i;
        }

        /* The initial bucket array is embedded in the Hashmap
         * allocation itself and must not be free()d. */
        if (h->buckets != (struct hashmap_entry**) ((uint8_t*) h + ALIGN(sizeof(Hashmap))))
                free(h->buckets);

        h->buckets = n;
        h->n_buckets = m;

        memcpy(h->hash_key, nkey, HASH_KEY_SIZE);

        return true;
}
490
/* Insert a new entry, for callers that already verified no entry
 * with this key exists and computed its bucket index. Returns 1 on
 * success, -ENOMEM on allocation failure.
 *
 * NOTE(review): identifiers starting with a double underscore are
 * reserved for the C implementation; renaming (together with the
 * callers in this file) would be cleaner. */
static int __hashmap_put(Hashmap *h, const void *key, void *value, unsigned hash) {
        /* For when we know no such entry exists yet */

        struct hashmap_entry *e;

        /* The table may grow here, which invalidates the caller's
         * bucket index — recompute it in that case. */
        if (resize_buckets(h))
                hash = bucket_hash(h, key);

        if (h->from_pool)
                e = allocate_tile(&first_entry_pool, &first_entry_tile, sizeof(struct hashmap_entry), 64U);
        else
                e = new(struct hashmap_entry, 1);

        if (!e)
                return -ENOMEM;

        e->key = key;
        e->value = value;

        link_entry(h, e, hash);

        return 1;
}
514
515int hashmap_put(Hashmap *h, const void *key, void *value) {
516 struct hashmap_entry *e;
517 unsigned hash;
518
519 assert(h);
520
521 hash = bucket_hash(h, key);
522 e = hash_scan(h, hash, key);
523 if (e) {
524 if (e->value == value)
525 return 0;
526 return -EEXIST;
527 }
528
529 return __hashmap_put(h, key, value, hash);
530}
531
532int hashmap_replace(Hashmap *h, const void *key, void *value) {
533 struct hashmap_entry *e;
534 unsigned hash;
535
536 assert(h);
537
538 hash = bucket_hash(h, key);
539 e = hash_scan(h, hash, key);
540 if (e) {
541 e->key = key;
542 e->value = value;
543 return 0;
544 }
545
546 return __hashmap_put(h, key, value, hash);
547}
548
549int hashmap_update(Hashmap *h, const void *key, void *value) {
550 struct hashmap_entry *e;
551 unsigned hash;
552
553 assert(h);
554
555 hash = bucket_hash(h, key);
556 e = hash_scan(h, hash, key);
557 if (!e)
558 return -ENOENT;
559
560 e->value = value;
561 return 0;
562}
563
564void* hashmap_get(Hashmap *h, const void *key) {
565 unsigned hash;
566 struct hashmap_entry *e;
567
568 if (!h)
569 return NULL;
570
571 hash = bucket_hash(h, key);
572 e = hash_scan(h, hash, key);
573 if (!e)
574 return NULL;
575
576 return e->value;
577}
578
579void* hashmap_get2(Hashmap *h, const void *key, void **key2) {
580 unsigned hash;
581 struct hashmap_entry *e;
582
583 if (!h)
584 return NULL;
585
586 hash = bucket_hash(h, key);
587 e = hash_scan(h, hash, key);
588 if (!e)
589 return NULL;
590
591 if (key2)
592 *key2 = (void*) e->key;
593
594 return e->value;
595}
596
597bool hashmap_contains(Hashmap *h, const void *key) {
598 unsigned hash;
599
600 if (!h)
601 return false;
602
603 hash = bucket_hash(h, key);
604 return !!hash_scan(h, hash, key);
605}
606
607void* hashmap_remove(Hashmap *h, const void *key) {
608 struct hashmap_entry *e;
609 unsigned hash;
610 void *data;
611
612 if (!h)
613 return NULL;
614
615 hash = bucket_hash(h, key);
616 e = hash_scan(h, hash, key);
617 if (!e)
618 return NULL;
619
620 data = e->value;
621 remove_entry(h, e);
622
623 return data;
624}
625
626void* hashmap_remove2(Hashmap *h, const void *key, void **rkey) {
627 struct hashmap_entry *e;
628 unsigned hash;
629 void *data;
630
631 if (!h) {
632 if (rkey)
633 *rkey = NULL;
634 return NULL;
635 }
636
637 hash = bucket_hash(h, key);
638 e = hash_scan(h, hash, key);
639 if (!e) {
640 if (rkey)
641 *rkey = NULL;
642 return NULL;
643 }
644
645 data = e->value;
646 if (rkey)
647 *rkey = (void*) e->key;
648
649 remove_entry(h, e);
650
651 return data;
652}
653
/* Re-key the entry stored under old_key to new_key and set its
 * value. Fails with -ENOENT if old_key is absent (or h is NULL) and
 * -EEXIST if new_key is already taken; on failure nothing is
 * modified. */
int hashmap_remove_and_put(Hashmap *h, const void *old_key, const void *new_key, void *value) {
        struct hashmap_entry *e;
        unsigned old_hash, new_hash;

        if (!h)
                return -ENOENT;

        old_hash = bucket_hash(h, old_key);
        e = hash_scan(h, old_hash, old_key);
        if (!e)
                return -ENOENT;

        new_hash = bucket_hash(h, new_key);
        if (hash_scan(h, new_hash, new_key))
                return -EEXIST;

        /* Reuse the entry struct: detach under the old hash, rewrite
         * key/value, reattach under the new hash. */
        unlink_entry(h, e, old_hash);

        e->key = new_key;
        e->value = value;

        link_entry(h, e, new_hash);

        return 0;
}
679
680int hashmap_remove_and_replace(Hashmap *h, const void *old_key, const void *new_key, void *value) {
681 struct hashmap_entry *e, *k;
682 unsigned old_hash, new_hash;
683
684 if (!h)
685 return -ENOENT;
686
687 old_hash = bucket_hash(h, old_key);
688 e = hash_scan(h, old_hash, old_key);
689 if (!e)
690 return -ENOENT;
691
692 new_hash = bucket_hash(h, new_key);
693 k = hash_scan(h, new_hash, new_key);
694 if (k)
695 if (e != k)
696 remove_entry(h, k);
697
698 unlink_entry(h, e, old_hash);
699
700 e->key = new_key;
701 e->value = value;
702
703 link_entry(h, e, new_hash);
704
705 return 0;
706}
707
708void* hashmap_remove_value(Hashmap *h, const void *key, void *value) {
709 struct hashmap_entry *e;
710 unsigned hash;
711
712 if (!h)
713 return NULL;
714
715 hash = bucket_hash(h, key);
716
717 e = hash_scan(h, hash, key);
718 if (!e)
719 return NULL;
720
721 if (e->value != value)
722 return NULL;
723
724 remove_entry(h, e);
725
726 return value;
727}
728
/* Advance iterator *i over h in insertion order, returning the next
 * entry's value and storing its key in *key (if non-NULL). *i is
 * either a sentinel (ITERATOR_FIRST before the first call,
 * ITERATOR_LAST when exhausted) or the entry to visit next. Returns
 * NULL, with *key set to NULL, at the end or when h is NULL.
 *
 * NOTE(review): the iterator holds a raw entry pointer, so removing
 * the entry it refers to invalidates the iterator. */
void *hashmap_iterate(Hashmap *h, Iterator *i, const void **key) {
        struct hashmap_entry *e;

        assert(i);

        if (!h)
                goto at_end;

        if (*i == ITERATOR_LAST)
                goto at_end;

        if (*i == ITERATOR_FIRST && !h->iterate_list_head)
                goto at_end;

        e = *i == ITERATOR_FIRST ? h->iterate_list_head : (struct hashmap_entry*) *i;

        /* Stash the successor (or the end sentinel) for the next
         * call. */
        if (e->iterate_next)
                *i = (Iterator) e->iterate_next;
        else
                *i = ITERATOR_LAST;

        if (key)
                *key = e->key;

        return e->value;

at_end:
        *i = ITERATOR_LAST;

        if (key)
                *key = NULL;

        return NULL;
}
763
764void* hashmap_first(Hashmap *h) {
765
766 if (!h)
767 return NULL;
768
769 if (!h->iterate_list_head)
770 return NULL;
771
772 return h->iterate_list_head->value;
773}
774
775void* hashmap_first_key(Hashmap *h) {
776
777 if (!h)
778 return NULL;
779
780 if (!h->iterate_list_head)
781 return NULL;
782
783 return (void*) h->iterate_list_head->key;
784}
785
786void* hashmap_steal_first(Hashmap *h) {
787 void *data;
788
789 if (!h)
790 return NULL;
791
792 if (!h->iterate_list_head)
793 return NULL;
794
795 data = h->iterate_list_head->value;
796 remove_entry(h, h->iterate_list_head);
797
798 return data;
799}
800
801void* hashmap_steal_first_key(Hashmap *h) {
802 void *key;
803
804 if (!h)
805 return NULL;
806
807 if (!h->iterate_list_head)
808 return NULL;
809
810 key = (void*) h->iterate_list_head->key;
811 remove_entry(h, h->iterate_list_head);
812
813 return key;
814}
815
816unsigned hashmap_size(Hashmap *h) {
817
818 if (!h)
819 return 0;
820
821 return h->n_entries;
822}
823
824unsigned hashmap_buckets(Hashmap *h) {
825
826 if (!h)
827 return 0;
828
829 return h->n_buckets;
830}
831
832bool hashmap_isempty(Hashmap *h) {
833
834 if (!h)
835 return true;
836
837 return h->n_entries == 0;
838}
839
/* Copy all entries of 'other' into h by reference — key and value
 * pointers are shared, not duplicated. Keys already present in h are
 * silently skipped (-EEXIST from hashmap_put() is tolerated).
 * Returns 0 on success or a negative errno (e.g. -ENOMEM) from
 * hashmap_put(). 'other' may be NULL. */
int hashmap_merge(Hashmap *h, Hashmap *other) {
        struct hashmap_entry *e;

        assert(h);

        if (!other)
                return 0;

        for (e = other->iterate_list_head; e; e = e->iterate_next) {
                int r;

                r = hashmap_put(h, e->key, e->value);
                if (r < 0 && r != -EEXIST)
                        return r;
        }

        return 0;
}
858
/* Move every entry of 'other' whose key is not yet in h over to h;
 * conflicting entries stay behind in 'other'. The entry structs are
 * reused rather than reallocated, so unlike hashmap_merge() this
 * cannot fail. 'other' may be NULL. */
void hashmap_move(Hashmap *h, Hashmap *other) {
        struct hashmap_entry *e, *n;

        assert(h);

        /* The same as hashmap_merge(), but every new item from other
         * is moved to h. This function is guaranteed to succeed. */

        if (!other)
                return;

        for (e = other->iterate_list_head; e; e = n) {
                unsigned h_hash, other_hash;

                /* Save the successor first: unlink_entry() below
                 * detaches e from other's iteration list. */
                n = e->iterate_next;

                h_hash = bucket_hash(h, e->key);
                if (hash_scan(h, h_hash, e->key))
                        continue;

                other_hash = bucket_hash(other, e->key);
                unlink_entry(other, e, other_hash);
                link_entry(h, e, h_hash);
        }
}
884
/* Move the single entry stored under 'key' from 'other' into h,
 * reusing the entry struct. Returns -EEXIST if h already contains
 * the key, -ENOENT if 'other' is NULL or lacks it, 0 on success. */
int hashmap_move_one(Hashmap *h, Hashmap *other, const void *key) {
        unsigned h_hash, other_hash;
        struct hashmap_entry *e;

        assert(h);

        /* Check the destination first so we fail before touching
         * 'other'. */
        h_hash = bucket_hash(h, key);
        if (hash_scan(h, h_hash, key))
                return -EEXIST;

        if (!other)
                return -ENOENT;

        other_hash = bucket_hash(other, key);
        e = hash_scan(other, other_hash, key);
        if (!e)
                return -ENOENT;

        unlink_entry(other, e, other_hash);
        link_entry(h, e, h_hash);

        return 0;
}
908
909Hashmap *hashmap_copy(Hashmap *h) {
910 Hashmap *copy;
911
912 assert(h);
913
914 copy = hashmap_new(h->hash_ops);
915 if (!copy)
916 return NULL;
917
918 if (hashmap_merge(copy, h) < 0) {
919 hashmap_free(copy);
920 return NULL;
921 }
922
923 return copy;
924}
925
926char **hashmap_get_strv(Hashmap *h) {
927 char **sv;
928 Iterator it;
929 char *item;
930 int n;
931
932 sv = new(char*, h->n_entries+1);
933 if (!sv)
934 return NULL;
935
936 n = 0;
937 HASHMAP_FOREACH(item, h, it)
938 sv[n++] = item;
939 sv[n] = NULL;
940
941 return sv;
942}
943
944void *hashmap_next(Hashmap *h, const void *key) {
945 unsigned hash;
946 struct hashmap_entry *e;
947
948 assert(key);
949
950 if (!h)
951 return NULL;
952
953 hash = bucket_hash(h, key);
954 e = hash_scan(h, hash, key);
955 if (!e)
956 return NULL;
957
958 e = e->iterate_next;
959 if (!e)
960 return NULL;
961
962 return e->value;
963}