]>
Commit | Line | Data |
---|---|---|
92d8ed8a EW |
1 | /* |
2 | * A wrapper around cbtree which stores oids | |
3 | * May be used to replace oid-array for prefix (abbreviation) matches | |
4 | */ | |
5 | #include "oidtree.h" | |
6 | #include "alloc.h" | |
7 | #include "hash.h" | |
8 | ||
/*
 * A tree node is nothing but a critbit node: the flexible key
 * array n.k[] at its tail stores a full "struct object_id"
 * (written via oidcpy_with_padding in oidtree_insert).
 */
struct oidtree_node {
	/* n.k[] is used to store "struct object_id" */
	struct cb_node n;
};
13 | ||
/* Per-walk state threaded through cb_each() to the iter() callback. */
struct oidtree_iter_data {
	oidtree_iter fn;	/* user callback invoked for each matching oid */
	void *arg;		/* opaque cookie passed through to fn */
	size_t *last_nibble_at;	/* if non-NULL, byte index of a trailing half-nibble */
	int algo;		/* algorithm filter; GIT_HASH_UNKNOWN matches all */
	uint8_t last_byte;	/* byte whose high nibble holds the odd trailing hex digit */
};
21 | ||
22 | void oidtree_init(struct oidtree *ot) | |
23 | { | |
24 | cb_init(&ot->tree); | |
25 | mem_pool_init(&ot->mem_pool, 0); | |
26 | } | |
27 | ||
28 | void oidtree_clear(struct oidtree *ot) | |
29 | { | |
30 | if (ot) { | |
31 | mem_pool_discard(&ot->mem_pool, 0); | |
32 | oidtree_init(ot); | |
33 | } | |
34 | } | |
35 | ||
36 | void oidtree_insert(struct oidtree *ot, const struct object_id *oid) | |
37 | { | |
38 | struct oidtree_node *on; | |
39 | ||
40 | if (!oid->algo) | |
41 | BUG("oidtree_insert requires oid->algo"); | |
42 | ||
43 | on = mem_pool_alloc(&ot->mem_pool, sizeof(*on) + sizeof(*oid)); | |
44 | oidcpy_with_padding((struct object_id *)on->n.k, oid); | |
45 | ||
46 | /* | |
47 | * n.b. Current callers won't get us duplicates, here. If a | |
48 | * future caller causes duplicates, there'll be a a small leak | |
49 | * that won't be freed until oidtree_clear. Currently it's not | |
50 | * worth maintaining a free list | |
51 | */ | |
52 | cb_insert(&ot->tree, &on->n, sizeof(*oid)); | |
53 | } | |
54 | ||
55 | ||
56 | int oidtree_contains(struct oidtree *ot, const struct object_id *oid) | |
57 | { | |
58 | struct object_id k; | |
59 | size_t klen = sizeof(k); | |
60 | ||
61 | oidcpy_with_padding(&k, oid); | |
62 | ||
63 | if (oid->algo == GIT_HASH_UNKNOWN) | |
64 | klen -= sizeof(oid->algo); | |
65 | ||
66 | /* cb_lookup relies on memcmp on the struct, so order matters: */ | |
67 | klen += BUILD_ASSERT_OR_ZERO(offsetof(struct object_id, hash) < | |
68 | offsetof(struct object_id, algo)); | |
69 | ||
70 | return cb_lookup(&ot->tree, (const uint8_t *)&k, klen) ? 1 : 0; | |
71 | } | |
72 | ||
73 | static enum cb_next iter(struct cb_node *n, void *arg) | |
74 | { | |
75 | struct oidtree_iter_data *x = arg; | |
76 | const struct object_id *oid = (const struct object_id *)n->k; | |
77 | ||
78 | if (x->algo != GIT_HASH_UNKNOWN && x->algo != oid->algo) | |
79 | return CB_CONTINUE; | |
80 | ||
81 | if (x->last_nibble_at) { | |
82 | if ((oid->hash[*x->last_nibble_at] ^ x->last_byte) & 0xf0) | |
83 | return CB_CONTINUE; | |
84 | } | |
85 | ||
86 | return x->fn(oid, x->arg); | |
87 | } | |
88 | ||
89 | void oidtree_each(struct oidtree *ot, const struct object_id *oid, | |
90 | size_t oidhexsz, oidtree_iter fn, void *arg) | |
91 | { | |
92 | size_t klen = oidhexsz / 2; | |
93 | struct oidtree_iter_data x = { 0 }; | |
94 | assert(oidhexsz <= GIT_MAX_HEXSZ); | |
95 | ||
96 | x.fn = fn; | |
97 | x.arg = arg; | |
98 | x.algo = oid->algo; | |
99 | if (oidhexsz & 1) { | |
100 | x.last_byte = oid->hash[klen]; | |
101 | x.last_nibble_at = &klen; | |
102 | } | |
103 | cb_each(&ot->tree, (const uint8_t *)oid, klen, iter, &x); | |
104 | } |