]>
Commit | Line | Data |
---|---|---|
92d8ed8a EW |
1 | /* |
2 | * A wrapper around cbtree which stores oids | |
3 | * May be used to replace oid-array for prefix (abbreviation) matches | |
4 | */ | |
5 | #include "oidtree.h" | |
6 | #include "alloc.h" | |
7 | #include "hash.h" | |
8 | ||
92d8ed8a EW |
/*
 * Per-traversal state threaded through cb_each() into the iter()
 * callback below.
 */
struct oidtree_iter_data {
	oidtree_iter fn;	/* user callback invoked for each matching oid */
	void *arg;		/* opaque cookie passed through to fn */
	size_t *last_nibble_at;	/* if non-NULL, index into hash[] of the byte
				 * holding the trailing half-byte of an
				 * odd-length hex prefix */
	int algo;		/* restrict matches to this hash algorithm,
				 * or GIT_HASH_UNKNOWN to accept any */
	uint8_t last_byte;	/* byte containing the trailing nibble; only
				 * meaningful when last_nibble_at is set */
};
16 | ||
17 | void oidtree_init(struct oidtree *ot) | |
18 | { | |
19 | cb_init(&ot->tree); | |
20 | mem_pool_init(&ot->mem_pool, 0); | |
21 | } | |
22 | ||
23 | void oidtree_clear(struct oidtree *ot) | |
24 | { | |
25 | if (ot) { | |
26 | mem_pool_discard(&ot->mem_pool, 0); | |
27 | oidtree_init(ot); | |
28 | } | |
29 | } | |
30 | ||
31 | void oidtree_insert(struct oidtree *ot, const struct object_id *oid) | |
32 | { | |
14825944 | 33 | struct cb_node *on; |
8bcda98d | 34 | struct object_id k; |
92d8ed8a EW |
35 | |
36 | if (!oid->algo) | |
37 | BUG("oidtree_insert requires oid->algo"); | |
38 | ||
39 | on = mem_pool_alloc(&ot->mem_pool, sizeof(*on) + sizeof(*oid)); | |
8bcda98d RS |
40 | |
41 | /* | |
42 | * Clear the padding and copy the result in separate steps to | |
43 | * respect the 4-byte alignment needed by struct object_id. | |
44 | */ | |
45 | oidcpy_with_padding(&k, oid); | |
46 | memcpy(on->k, &k, sizeof(k)); | |
92d8ed8a EW |
47 | |
48 | /* | |
49 | * n.b. Current callers won't get us duplicates, here. If a | |
50 | * future caller causes duplicates, there'll be a a small leak | |
51 | * that won't be freed until oidtree_clear. Currently it's not | |
52 | * worth maintaining a free list | |
53 | */ | |
14825944 | 54 | cb_insert(&ot->tree, on, sizeof(*oid)); |
92d8ed8a EW |
55 | } |
56 | ||
57 | ||
/*
 * Return 1 if "oid" was inserted into "ot", 0 otherwise.
 *
 * If oid->algo is GIT_HASH_UNKNOWN, only the (zero-padded) hash
 * bytes are compared, so an entry stored under any algorithm with
 * the same hash value matches.
 */
int oidtree_contains(struct oidtree *ot, const struct object_id *oid)
{
	struct object_id k;
	size_t klen = sizeof(k);

	/* zero-pad beyond the hash so trailing bytes compare equal */
	oidcpy_with_padding(&k, oid);

	/* unknown algo: exclude the trailing algo field from the key */
	if (oid->algo == GIT_HASH_UNKNOWN)
		klen -= sizeof(oid->algo);

	/* cb_lookup relies on memcmp on the struct, so order matters: */
	klen += BUILD_ASSERT_OR_ZERO(offsetof(struct object_id, hash) <
			offsetof(struct object_id, algo));

	return cb_lookup(&ot->tree, (const uint8_t *)&k, klen) ? 1 : 0;
}
74 | ||
75 | static enum cb_next iter(struct cb_node *n, void *arg) | |
76 | { | |
77 | struct oidtree_iter_data *x = arg; | |
8bcda98d RS |
78 | struct object_id k; |
79 | ||
80 | /* Copy to provide 4-byte alignment needed by struct object_id. */ | |
81 | memcpy(&k, n->k, sizeof(k)); | |
92d8ed8a | 82 | |
8bcda98d | 83 | if (x->algo != GIT_HASH_UNKNOWN && x->algo != k.algo) |
92d8ed8a EW |
84 | return CB_CONTINUE; |
85 | ||
86 | if (x->last_nibble_at) { | |
8bcda98d | 87 | if ((k.hash[*x->last_nibble_at] ^ x->last_byte) & 0xf0) |
92d8ed8a EW |
88 | return CB_CONTINUE; |
89 | } | |
90 | ||
8bcda98d | 91 | return x->fn(&k, x->arg); |
92d8ed8a EW |
92 | } |
93 | ||
/*
 * Call "fn" for each stored oid whose leading "oidhexsz" hex digits
 * match those of "oid".  Iteration stops early if "fn" returns
 * something other than CB_CONTINUE.
 *
 * The crit-bit tree can only key on whole bytes, so for an odd
 * number of hex digits the trailing half-byte is checked separately
 * in iter() via last_nibble_at/last_byte.
 */
void oidtree_each(struct oidtree *ot, const struct object_id *oid,
		size_t oidhexsz, oidtree_iter fn, void *arg)
{
	size_t klen = oidhexsz / 2;	/* whole bytes of the prefix */
	struct oidtree_iter_data x = { 0 };
	assert(oidhexsz <= GIT_MAX_HEXSZ);

	x.fn = fn;
	x.arg = arg;
	x.algo = oid->algo;
	if (oidhexsz & 1) {
		/*
		 * Odd prefix length: the tree walk below only uses the
		 * whole bytes; stash the byte holding the extra nibble
		 * so iter() can compare its high half.  last_nibble_at
		 * points at the local "klen", which stays live for the
		 * duration of the cb_each() call.
		 */
		x.last_byte = oid->hash[klen];
		x.last_nibble_at = &klen;
	}
	cb_each(&ot->tree, (const uint8_t *)oid, klen, iter, &x);
}