1 // SPDX-License-Identifier: GPL-2.0+
/*
 * Crossported from the same named file of btrfs-progs.
 *
 * Minor modification to include headers.
 */
#include <linux/kernel.h>
#include <linux/rbtree.h>
#include <linux/errno.h>
#include <linux/bug.h>
#include <stdlib.h>

#include "extent-cache.h"
#include "common/rbtree-utils.h"
16 struct cache_extent_search_range
{
22 static int cache_tree_comp_range(struct rb_node
*node
, void *data
)
24 struct cache_extent
*entry
;
25 struct cache_extent_search_range
*range
;
27 range
= (struct cache_extent_search_range
*)data
;
28 entry
= rb_entry(node
, struct cache_extent
, rb_node
);
30 if (entry
->start
+ entry
->size
<= range
->start
)
32 else if (range
->start
+ range
->size
<= entry
->start
)
38 static int cache_tree_comp_nodes(struct rb_node
*node1
, struct rb_node
*node2
)
40 struct cache_extent
*entry
;
41 struct cache_extent_search_range range
;
43 entry
= rb_entry(node2
, struct cache_extent
, rb_node
);
44 range
.start
= entry
->start
;
45 range
.size
= entry
->size
;
47 return cache_tree_comp_range(node1
, (void *)&range
);
50 static int cache_tree_comp_range2(struct rb_node
*node
, void *data
)
52 struct cache_extent
*entry
;
53 struct cache_extent_search_range
*range
;
55 range
= (struct cache_extent_search_range
*)data
;
56 entry
= rb_entry(node
, struct cache_extent
, rb_node
);
58 if (entry
->objectid
< range
->objectid
)
60 else if (entry
->objectid
> range
->objectid
)
62 else if (entry
->start
+ entry
->size
<= range
->start
)
64 else if (range
->start
+ range
->size
<= entry
->start
)
70 static int cache_tree_comp_nodes2(struct rb_node
*node1
, struct rb_node
*node2
)
72 struct cache_extent
*entry
;
73 struct cache_extent_search_range range
;
75 entry
= rb_entry(node2
, struct cache_extent
, rb_node
);
76 range
.objectid
= entry
->objectid
;
77 range
.start
= entry
->start
;
78 range
.size
= entry
->size
;
80 return cache_tree_comp_range2(node1
, (void *)&range
);
83 void cache_tree_init(struct cache_tree
*tree
)
88 static struct cache_extent
*alloc_cache_extent(u64 start
, u64 size
)
90 struct cache_extent
*pe
= malloc(sizeof(*pe
));
101 int add_cache_extent(struct cache_tree
*tree
, u64 start
, u64 size
)
103 struct cache_extent
*pe
= alloc_cache_extent(start
, size
);
109 ret
= insert_cache_extent(tree
, pe
);
116 int insert_cache_extent(struct cache_tree
*tree
, struct cache_extent
*pe
)
118 return rb_insert(&tree
->root
, &pe
->rb_node
, cache_tree_comp_nodes
);
121 int insert_cache_extent2(struct cache_tree
*tree
, struct cache_extent
*pe
)
123 return rb_insert(&tree
->root
, &pe
->rb_node
, cache_tree_comp_nodes2
);
126 struct cache_extent
*lookup_cache_extent(struct cache_tree
*tree
,
129 struct rb_node
*node
;
130 struct cache_extent
*entry
;
131 struct cache_extent_search_range range
;
135 node
= rb_search(&tree
->root
, &range
, cache_tree_comp_range
, NULL
);
139 entry
= rb_entry(node
, struct cache_extent
, rb_node
);
143 struct cache_extent
*lookup_cache_extent2(struct cache_tree
*tree
,
144 u64 objectid
, u64 start
, u64 size
)
146 struct rb_node
*node
;
147 struct cache_extent
*entry
;
148 struct cache_extent_search_range range
;
150 range
.objectid
= objectid
;
153 node
= rb_search(&tree
->root
, &range
, cache_tree_comp_range2
, NULL
);
157 entry
= rb_entry(node
, struct cache_extent
, rb_node
);
161 struct cache_extent
*search_cache_extent(struct cache_tree
*tree
, u64 start
)
163 struct rb_node
*next
;
164 struct rb_node
*node
;
165 struct cache_extent
*entry
;
166 struct cache_extent_search_range range
;
170 node
= rb_search(&tree
->root
, &range
, cache_tree_comp_range
, &next
);
176 entry
= rb_entry(node
, struct cache_extent
, rb_node
);
180 struct cache_extent
*search_cache_extent2(struct cache_tree
*tree
,
181 u64 objectid
, u64 start
)
183 struct rb_node
*next
;
184 struct rb_node
*node
;
185 struct cache_extent
*entry
;
186 struct cache_extent_search_range range
;
188 range
.objectid
= objectid
;
191 node
= rb_search(&tree
->root
, &range
, cache_tree_comp_range2
, &next
);
197 entry
= rb_entry(node
, struct cache_extent
, rb_node
);
201 struct cache_extent
*first_cache_extent(struct cache_tree
*tree
)
203 struct rb_node
*node
= rb_first(&tree
->root
);
207 return rb_entry(node
, struct cache_extent
, rb_node
);
210 struct cache_extent
*last_cache_extent(struct cache_tree
*tree
)
212 struct rb_node
*node
= rb_last(&tree
->root
);
216 return rb_entry(node
, struct cache_extent
, rb_node
);
219 struct cache_extent
*prev_cache_extent(struct cache_extent
*pe
)
221 struct rb_node
*node
= rb_prev(&pe
->rb_node
);
225 return rb_entry(node
, struct cache_extent
, rb_node
);
228 struct cache_extent
*next_cache_extent(struct cache_extent
*pe
)
230 struct rb_node
*node
= rb_next(&pe
->rb_node
);
234 return rb_entry(node
, struct cache_extent
, rb_node
);
237 void remove_cache_extent(struct cache_tree
*tree
, struct cache_extent
*pe
)
239 rb_erase(&pe
->rb_node
, &tree
->root
);
242 void cache_tree_free_extents(struct cache_tree
*tree
,
243 free_cache_extent free_func
)
245 struct cache_extent
*ce
;
247 while ((ce
= first_cache_extent(tree
))) {
248 remove_cache_extent(tree
, ce
);
/* Default free_cache_extent callback for extents allocated with malloc(). */
static void free_extent_cache(struct cache_extent *pe)
{
	free(pe);
}
258 void free_extent_cache_tree(struct cache_tree
*tree
)
260 cache_tree_free_extents(tree
, free_extent_cache
);
263 int add_merge_cache_extent(struct cache_tree
*tree
, u64 start
, u64 size
)
265 struct cache_extent
*cache
;
266 struct cache_extent
*next
= NULL
;
267 struct cache_extent
*prev
= NULL
;
272 if (cache_tree_empty(tree
))
275 cache
= search_cache_extent(tree
, start
);
278 * Either the tree is completely empty, or the no range after
280 * Either way, the last cache_extent should be prev.
282 prev
= last_cache_extent(tree
);
283 } else if (start
<= cache
->start
) {
285 prev
= prev_cache_extent(cache
);
288 next
= next_cache_extent(cache
);
292 * Ensure the range to be inserted won't cover with existings
293 * Or we will need extra loop to do merge
295 BUG_ON(next
&& start
+ size
> next
->start
);
296 BUG_ON(prev
&& prev
->start
+ prev
->size
> start
);
298 if (next
&& start
+ size
== next
->start
) {
300 next
->size
= next
->start
+ next
->size
- start
;
303 if (prev
&& prev
->start
+ prev
->size
== start
) {
306 next
->size
= next
->start
+ next
->size
- prev
->start
;
307 next
->start
= prev
->start
;
308 remove_cache_extent(tree
, prev
);
311 prev
->size
= start
+ size
- prev
->start
;
315 if (!prev_merged
&& !next_merged
)
316 ret
= add_cache_extent(tree
, start
, size
);