// SPDX-License-Identifier: GPL-2.0+
/*
 * Maple Tree implementation
 * Copyright (c) 2018-2022 Oracle Corporation
 * Authors: Liam R. Howlett <Liam.Howlett@oracle.com>
 *	    Matthew Wilcox <willy@infradead.org>
 * Copyright (c) 2023 ByteDance
 * Author: Peng Zhang <zhangpeng.00@bytedance.com>
 */

/*
 * DOC: Interesting implementation details of the Maple Tree
 *
 * Each node type has a number of slots for entries and a number of slots for
 * pivots.  In the case of dense nodes, the pivots are implied by the position
 * and are simply the slot index + the minimum of the node.
 *
 * In regular B-Tree terms, pivots are called keys.  The term pivot is used to
 * indicate that the tree is specifying ranges.  Pivots may appear in the
 * subtree with an entry attached to the value, whereas keys are unique to a
 * specific position of a B-tree.  Pivot values are inclusive of the slot with
 * the same index.
 *
 * The following illustrates the layout of a range64 node's slots and pivots.
 *
 *  Slots -> | 0 | 1 | 2 | ... | 12 | 13 | 14 | 15 |
 *           ┬   ┬   ┬   ┬     ┬    ┬    ┬    ┬    ┬
 *           │   │   │   │     │    │    │    │    └─ Implied maximum
 *           │   │   │   │     │    │    │    └─ Pivot 14
 *           │   │   │   │     │    │    └─ Pivot 13
 *           │   │   │   │     │    └─ Pivot 12
 *           │   │   │   │     └─ Pivot 11
 *           │   │   │   └─ Pivot 2
 *           │   │   └─ Pivot 1
 *           │   └─ Pivot 0
 *           └─ Implied minimum
 *
 * Slot contents:
 *  Internal (non-leaf) nodes contain pointers to other nodes.
 *  Leaf nodes contain entries.
 *
 * The location of interest is often referred to as an offset.  All offsets
 * have a slot, but the last offset has an implied pivot from the node above
 * (or ULONG_MAX for the root node).
 *
 * Ranges complicate certain write activities.  When modifying any of
 * the B-tree variants, it is known that one entry will either be added or
 * deleted.  When modifying the Maple Tree, one store operation may overwrite
 * the entire data set, or one half of the tree, or the middle half of the tree.
 */
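
/*
 * To make the pivot/slot relationship concrete, the following is a minimal
 * sketch (illustrative only, not part of the implementation) of how a lookup
 * walks the pivots of a node to find the slot covering an index.  It assumes
 * a simplified node with @count valid pivots and ignores the implied maximum,
 * dense nodes and dead nodes, which the real walkers must handle:
 *
 *	static unsigned char example_slot_for_index(const unsigned long *pivots,
 *						    unsigned char count,
 *						    unsigned long index)
 *	{
 *		unsigned char slot = 0;
 *
 *		// Pivots are inclusive upper bounds, so the first pivot
 *		// >= index identifies the slot covering the range.
 *		while (slot < count && pivots[slot] < index)
 *			slot++;
 *
 *		return slot;	// == count means the implied-maximum slot.
 *	}
 *
 * For a node spanning 0-100 with pivots {10, 20, 35}, index 15 stops at
 * pivot 20 (slot 1), while index 50 runs past every stored pivot and lands
 * in the slot bounded by the implied maximum (100).
 */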

#include <linux/maple_tree.h>
#include <linux/xarray.h>
#include <linux/types.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/limits.h>
#include <asm/barrier.h>

#define CREATE_TRACE_POINTS
#include <trace/events/maple_tree.h>

/*
 * Kernel pointer hashing renders much of the maple tree dump useless as tagged
 * pointers get hashed to arbitrary values.
 *
 * If CONFIG_DEBUG_VM_MAPLE_TREE is set we are in a debug mode where it is
 * permissible to bypass this. Otherwise remain cautious and retain the hashing.
 *
 * Userland doesn't know about %px so also use %p there.
 */
#if defined(__KERNEL__) && defined(CONFIG_DEBUG_VM_MAPLE_TREE)
#define PTR_FMT "%px"
#else
#define PTR_FMT "%p"
#endif

#define MA_ROOT_PARENT 1

/*
 * Maple state flags
 * * MA_STATE_BULK		- Bulk insert mode
 * * MA_STATE_REBALANCE		- Indicate a rebalance during bulk insert
 * * MA_STATE_PREALLOC		- Preallocated nodes, WARN_ON allocation
 */
#define MA_STATE_BULK		1
#define MA_STATE_REBALANCE	2
#define MA_STATE_PREALLOC	4

#define ma_parent_ptr(x) ((struct maple_pnode *)(x))
#define mas_tree_parent(x) ((unsigned long)(x->tree) | MA_ROOT_PARENT)
#define ma_mnode_ptr(x) ((struct maple_node *)(x))
#define ma_enode_ptr(x) ((struct maple_enode *)(x))
static struct kmem_cache *maple_node_cache;

#ifdef CONFIG_DEBUG_MAPLE_TREE
static const unsigned long mt_max[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS,
	[maple_leaf_64]		= ULONG_MAX,
	[maple_range_64]	= ULONG_MAX,
	[maple_arange_64]	= ULONG_MAX,
};
#define mt_node_max(x) mt_max[mte_node_type(x)]
#endif

static const unsigned char mt_slots[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS,
	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS,
	[maple_range_64]	= MAPLE_RANGE64_SLOTS,
	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS,
};
#define mt_slot_count(x) mt_slots[mte_node_type(x)]

static const unsigned char mt_pivots[] = {
	[maple_dense]		= 0,
	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS - 1,
	[maple_range_64]	= MAPLE_RANGE64_SLOTS - 1,
	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS - 1,
};
#define mt_pivot_count(x) mt_pivots[mte_node_type(x)]

static const unsigned char mt_min_slots[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS / 2,
	[maple_leaf_64]		= (MAPLE_RANGE64_SLOTS / 2) - 2,
	[maple_range_64]	= (MAPLE_RANGE64_SLOTS / 2) - 2,
	[maple_arange_64]	= (MAPLE_ARANGE64_SLOTS / 2) - 1,
};
#define mt_min_slot_count(x) mt_min_slots[mte_node_type(x)]

#define MAPLE_BIG_NODE_SLOTS	(MAPLE_RANGE64_SLOTS * 2 + 2)
#define MAPLE_BIG_NODE_GAPS	(MAPLE_ARANGE64_SLOTS * 2 + 1)

struct maple_big_node {
	unsigned long pivot[MAPLE_BIG_NODE_SLOTS - 1];
	union {
		struct maple_enode *slot[MAPLE_BIG_NODE_SLOTS];
		struct {
			unsigned long padding[MAPLE_BIG_NODE_GAPS];
			unsigned long gap[MAPLE_BIG_NODE_GAPS];
		};
	};
	unsigned char b_end;
	enum maple_type type;
};

/*
 * The maple_subtree_state is used to build a tree to replace a segment of an
 * existing tree in a more atomic way.  Any walkers of the older tree will hit
 * a dead node and restart on updates.
 */
struct maple_subtree_state {
	struct ma_state *orig_l;	/* Original left side of subtree */
	struct ma_state *orig_r;	/* Original right side of subtree */
	struct ma_state *l;		/* New left side of subtree */
	struct ma_state *m;		/* New middle of subtree (rare) */
	struct ma_state *r;		/* New right side of subtree */
	struct ma_topiary *free;	/* nodes to be freed */
	struct ma_topiary *destroy;	/* Nodes to be destroyed (walked and freed) */
	struct maple_big_node *bn;
};

#ifdef CONFIG_KASAN_STACK
/* Prevent mas_wr_bnode() from exceeding the stack frame limit */
#define noinline_for_kasan noinline_for_stack
#else
#define noinline_for_kasan inline
#endif

/* Functions */
static inline struct maple_node *mt_alloc_one(gfp_t gfp)
{
	return kmem_cache_alloc(maple_node_cache, gfp);
}

static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes)
{
	return kmem_cache_alloc_bulk(maple_node_cache, gfp, size, nodes);
}

static inline void mt_free_one(struct maple_node *node)
{
	kmem_cache_free(maple_node_cache, node);
}

static inline void mt_free_bulk(size_t size, void __rcu **nodes)
{
	kmem_cache_free_bulk(maple_node_cache, size, (void **)nodes);
}

static void mt_free_rcu(struct rcu_head *head)
{
	struct maple_node *node = container_of(head, struct maple_node, rcu);

	kmem_cache_free(maple_node_cache, node);
}

/*
 * ma_free_rcu() - Use rcu callback to free a maple node
 * @node: The node to free
 *
 * The maple tree uses the parent pointer to indicate this node is no longer in
 * use and will be freed.
 */
static void ma_free_rcu(struct maple_node *node)
{
	WARN_ON(node->parent != ma_parent_ptr(node));
	call_rcu(&node->rcu, mt_free_rcu);
}

static void mt_set_height(struct maple_tree *mt, unsigned char height)
{
	unsigned int new_flags = mt->ma_flags;

	new_flags &= ~MT_FLAGS_HEIGHT_MASK;
	MT_BUG_ON(mt, height > MAPLE_HEIGHT_MAX);
	new_flags |= height << MT_FLAGS_HEIGHT_OFFSET;
	mt->ma_flags = new_flags;
}

static unsigned int mas_mt_height(struct ma_state *mas)
{
	return mt_height(mas->tree);
}

static inline unsigned int mt_attr(struct maple_tree *mt)
{
	return mt->ma_flags & ~MT_FLAGS_HEIGHT_MASK;
}

static __always_inline enum maple_type mte_node_type(
		const struct maple_enode *entry)
{
	return ((unsigned long)entry >> MAPLE_NODE_TYPE_SHIFT) &
		MAPLE_NODE_TYPE_MASK;
}

static __always_inline bool ma_is_dense(const enum maple_type type)
{
	return type < maple_leaf_64;
}

static __always_inline bool ma_is_leaf(const enum maple_type type)
{
	return type < maple_range_64;
}

static __always_inline bool mte_is_leaf(const struct maple_enode *entry)
{
	return ma_is_leaf(mte_node_type(entry));
}

/*
 * We also reserve values with the bottom two bits set to '10' which are
 * below 4096
 */
static __always_inline bool mt_is_reserved(const void *entry)
{
	return ((unsigned long)entry < MAPLE_RESERVED_RANGE) &&
		xa_is_internal(entry);
}
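
/*
 * For example (illustrative values): 0x0a has its bottom two bits set to
 * '10' and is below MAPLE_RESERVED_RANGE, so mt_is_reserved() is true;
 * 0x100a shares the bit pattern but lies above the range, and an aligned
 * pointer such as 0x1000 fails xa_is_internal(), so neither of those is
 * reserved.
 */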

static __always_inline void mas_set_err(struct ma_state *mas, long err)
{
	mas->node = MA_ERROR(err);
	mas->status = ma_error;
}

static __always_inline bool mas_is_ptr(const struct ma_state *mas)
{
	return mas->status == ma_root;
}

static __always_inline bool mas_is_start(const struct ma_state *mas)
{
	return mas->status == ma_start;
}

static __always_inline bool mas_is_none(const struct ma_state *mas)
{
	return mas->status == ma_none;
}

static __always_inline bool mas_is_paused(const struct ma_state *mas)
{
	return mas->status == ma_pause;
}

static __always_inline bool mas_is_overflow(struct ma_state *mas)
{
	return mas->status == ma_overflow;
}

static inline bool mas_is_underflow(struct ma_state *mas)
{
	return mas->status == ma_underflow;
}

static __always_inline struct maple_node *mte_to_node(
		const struct maple_enode *entry)
{
	return (struct maple_node *)((unsigned long)entry & ~MAPLE_NODE_MASK);
}

/*
 * mte_to_mat() - Convert a maple encoded node to a maple topiary node.
 * @entry: The maple encoded node
 *
 * Return: a maple topiary pointer
 */
static inline struct maple_topiary *mte_to_mat(const struct maple_enode *entry)
{
	return (struct maple_topiary *)
		((unsigned long)entry & ~MAPLE_NODE_MASK);
}

/*
 * mas_mn() - Get the maple state node.
 * @mas: The maple state
 *
 * Return: the maple node (not encoded - bare pointer).
 */
static inline struct maple_node *mas_mn(const struct ma_state *mas)
{
	return mte_to_node(mas->node);
}

/*
 * mte_set_node_dead() - Set a maple encoded node as dead.
 * @mn: The maple encoded node.
 */
static inline void mte_set_node_dead(struct maple_enode *mn)
{
	mte_to_node(mn)->parent = ma_parent_ptr(mte_to_node(mn));
	smp_wmb(); /* Needed for RCU */
}

/* Bit 1 indicates the root is a node */
#define MAPLE_ROOT_NODE			0x02
/* maple_type stored bit 3-6 */
#define MAPLE_ENODE_TYPE_SHIFT		0x03
/* Bit 2 means a NULL somewhere below */
#define MAPLE_ENODE_NULL		0x04

static inline struct maple_enode *mt_mk_node(const struct maple_node *node,
					     enum maple_type type)
{
	return (void *)((unsigned long)node |
			(type << MAPLE_ENODE_TYPE_SHIFT) | MAPLE_ENODE_NULL);
}

static inline void *mte_mk_root(const struct maple_enode *node)
{
	return (void *)((unsigned long)node | MAPLE_ROOT_NODE);
}

static inline void *mte_safe_root(const struct maple_enode *node)
{
	return (void *)((unsigned long)node & ~MAPLE_ROOT_NODE);
}
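
/*
 * A short round trip of the encoding above (a sketch, illustrative only):
 * a 256B-aligned node at ...f00 encoded as maple_range_64 (enum value 2)
 * becomes ...f14, i.e. (2 << MAPLE_ENODE_TYPE_SHIFT) | MAPLE_ENODE_NULL;
 * mte_node_type() recovers the type by shifting and masking, and
 * mte_to_node() clears the low bits to recover the pointer:
 *
 *	struct maple_enode *enode = mt_mk_node(node, maple_range_64);
 *
 *	WARN_ON(mte_node_type(enode) != maple_range_64);
 *	WARN_ON(mte_to_node(enode) != node);
 */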

static inline void __maybe_unused *mte_set_full(const struct maple_enode *node)
{
	return (void *)((unsigned long)node & ~MAPLE_ENODE_NULL);
}

static inline void __maybe_unused *mte_clear_full(const struct maple_enode *node)
{
	return (void *)((unsigned long)node | MAPLE_ENODE_NULL);
}

static inline bool __maybe_unused mte_has_null(const struct maple_enode *node)
{
	return (unsigned long)node & MAPLE_ENODE_NULL;
}

static __always_inline bool ma_is_root(struct maple_node *node)
{
	return ((unsigned long)node->parent & MA_ROOT_PARENT);
}

static __always_inline bool mte_is_root(const struct maple_enode *node)
{
	return ma_is_root(mte_to_node(node));
}

static inline bool mas_is_root_limits(const struct ma_state *mas)
{
	return !mas->min && mas->max == ULONG_MAX;
}

static __always_inline bool mt_is_alloc(struct maple_tree *mt)
{
	return (mt->ma_flags & MT_FLAGS_ALLOC_RANGE);
}

/*
 * The Parent Pointer
 * Excluding root, the parent pointer is 256B aligned like all other tree
 * nodes.  When storing 32 or 64 bit values, the offset can fit into 5 bits.
 * The 16 bit values need an extra bit to store the offset.  This extra bit
 * comes from a reuse of the last bit in the node type.  This is possible by
 * using bit 1 to indicate if bit 2 is part of the type or the slot.
 *
 * Note types:
 *  0x??1 = Root
 *  0x?00 = 16 bit nodes
 *  0x010 = 32 bit nodes
 *  0x110 = 64 bit nodes
 *
 * Slot size and alignment
 *  0b??1 : Root
 *  0b?00 : 16 bit values, type in 0-1, slot in 2-7
 *  0b010 : 32 bit values, type in 0-2, slot in 3-7
 *  0b110 : 64 bit values, type in 0-2, slot in 3-7
 */
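
/*
 * A worked example of the encoding described above (illustrative only,
 * using the macros defined below): a 64 bit node stored in slot 5 of its
 * parent has a parent pointer of
 * parent_address | (5 << MAPLE_PARENT_SLOT_SHIFT) | MAPLE_PARENT_RANGE64,
 * so the low byte is 0b00101110.  Decoding reverses this: bit 1 is set,
 * so the shift is MAPLE_PARENT_SLOT_SHIFT (3), the slot is
 * (0b00101110 & MAPLE_PARENT_SLOT_MASK) >> 3 == 5, and clearing
 * MAPLE_NODE_MASK recovers the parent node address.
 */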

#define MAPLE_PARENT_ROOT		0x01

#define MAPLE_PARENT_SLOT_SHIFT		0x03
#define MAPLE_PARENT_SLOT_MASK		0xF8

#define MAPLE_PARENT_16B_SLOT_SHIFT	0x02
#define MAPLE_PARENT_16B_SLOT_MASK	0xFC

#define MAPLE_PARENT_RANGE64		0x06
#define MAPLE_PARENT_RANGE32		0x04
#define MAPLE_PARENT_NOT_RANGE16	0x02

/*
 * mte_parent_shift() - Get the parent shift for the slot storage.
 * @parent: The parent pointer cast as an unsigned long
 * Return: The shift into that pointer to the start of the slot
 */
static inline unsigned long mte_parent_shift(unsigned long parent)
{
	/* Note bit 1 == 0 means 16B */
	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
		return MAPLE_PARENT_SLOT_SHIFT;

	return MAPLE_PARENT_16B_SLOT_SHIFT;
}

/*
 * mte_parent_slot_mask() - Get the slot mask for the parent.
 * @parent: The parent pointer cast as an unsigned long.
 * Return: The slot mask for that parent.
 */
static inline unsigned long mte_parent_slot_mask(unsigned long parent)
{
	/* Note bit 1 == 0 means 16B */
	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
		return MAPLE_PARENT_SLOT_MASK;

	return MAPLE_PARENT_16B_SLOT_MASK;
}

/*
 * mas_parent_type() - Return the maple_type of the parent from the stored
 * parent type.
 * @mas: The maple state
 * @enode: The maple_enode to extract the parent's enum
 * Return: The node->parent maple_type
 */
static inline
enum maple_type mas_parent_type(struct ma_state *mas, struct maple_enode *enode)
{
	unsigned long p_type;

	p_type = (unsigned long)mte_to_node(enode)->parent;
	if (WARN_ON(p_type & MAPLE_PARENT_ROOT))
		return 0;

	p_type &= MAPLE_NODE_MASK;
	p_type &= ~mte_parent_slot_mask(p_type);
	switch (p_type) {
	case MAPLE_PARENT_RANGE64: /* or MAPLE_PARENT_ARANGE64 */
		if (mt_is_alloc(mas->tree))
			return maple_arange_64;
		return maple_range_64;
	}

	return 0;
}

/*
 * mas_set_parent() - Set the parent node and encode the slot
 * @mas: The maple state
 * @enode: The encoded maple node.
 * @parent: The encoded maple node that is the parent of @enode.
 * @slot: The slot that @enode resides in @parent.
 *
 * Slot number is encoded in the enode->parent bit 3-6 or 2-6, depending on the
 * parent type.
 */
static inline
void mas_set_parent(struct ma_state *mas, struct maple_enode *enode,
		    const struct maple_enode *parent, unsigned char slot)
{
	unsigned long val = (unsigned long)parent;
	unsigned long shift;
	unsigned long type;
	enum maple_type p_type = mte_node_type(parent);

	MAS_BUG_ON(mas, p_type == maple_dense);
	MAS_BUG_ON(mas, p_type == maple_leaf_64);

	switch (p_type) {
	case maple_range_64:
	case maple_arange_64:
		shift = MAPLE_PARENT_SLOT_SHIFT;
		type = MAPLE_PARENT_RANGE64;
		break;
	default:
	case maple_dense:
	case maple_leaf_64:
		shift = type = 0;
		break;
	}

	val &= ~MAPLE_NODE_MASK; /* Clear all node metadata in parent */
	val |= (slot << shift) | type;
	mte_to_node(enode)->parent = ma_parent_ptr(val);
}

/*
 * mte_parent_slot() - get the parent slot of @enode.
 * @enode: The encoded maple node.
 *
 * Return: The slot in the parent node where @enode resides.
 */
static __always_inline
unsigned int mte_parent_slot(const struct maple_enode *enode)
{
	unsigned long val = (unsigned long)mte_to_node(enode)->parent;

	if (unlikely(val & MA_ROOT_PARENT))
		return 0;

	/*
	 * Okay to use MAPLE_PARENT_16B_SLOT_MASK as the last bit will be lost
	 * by shift if the parent shift is MAPLE_PARENT_SLOT_SHIFT
	 */
	return (val & MAPLE_PARENT_16B_SLOT_MASK) >> mte_parent_shift(val);
}

/*
 * mte_parent() - Get the parent of @enode.
 * @enode: The encoded maple node.
 *
 * Return: The parent maple node.
 */
static __always_inline
struct maple_node *mte_parent(const struct maple_enode *enode)
{
	return (void *)((unsigned long)
			(mte_to_node(enode)->parent) & ~MAPLE_NODE_MASK);
}

/*
 * ma_dead_node() - check if the @node is dead.
 * @node: The maple node
 *
 * Return: true if dead, false otherwise.
 */
static __always_inline bool ma_dead_node(const struct maple_node *node)
{
	struct maple_node *parent;

	/* Do not reorder reads from the node prior to the parent check */
	smp_rmb();
	parent = (void *)((unsigned long) node->parent & ~MAPLE_NODE_MASK);
	return (parent == node);
}

/*
 * mte_dead_node() - check if the @enode is dead.
 * @enode: The encoded maple node
 *
 * Return: true if dead, false otherwise.
 */
static __always_inline bool mte_dead_node(const struct maple_enode *enode)
{
	struct maple_node *node;

	node = mte_to_node(enode);
	return ma_dead_node(node);
}

/*
 * mas_allocated() - Get the number of nodes allocated in a maple state.
 * @mas: The maple state
 *
 * The ma_state alloc member is overloaded to hold a pointer to the first
 * allocated node or to the number of requested nodes to allocate.  If bit 0 is
 * set, then the alloc contains the number of requested nodes.  If there is an
 * allocated node, then the total allocated nodes is in that node.
 *
 * Return: The total number of nodes allocated
 */
static inline unsigned long mas_allocated(const struct ma_state *mas)
{
	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1))
		return 0;

	return mas->alloc->total;
}

/*
 * mas_set_alloc_req() - Set the requested number of allocations.
 * @mas: the maple state
 * @count: the number of allocations.
 *
 * The requested number of allocations is either in the first allocated node,
 * located in @mas->alloc->request_count, or directly in @mas->alloc if there is
 * no allocated node.  Set the request either in the node or do the necessary
 * encoding to store in @mas->alloc directly.
 */
static inline void mas_set_alloc_req(struct ma_state *mas, unsigned long count)
{
	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1)) {
		if (!count)
			mas->alloc = NULL;
		else
			mas->alloc = (struct maple_alloc *)(((count) << 1U) | 1U);
		return;
	}

	mas->alloc->request_count = count;
}

/*
 * mas_alloc_req() - get the requested number of allocations.
 * @mas: The maple state
 *
 * The alloc count is either stored directly in @mas, or in
 * @mas->alloc->request_count if there is at least one node allocated.  Decode
 * the request count if it's stored directly in @mas->alloc.
 *
 * Return: The allocation request count.
 */
static inline unsigned int mas_alloc_req(const struct ma_state *mas)
{
	if ((unsigned long)mas->alloc & 0x1)
		return (unsigned long)(mas->alloc) >> 1;
	else if (mas->alloc)
		return mas->alloc->request_count;
	return 0;
}
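
/*
 * A worked example of the encoding above (illustrative values): calling
 * mas_set_alloc_req() with a count of 3 on a state with no allocated nodes
 * stores (3 << 1) | 1 == 0x7 in mas->alloc.  Bit 0 is set, so
 * mas_allocated() reports 0 while mas_alloc_req() decodes 0x7 >> 1 == 3.
 * Once a node is allocated, the pointer is even and both counts live in
 * that node instead.
 */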

/*
 * ma_pivots() - Get a pointer to the maple node pivots.
 * @node: the maple node
 * @type: the node type
 *
 * In the event of a dead node, this array may be %NULL
 *
 * Return: A pointer to the maple node pivots
 */
static inline unsigned long *ma_pivots(struct maple_node *node,
				       enum maple_type type)
{
	switch (type) {
	case maple_arange_64:
		return node->ma64.pivot;
	case maple_range_64:
	case maple_leaf_64:
		return node->mr64.pivot;
	case maple_dense:
		return NULL;
	}
	return NULL;
}

/*
 * ma_gaps() - Get a pointer to the maple node gaps.
 * @node: the maple node
 * @type: the node type
 *
 * Return: A pointer to the maple node gaps
 */
static inline unsigned long *ma_gaps(struct maple_node *node,
				     enum maple_type type)
{
	switch (type) {
	case maple_arange_64:
		return node->ma64.gap;
	case maple_range_64:
	case maple_leaf_64:
	case maple_dense:
		return NULL;
	}
	return NULL;
}

/*
 * mas_safe_pivot() - get the pivot at @piv or mas->max.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @piv: The pivot to fetch
 * @type: The maple node type
 *
 * Return: The pivot at @piv within the limit of the @pivots array, @mas->max
 * otherwise.
 */
static __always_inline unsigned long
mas_safe_pivot(const struct ma_state *mas, unsigned long *pivots,
	       unsigned char piv, enum maple_type type)
{
	if (piv >= mt_pivots[type])
		return mas->max;

	return pivots[piv];
}

/*
 * mas_safe_min() - Return the minimum for a given offset.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @offset: The offset into the pivot array
 *
 * Return: The minimum range value that is contained in @offset.
 */
static inline unsigned long
mas_safe_min(struct ma_state *mas, unsigned long *pivots, unsigned char offset)
{
	if (likely(offset))
		return pivots[offset - 1] + 1;

	return mas->min;
}

/*
 * mte_set_pivot() - Set a pivot to a value in an encoded maple node.
 * @mn: The encoded maple node
 * @piv: The pivot offset
 * @val: The value of the pivot
 */
static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv,
				 unsigned long val)
{
	struct maple_node *node = mte_to_node(mn);
	enum maple_type type = mte_node_type(mn);

	BUG_ON(piv >= mt_pivots[type]);
	switch (type) {
	case maple_range_64:
	case maple_leaf_64:
		node->mr64.pivot[piv] = val;
		break;
	case maple_arange_64:
		node->ma64.pivot[piv] = val;
		break;
	case maple_dense:
		break;
	}
}

/*
 * ma_slots() - Get a pointer to the maple node slots.
 * @mn: The maple node
 * @mt: The maple node type
 *
 * Return: A pointer to the maple node slots
 */
static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt)
{
	switch (mt) {
	case maple_arange_64:
		return mn->ma64.slot;
	case maple_range_64:
	case maple_leaf_64:
		return mn->mr64.slot;
	case maple_dense:
		return mn->slot;
	}

	return NULL;
}

static inline bool mt_write_locked(const struct maple_tree *mt)
{
	return mt_external_lock(mt) ? mt_write_lock_is_held(mt) :
		lockdep_is_held(&mt->ma_lock);
}

static __always_inline bool mt_locked(const struct maple_tree *mt)
{
	return mt_external_lock(mt) ? mt_lock_is_held(mt) :
		lockdep_is_held(&mt->ma_lock);
}

static __always_inline void *mt_slot(const struct maple_tree *mt,
		void __rcu **slots, unsigned char offset)
{
	return rcu_dereference_check(slots[offset], mt_locked(mt));
}

static __always_inline void *mt_slot_locked(struct maple_tree *mt,
		void __rcu **slots, unsigned char offset)
{
	return rcu_dereference_protected(slots[offset], mt_write_locked(mt));
}

/*
 * mas_slot_locked() - Get the slot value when holding the maple tree lock.
 * @mas: The maple state
 * @slots: The pointer to the slots
 * @offset: The offset into the slots array to fetch
 *
 * Return: The entry stored in @slots at the @offset.
 */
static __always_inline void *mas_slot_locked(struct ma_state *mas,
		void __rcu **slots, unsigned char offset)
{
	return mt_slot_locked(mas->tree, slots, offset);
}

/*
 * mas_slot() - Get the slot value when not holding the maple tree lock.
 * @mas: The maple state
 * @slots: The pointer to the slots
 * @offset: The offset into the slots array to fetch
 *
 * Return: The entry stored in @slots at the @offset
 */
static __always_inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
		unsigned char offset)
{
	return mt_slot(mas->tree, slots, offset);
}

/*
 * mas_root() - Get the maple tree root.
 * @mas: The maple state.
 *
 * Return: The pointer to the root of the tree
 */
static __always_inline void *mas_root(struct ma_state *mas)
{
	return rcu_dereference_check(mas->tree->ma_root, mt_locked(mas->tree));
}

static inline void *mt_root_locked(struct maple_tree *mt)
{
	return rcu_dereference_protected(mt->ma_root, mt_write_locked(mt));
}

/*
 * mas_root_locked() - Get the maple tree root when holding the maple tree lock.
 * @mas: The maple state.
 *
 * Return: The pointer to the root of the tree
 */
static inline void *mas_root_locked(struct ma_state *mas)
{
	return mt_root_locked(mas->tree);
}

static inline struct maple_metadata *ma_meta(struct maple_node *mn,
					     enum maple_type mt)
{
	switch (mt) {
	case maple_arange_64:
		return &mn->ma64.meta;
	default:
		return &mn->mr64.meta;
	}
}

/*
 * ma_set_meta() - Set the metadata information of a node.
 * @mn: The maple node
 * @mt: The maple node type
 * @offset: The offset of the highest sub-gap in this node.
 * @end: The end of the data in this node.
 */
static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt,
			       unsigned char offset, unsigned char end)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	meta->gap = offset;
	meta->end = end;
}

/*
 * mt_clear_meta() - clear the metadata information of a node, if it exists
 * @mt: The maple tree
 * @mn: The maple node
 * @type: The maple node type
 */
static inline void mt_clear_meta(struct maple_tree *mt, struct maple_node *mn,
				 enum maple_type type)
{
	struct maple_metadata *meta;
	unsigned long *pivots;
	void __rcu **slots;
	void *next;

	switch (type) {
	case maple_range_64:
		pivots = mn->mr64.pivot;
		if (unlikely(pivots[MAPLE_RANGE64_SLOTS - 2])) {
			slots = mn->mr64.slot;
			next = mt_slot_locked(mt, slots,
					      MAPLE_RANGE64_SLOTS - 1);
			if (unlikely((mte_to_node(next) &&
				      mte_node_type(next))))
				return; /* no metadata, could be node */
		}
		fallthrough;
	case maple_arange_64:
		meta = ma_meta(mn, type);
		break;
	default:
		return;
	}

	meta->gap = 0;
	meta->end = 0;
}

/*
 * ma_meta_end() - Get the data end of a node from the metadata
 * @mn: The maple node
 * @mt: The maple node type
 */
static inline unsigned char ma_meta_end(struct maple_node *mn,
					enum maple_type mt)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	return meta->end;
}

/*
 * ma_meta_gap() - Get the largest gap location of a node from the metadata
 * @mn: The maple node
 */
static inline unsigned char ma_meta_gap(struct maple_node *mn)
{
	return mn->ma64.meta.gap;
}

/*
 * ma_set_meta_gap() - Set the largest gap location in a node's metadata
 * @mn: The maple node
 * @mt: The maple node type
 * @offset: The location of the largest gap.
 */
static inline void ma_set_meta_gap(struct maple_node *mn, enum maple_type mt,
				   unsigned char offset)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	meta->gap = offset;
}

/*
 * mat_add() - Add a @dead_enode to the ma_topiary linked list of dead nodes.
 * @mat: the ma_topiary, a linked list of dead nodes.
 * @dead_enode: the node to be marked as dead and added to the tail of the list
 *
 * Add the @dead_enode to the linked list in @mat.
 */
static inline void mat_add(struct ma_topiary *mat,
			   struct maple_enode *dead_enode)
{
	mte_set_node_dead(dead_enode);
	mte_to_mat(dead_enode)->next = NULL;
	if (!mat->tail) {
		mat->tail = mat->head = dead_enode;
		return;
	}

	mte_to_mat(mat->tail)->next = dead_enode;
	mat->tail = dead_enode;
}

static void mt_free_walk(struct rcu_head *head);
static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
			    bool free);
/*
 * mas_mat_destroy() - Free all nodes and subtrees in a dead list.
 * @mas: the maple state
 * @mat: the ma_topiary linked list of dead nodes to free.
 *
 * Destroy walk a dead list.
 */
static void mas_mat_destroy(struct ma_state *mas, struct ma_topiary *mat)
{
	struct maple_enode *next;
	struct maple_node *node;
	bool in_rcu = mt_in_rcu(mas->tree);

	while (mat->head) {
		next = mte_to_mat(mat->head)->next;
		node = mte_to_node(mat->head);
		mt_destroy_walk(mat->head, mas->tree, !in_rcu);
		if (in_rcu)
			call_rcu(&node->rcu, mt_free_walk);
		mat->head = next;
	}
}

/*
 * mas_descend() - Descend into the slot stored in the ma_state.
 * @mas: the maple state.
 *
 * Note: Not RCU safe, only use in write side or debug code.
 */
static inline void mas_descend(struct ma_state *mas)
{
	enum maple_type type;
	unsigned long *pivots;
	struct maple_node *node;
	void __rcu **slots;

	node = mas_mn(mas);
	type = mte_node_type(mas->node);
	pivots = ma_pivots(node, type);
	slots = ma_slots(node, type);

	if (mas->offset)
		mas->min = pivots[mas->offset - 1] + 1;
	mas->max = mas_safe_pivot(mas, pivots, mas->offset, type);
	mas->node = mas_slot(mas, slots, mas->offset);
}

/*
 * mte_set_gap() - Set a maple node gap.
 * @mn: The encoded maple node
 * @gap: The offset of the gap to set
 * @val: The gap value
 */
static inline void mte_set_gap(const struct maple_enode *mn,
			       unsigned char gap, unsigned long val)
{
	switch (mte_node_type(mn)) {
	default:
		break;
	case maple_arange_64:
		mte_to_node(mn)->ma64.gap[gap] = val;
		break;
	}
}

/*
 * mas_ascend() - Walk up a level of the tree.
 * @mas: The maple state
 *
 * Sets the @mas->max and @mas->min to the correct values when walking up.  This
 * may cause several levels of walking up to find the correct min and max.
 * May find a dead node which will cause a premature return.
 * Return: 1 on dead node, 0 otherwise
 */
static int mas_ascend(struct ma_state *mas)
{
	struct maple_enode *p_enode; /* parent enode. */
	struct maple_enode *a_enode; /* ancestor enode. */
	struct maple_node *a_node; /* ancestor node. */
	struct maple_node *p_node; /* parent node. */
	unsigned char a_slot;
	enum maple_type a_type;
	unsigned long min, max;
	unsigned long *pivots;
	bool set_max = false, set_min = false;

	a_node = mas_mn(mas);
	if (ma_is_root(a_node)) {
		mas->offset = 0;
		return 0;
	}

	p_node = mte_parent(mas->node);
	if (unlikely(a_node == p_node))
		return 1;

	a_type = mas_parent_type(mas, mas->node);
	mas->offset = mte_parent_slot(mas->node);
	a_enode = mt_mk_node(p_node, a_type);

	/* Check to make sure all parent information is still accurate */
	if (p_node != mte_parent(mas->node))
		return 1;

	mas->node = a_enode;

	if (mte_is_root(a_enode)) {
		mas->max = ULONG_MAX;
		mas->min = 0;
		return 0;
	}

	min = 0;
	max = ULONG_MAX;
	if (!mas->offset) {
		min = mas->min;
		set_min = true;
	}

	if (mas->max == ULONG_MAX)
		set_max = true;

	do {
		p_enode = a_enode;
		a_type = mas_parent_type(mas, p_enode);
		a_node = mte_parent(p_enode);
		a_slot = mte_parent_slot(p_enode);
		a_enode = mt_mk_node(a_node, a_type);
		pivots = ma_pivots(a_node, a_type);

		if (unlikely(ma_dead_node(a_node)))
			return 1;

		if (!set_min && a_slot) {
			set_min = true;
			min = pivots[a_slot - 1] + 1;
		}

		if (!set_max && a_slot < mt_pivots[a_type]) {
			set_max = true;
			max = pivots[a_slot];
		}

		if (unlikely(ma_dead_node(a_node)))
			return 1;

		if (unlikely(ma_is_root(a_node)))
			break;

	} while (!set_min || !set_max);

	mas->max = max;
	mas->min = min;
	return 0;
}

/*
 * mas_pop_node() - Get a previously allocated maple node from the maple state.
 * @mas: The maple state
 *
 * Return: A pointer to a maple node.
 */
static inline struct maple_node *mas_pop_node(struct ma_state *mas)
{
	struct maple_alloc *ret, *node = mas->alloc;
	unsigned long total = mas_allocated(mas);
	unsigned int req = mas_alloc_req(mas);

	/* nothing or a request pending. */
	if (WARN_ON(!total))
		return NULL;

	if (total == 1) {
		/* single allocation in this ma_state */
		mas->alloc = NULL;
		ret = node;
		goto single_node;
	}

	if (node->node_count == 1) {
		/* Single allocation in this node. */
		mas->alloc = node->slot[0];
		mas->alloc->total = node->total - 1;
		ret = node;
		goto new_head;
	}
	node->total--;
	ret = node->slot[--node->node_count];
	node->slot[node->node_count] = NULL;

single_node:
new_head:
	if (req) {
		req++;
		mas_set_alloc_req(mas, req);
	}

	memset(ret, 0, sizeof(*ret));
	return (struct maple_node *)ret;
}

/*
 * mas_push_node() - Push a node back on the maple state allocation.
 * @mas: The maple state
 * @used: The used maple node
 *
 * Stores the maple node back into @mas->alloc for reuse.  Updates allocated and
 * requested node count as necessary.
 */
static inline void mas_push_node(struct ma_state *mas, struct maple_node *used)
{
	struct maple_alloc *reuse = (struct maple_alloc *)used;
	struct maple_alloc *head = mas->alloc;
	unsigned long count;
	unsigned int requested = mas_alloc_req(mas);

	count = mas_allocated(mas);

	reuse->request_count = 0;
	reuse->node_count = 0;
	if (count) {
		if (head->node_count < MAPLE_ALLOC_SLOTS) {
			head->slot[head->node_count++] = reuse;
			head->total++;
			goto done;
		}
		reuse->slot[0] = head;
		reuse->node_count = 1;
	}

	reuse->total = count + 1;
	mas->alloc = reuse;
done:
	if (requested > 1)
		mas_set_alloc_req(mas, requested - 1);
}

/*
 * mas_alloc_nodes() - Allocate nodes into a maple state
 * @mas: The maple state
 * @gfp: The GFP Flags
 */
static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
{
	struct maple_alloc *node;
	unsigned long allocated = mas_allocated(mas);
	unsigned int requested = mas_alloc_req(mas);
	unsigned int count;
	void **slots = NULL;
	unsigned int max_req = 0;

	if (!requested)
		return;

	mas_set_alloc_req(mas, 0);
	if (mas->mas_flags & MA_STATE_PREALLOC) {
		if (allocated)
			return;
		WARN_ON(!allocated);
	}

	if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS) {
		node = (struct maple_alloc *)mt_alloc_one(gfp);
		if (!node)
			goto nomem_one;

		if (allocated) {
			node->slot[0] = mas->alloc;
			node->node_count = 1;
		} else {
			node->node_count = 0;
		}

		mas->alloc = node;
		node->total = ++allocated;
		node->request_count = 0;
		requested--;
	}

	node = mas->alloc;
	while (requested) {
		max_req = MAPLE_ALLOC_SLOTS - node->node_count;
		slots = (void **)&node->slot[node->node_count];
		max_req = min(requested, max_req);
		count = mt_alloc_bulk(gfp, max_req, slots);
		if (!count)
			goto nomem_bulk;

		if (node->node_count == 0) {
			node->slot[0]->node_count = 0;
			node->slot[0]->request_count = 0;
		}

		node->node_count += count;
		allocated += count;
		/* find a non-full node */
		do {
			node = node->slot[0];
		} while (unlikely(node->node_count == MAPLE_ALLOC_SLOTS));
		requested -= count;
	}
	mas->alloc->total = allocated;
	return;

nomem_bulk:
	/* Clean up potential freed allocations on bulk failure */
	memset(slots, 0, max_req * sizeof(unsigned long));
	mas->alloc->total = allocated;
nomem_one:
	mas_set_alloc_req(mas, requested);
	mas_set_err(mas, -ENOMEM);
}

/*
 * mas_free() - Free an encoded maple node
 * @mas: The maple state
 * @used: The encoded maple node to free.
 *
 * Uses rcu free if necessary, pushes @used back on the maple state allocations
 * otherwise.
 */
static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
{
	struct maple_node *tmp = mte_to_node(used);

	if (mt_in_rcu(mas->tree))
		ma_free_rcu(tmp);
	else
		mas_push_node(mas, tmp);
}

/*
 * mas_node_count_gfp() - Check if enough nodes are allocated and request more
 * if there are not enough nodes.
 * @mas: The maple state
 * @count: The number of nodes needed
 * @gfp: the gfp flags
 */
static void mas_node_count_gfp(struct ma_state *mas, int count, gfp_t gfp)
{
	unsigned long allocated = mas_allocated(mas);

	if (allocated < count) {
		mas_set_alloc_req(mas, count - allocated);
		mas_alloc_nodes(mas, gfp);
	}
}

/*
 * mas_node_count() - Check if enough nodes are allocated and request more if
 * there are not enough nodes.
 * @mas: The maple state
 * @count: The number of nodes needed
 *
 * Note: Uses GFP_NOWAIT | __GFP_NOWARN for gfp flags.
 */
static void mas_node_count(struct ma_state *mas, int count)
{
	return mas_node_count_gfp(mas, count, GFP_NOWAIT | __GFP_NOWARN);
}

/*
 * mas_start() - Sets up maple state for operations.
 * @mas: The maple state.
 *
 * If mas->status == ma_start, then set the min, max and depth to
 * defaults.
 *
 * Return:
 * - If mas->node is an error or not mas_start, return NULL.
 * - If it's an empty tree: NULL & mas->status == ma_none
 * - If it's a single entry: The entry & mas->status == ma_root
 * - If it's a tree: NULL & mas->status == ma_active
 */
static inline struct maple_enode *mas_start(struct ma_state *mas)
{
	if (likely(mas_is_start(mas))) {
		struct maple_enode *root;

		mas->min = 0;
		mas->max = ULONG_MAX;

retry:
		mas->depth = 0;
		root = mas_root(mas);
		/* Tree with nodes */
		if (likely(xa_is_node(root))) {
			mas->depth = 0;
			mas->status = ma_active;
			mas->node = mte_safe_root(root);
			mas->offset = 0;
			if (mte_dead_node(mas->node))
				goto retry;

			return NULL;
		}

		mas->node = NULL;
		/* empty tree */
		if (unlikely(!root)) {
			mas->status = ma_none;
			mas->offset = MAPLE_NODE_SLOTS;
			return NULL;
		}

		/* Single entry tree */
		mas->status = ma_root;
		mas->offset = MAPLE_NODE_SLOTS;

		/* Single entry tree. */
		if (mas->index > 0)
			return NULL;

		return root;
	}

	return NULL;
}

/*
 * ma_data_end() - Find the end of the data in a node.
 * @node: The maple node
 * @type: The maple node type
 * @pivots: The array of pivots in the node
 * @max: The maximum value in the node
 *
 * Uses metadata to find the end of the data when possible.
 * Return: The zero indexed last slot with data (may be null).
 */
static __always_inline unsigned char ma_data_end(struct maple_node *node,
		enum maple_type type, unsigned long *pivots, unsigned long max)
{
	unsigned char offset;

	if (!pivots)
		return 0;

	if (type == maple_arange_64)
		return ma_meta_end(node, type);

	offset = mt_pivots[type] - 1;
	if (likely(!pivots[offset]))
		return ma_meta_end(node, type);

	if (likely(pivots[offset] == max))
		return offset;

	return mt_pivots[type];
}

/*
 * mas_data_end() - Find the end of the data (slot).
 * @mas: the maple state
 *
 * This method is optimized to check the metadata of a node if the node type
 * supports data end metadata.
 *
 * Return: The zero indexed last slot with data (may be null).
 */
static inline unsigned char mas_data_end(struct ma_state *mas)
{
	enum maple_type type;
	struct maple_node *node;
	unsigned char offset;
	unsigned long *pivots;

	type = mte_node_type(mas->node);
	node = mas_mn(mas);
	if (type == maple_arange_64)
		return ma_meta_end(node, type);

	pivots = ma_pivots(node, type);
	if (unlikely(ma_dead_node(node)))
		return 0;

	offset = mt_pivots[type] - 1;
	if (likely(!pivots[offset]))
		return ma_meta_end(node, type);

	if (likely(pivots[offset] == mas->max))
		return offset;

	return mt_pivots[type];
}

/*
 * mas_leaf_max_gap() - Returns the largest gap in a leaf node
 * @mas: the maple state
 *
 * Return: The maximum gap in the leaf.
 */
static unsigned long mas_leaf_max_gap(struct ma_state *mas)
{
	enum maple_type mt;
	unsigned long pstart, gap, max_gap;
	struct maple_node *mn;
	unsigned long *pivots;
	void __rcu **slots;
	unsigned char i;
	unsigned char max_piv;

	mt = mte_node_type(mas->node);
	mn = mas_mn(mas);
	slots = ma_slots(mn, mt);
	max_gap = 0;
	if (unlikely(ma_is_dense(mt))) {
		gap = 0;
		for (i = 0; i < mt_slots[mt]; i++) {
			if (slots[i]) {
				if (gap > max_gap)
					max_gap = gap;
				gap = 0;
			} else {
				gap++;
			}
		}
		if (gap > max_gap)
			max_gap = gap;
		return max_gap;
	}

	/*
	 * Checking the first implied pivot optimizes the loop below, and
	 * slot 1 may be skipped if there is a gap in slot 0.
	 */
	pivots = ma_pivots(mn, mt);
	if (likely(!slots[0])) {
		max_gap = pivots[0] - mas->min + 1;
		i = 2;
	} else {
		i = 1;
	}

	/* reduce max_piv as the special case is checked before the loop */
	max_piv = ma_data_end(mn, mt, pivots, mas->max) - 1;
	/*
	 * Check end implied pivot which can only be a gap on the right most
	 * node.
	 */
	if (unlikely(mas->max == ULONG_MAX) && !slots[max_piv + 1]) {
		gap = ULONG_MAX - pivots[max_piv];
		if (gap > max_gap)
			max_gap = gap;

		if (max_gap > pivots[max_piv] - mas->min)
			return max_gap;
	}

	for (; i <= max_piv; i++) {
		/* data == no gap. */
		if (likely(slots[i]))
			continue;

		pstart = pivots[i - 1];
		gap = pivots[i] - pstart;
		if (gap > max_gap)
			max_gap = gap;

		/* There cannot be two gaps in a row. */
		i++;
	}
	return max_gap;
}
1550 | ||
1551 | /* | |
1552 | * ma_max_gap() - Get the maximum gap in a maple node (non-leaf) | |
1553 | * @node: The maple node | |
1554 | * @gaps: The pointer to the gaps | |
1555 | * @mt: The maple node type | |
96ae4c90 | 1556 | * @off: Pointer to store the offset location of the gap. |
54a611b6 LH |
1557 | * |
1558 | * Uses the metadata data end to scan backwards across set gaps. | |
1559 | * | |
1560 | * Return: The maximum gap value | |
1561 | */ | |
1562 | static inline unsigned long | |
1563 | ma_max_gap(struct maple_node *node, unsigned long *gaps, enum maple_type mt, | |
1564 | unsigned char *off) | |
1565 | { | |
1566 | unsigned char offset, i; | |
1567 | unsigned long max_gap = 0; | |
1568 | ||
1569 | i = offset = ma_meta_end(node, mt); | |
1570 | do { | |
1571 | if (gaps[i] > max_gap) { | |
1572 | max_gap = gaps[i]; | |
1573 | offset = i; | |
1574 | } | |
1575 | } while (i--); | |
1576 | ||
1577 | *off = offset; | |
1578 | return max_gap; | |
1579 | } | |
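/*
 * Example (hypothetical values, for illustration only): with gaps
 * {3, 9, 9, 0} and a metadata end of 3, the scan visits offsets
 * 3, 2, 1, 0; only a strictly larger gap updates the result, so the
 * return value is 9 with *off = 2, the highest offset holding the
 * maximum.
 */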
1580 | ||
1581 | /* | |
1582 | * mas_max_gap() - find the largest gap in a non-leaf node and set the slot. | |
1583 | * @mas: The maple state. | |
1584 | * | |
54a611b6 LH |
1585 | * Return: The gap value. |
1586 | */ | |
1587 | static inline unsigned long mas_max_gap(struct ma_state *mas) | |
1588 | { | |
1589 | unsigned long *gaps; | |
1590 | unsigned char offset; | |
1591 | enum maple_type mt; | |
1592 | struct maple_node *node; | |
1593 | ||
1594 | mt = mte_node_type(mas->node); | |
1595 | if (ma_is_leaf(mt)) | |
1596 | return mas_leaf_max_gap(mas); | |
1597 | ||
1598 | node = mas_mn(mas); | |
bec1b51e | 1599 | MAS_BUG_ON(mas, mt != maple_arange_64); |
c5e94121 | 1600 | offset = ma_meta_gap(node); |
54a611b6 LH |
1601 | gaps = ma_gaps(node, mt); |
1602 | return gaps[offset]; | |
1603 | } | |
1604 | ||
1605 | /* | |
1606 | * mas_parent_gap() - Set the parent gap and any gaps above, as needed | |
1607 | * @mas: The maple state | |
1608 | * @offset: The gap offset in the parent to set | |
1609 | * @new: The new gap value. | |
1610 | * | |
1611 | * Set the parent gap then continue to set the gap upwards, using the metadata | |
1612 | * of the parent to see if it is necessary to check the node above. | |
1613 | */ | |
1614 | static inline void mas_parent_gap(struct ma_state *mas, unsigned char offset, | |
1615 | unsigned long new) | |
1616 | { | |
1617 | unsigned long meta_gap = 0; | |
1618 | struct maple_node *pnode; | |
1619 | struct maple_enode *penode; | |
1620 | unsigned long *pgaps; | |
1621 | unsigned char meta_offset; | |
1622 | enum maple_type pmt; | |
1623 | ||
1624 | pnode = mte_parent(mas->node); | |
afc754c6 | 1625 | pmt = mas_parent_type(mas, mas->node); |
54a611b6 LH |
1626 | penode = mt_mk_node(pnode, pmt); |
1627 | pgaps = ma_gaps(pnode, pmt); | |
1628 | ||
1629 | ascend: | |
bec1b51e | 1630 | MAS_BUG_ON(mas, pmt != maple_arange_64); |
c5e94121 | 1631 | meta_offset = ma_meta_gap(pnode); |
d695c30a | 1632 | meta_gap = pgaps[meta_offset]; |
54a611b6 LH |
1633 | |
1634 | pgaps[offset] = new; | |
1635 | ||
1636 | if (meta_gap == new) | |
1637 | return; | |
1638 | ||
1639 | if (offset != meta_offset) { | |
1640 | if (meta_gap > new) | |
1641 | return; | |
1642 | ||
1643 | ma_set_meta_gap(pnode, pmt, offset); | |
1644 | } else if (new < meta_gap) { | |
54a611b6 LH |
1645 | new = ma_max_gap(pnode, pgaps, pmt, &meta_offset); |
1646 | ma_set_meta_gap(pnode, pmt, meta_offset); | |
1647 | } | |
1648 | ||
1649 | if (ma_is_root(pnode)) | |
1650 | return; | |
1651 | ||
1652 | /* Go to the parent node. */ | |
1653 | pnode = mte_parent(penode); | |
afc754c6 | 1654 | pmt = mas_parent_type(mas, penode); |
54a611b6 LH |
1655 | pgaps = ma_gaps(pnode, pmt); |
1656 | offset = mte_parent_slot(penode); | |
1657 | penode = mt_mk_node(pnode, pmt); | |
1658 | goto ascend; | |
1659 | } | |
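/*
 * Example (hypothetical values, for illustration only): if the child at
 * offset 4 shrinks from a gap of 8 to 2 and offset 4 is also the
 * parent's recorded meta gap offset, ma_max_gap() rescans the parent
 * for a new maximum; the ascent stops as soon as a level's recorded
 * maximum equals the new value or the root is reached.
 */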
1660 | ||
1661 | /* | |
1662 | * mas_update_gap() - Update a node's gaps and propagate up if necessary. | |
96ae4c90 | 1663 | * @mas: the maple state. |
54a611b6 LH |
1664 | */ |
1665 | static inline void mas_update_gap(struct ma_state *mas) | |
1666 | { | |
1667 | unsigned char pslot; | |
1668 | unsigned long p_gap; | |
1669 | unsigned long max_gap; | |
1670 | ||
1671 | if (!mt_is_alloc(mas->tree)) | |
1672 | return; | |
1673 | ||
1674 | if (mte_is_root(mas->node)) | |
1675 | return; | |
1676 | ||
1677 | max_gap = mas_max_gap(mas); | |
1678 | ||
1679 | pslot = mte_parent_slot(mas->node); | |
1680 | p_gap = ma_gaps(mte_parent(mas->node), | |
afc754c6 | 1681 | mas_parent_type(mas, mas->node))[pslot]; |
54a611b6 LH |
1682 | |
1683 | if (p_gap != max_gap) | |
1684 | mas_parent_gap(mas, pslot, max_gap); | |
1685 | } | |
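/*
 * A minimal usage sketch (hypothetical, for illustration only): gap
 * updates only happen for allocation trees, e.g. one initialized with
 * mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); for any other tree
 * mt_is_alloc() is false and mas_update_gap() returns immediately.
 */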
1686 | ||
1687 | /* | |
1688 | * mas_adopt_children() - Set the parent pointer of all nodes in @parent to | |
1689 | * @parent with the slot encoded. | |
96ae4c90 WY |
1690 | * @mas: the maple state (for the tree) |
1691 | * @parent: the maple encoded node containing the children. | |
54a611b6 LH |
1692 | */ |
1693 | static inline void mas_adopt_children(struct ma_state *mas, | |
1694 | struct maple_enode *parent) | |
1695 | { | |
1696 | enum maple_type type = mte_node_type(parent); | |
068bafca | 1697 | struct maple_node *node = mte_to_node(parent); |
54a611b6 LH |
1698 | void __rcu **slots = ma_slots(node, type); |
1699 | unsigned long *pivots = ma_pivots(node, type); | |
1700 | struct maple_enode *child; | |
1701 | unsigned char offset; | |
1702 | ||
1703 | offset = ma_data_end(node, type, pivots, mas->max); | |
1704 | do { | |
1705 | child = mas_slot_locked(mas, slots, offset); | |
bf96715e | 1706 | mas_set_parent(mas, child, parent, offset); |
54a611b6 LH |
1707 | } while (offset--); |
1708 | } | |
1709 | ||
1710 | /* | |
1238f6a2 LH |
1711 | * mas_put_in_tree() - Put a new node in the tree, smp_wmb(), and mark the old |
1712 | * node as dead. | |
96ae4c90 WY |
1713 | * @mas: the maple state with the new node |
1714 | * @old_enode: The old maple encoded node to replace. | |
f9d3a963 | 1715 | * @new_height: if we are inserting a root node, update the height of the tree |
54a611b6 | 1716 | */ |
1238f6a2 | 1717 | static inline void mas_put_in_tree(struct ma_state *mas, |
f9d3a963 | 1718 | struct maple_enode *old_enode, char new_height) |
14c4b5ab | 1719 | __must_hold(mas->tree->ma_lock) |
54a611b6 | 1720 | { |
1238f6a2 LH |
1721 | unsigned char offset; |
1722 | void __rcu **slots; | |
54a611b6 LH |
1723 | |
1724 | if (mte_is_root(mas->node)) { | |
4ffc2ee2 | 1725 | mas_mn(mas)->parent = ma_parent_ptr(mas_tree_parent(mas)); |
54a611b6 | 1726 | rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node)); |
f9d3a963 | 1727 | mt_set_height(mas->tree, new_height); |
54a611b6 | 1728 | } else { |
1238f6a2 LH |
1729 | |
1730 | offset = mte_parent_slot(mas->node); | |
1731 | slots = ma_slots(mte_parent(mas->node), | |
1732 | mas_parent_type(mas, mas->node)); | |
54a611b6 LH |
1733 | rcu_assign_pointer(slots[offset], mas->node); |
1734 | } | |
1735 | ||
1238f6a2 | 1736 | mte_set_node_dead(old_enode); |
54a611b6 LH |
1737 | } |
1738 | ||
72bcf4aa LH |
1739 | /* |
1740 | * mas_replace_node() - Replace a node by putting it in the tree, marking it | |
1741 | * dead, and freeing it. Uses the | |
1742 | * parent encoding to locate the maple node in the tree. | |
96ae4c90 WY |
1743 | * @mas: the ma_state with @mas->node pointing to the new node. |
1744 | * @old_enode: The old maple encoded node. | |
f9d3a963 | 1745 | * @new_height: The new height of the tree as a result of the operation |
72bcf4aa LH |
1746 | */ |
1747 | static inline void mas_replace_node(struct ma_state *mas, | |
f9d3a963 | 1748 | struct maple_enode *old_enode, unsigned char new_height) |
72bcf4aa LH |
1749 | __must_hold(mas->tree->ma_lock) |
1750 | { | |
f9d3a963 | 1751 | mas_put_in_tree(mas, old_enode, new_height); |
72bcf4aa LH |
1752 | mas_free(mas, old_enode); |
1753 | } | |
1754 | ||
54a611b6 | 1755 | /* |
530f745c LH |
1756 | * mas_find_child() - Find a child who has the parent @mas->node. |
1757 | * @mas: the maple state with the parent. | |
54a611b6 LH |
1758 | * @child: the maple state to store the child. |
1759 | */ | |
530f745c | 1760 | static inline bool mas_find_child(struct ma_state *mas, struct ma_state *child) |
14c4b5ab | 1761 | __must_hold(mas->tree->ma_lock) |
54a611b6 LH |
1762 | { |
1763 | enum maple_type mt; | |
1764 | unsigned char offset; | |
1765 | unsigned char end; | |
1766 | unsigned long *pivots; | |
1767 | struct maple_enode *entry; | |
1768 | struct maple_node *node; | |
1769 | void __rcu **slots; | |
1770 | ||
1771 | mt = mte_node_type(mas->node); | |
1772 | node = mas_mn(mas); | |
1773 | slots = ma_slots(node, mt); | |
1774 | pivots = ma_pivots(node, mt); | |
1775 | end = ma_data_end(node, mt, pivots, mas->max); | |
1776 | for (offset = mas->offset; offset <= end; offset++) { | |
1777 | entry = mas_slot_locked(mas, slots, offset); | |
1778 | if (mte_parent(entry) == node) { | |
1779 | *child = *mas; | |
1780 | mas->offset = offset + 1; | |
1781 | child->offset = offset; | |
1782 | mas_descend(child); | |
1783 | child->offset = 0; | |
1784 | return true; | |
1785 | } | |
1786 | } | |
1787 | return false; | |
1788 | } | |
1789 | ||
1790 | /* | |
1791 | * mab_shift_right() - Shift the data in @b_node right. Note: this does not | |
1792 | * clean out the old data or set b_node->b_end. | |
1793 | * @b_node: the maple_big_node | |
1794 | * @shift: the shift count | |
1795 | */ | |
1796 | static inline void mab_shift_right(struct maple_big_node *b_node, | |
1797 | unsigned char shift) | |
1798 | { | |
1799 | unsigned long size = b_node->b_end * sizeof(unsigned long); | |
1800 | ||
1801 | memmove(b_node->pivot + shift, b_node->pivot, size); | |
1802 | memmove(b_node->slot + shift, b_node->slot, size); | |
1803 | if (b_node->type == maple_arange_64) | |
1804 | memmove(b_node->gap + shift, b_node->gap, size); | |
1805 | } | |
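/*
 * Example (hypothetical values, for illustration only): with b_end = 3,
 * pivots {5, 10, 15} and shift = 2, the memmove() leaves
 * {5, 10, 5, 10, 15}; offsets 0 and 1 are stale and are expected to be
 * overwritten by a following mas_mab_cp() of the sibling's data.
 */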
1806 | ||
1807 | /* | |
1808 | * mab_middle_node() - Check if a middle node is needed (unlikely) | |
1809 | * @b_node: the maple_big_node that contains the data. | |
54a611b6 LH |
1810 | * @split: the potential split location |
1811 | * @slot_count: the size that can be stored in a single node being considered. | |
1812 | * | |
1813 | * Return: true if a middle node is required. | |
1814 | */ | |
1815 | static inline bool mab_middle_node(struct maple_big_node *b_node, int split, | |
1816 | unsigned char slot_count) | |
1817 | { | |
1818 | unsigned char size = b_node->b_end; | |
1819 | ||
1820 | if (size >= 2 * slot_count) | |
1821 | return true; | |
1822 | ||
1823 | if (!b_node->slot[split] && (size >= 2 * slot_count - 1)) | |
1824 | return true; | |
1825 | ||
1826 | return false; | |
1827 | } | |
1828 | ||
1829 | /* | |
1830 | * mab_no_null_split() - Ensure the split doesn't fall on a NULL | |
1831 | * @b_node: the maple_big_node with the data | |
1832 | * @split: the suggested split location | |
1833 | * @slot_count: the number of slots in the node being considered. | |
1834 | * | |
1835 | * Return: the split location. | |
1836 | */ | |
1837 | static inline int mab_no_null_split(struct maple_big_node *b_node, | |
1838 | unsigned char split, unsigned char slot_count) | |
1839 | { | |
1840 | if (!b_node->slot[split]) { | |
1841 | /* | |
1842 | * If the split is less than the max slot and the right side will | |
1843 | * still be sufficient, then increment the split on a NULL. | |
1844 | */ | |
1845 | if ((split < slot_count - 1) && | |
1846 | (b_node->b_end - split) > (mt_min_slots[b_node->type])) | |
1847 | split++; | |
1848 | else | |
1849 | split--; | |
1850 | } | |
1851 | return split; | |
1852 | } | |
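/*
 * Example (hypothetical values, for illustration only): if the
 * suggested split lands on a NULL at offset 7 of a 16-slot node and
 * the right side would still keep more than mt_min_slots entries, the
 * split advances to 8; otherwise it retreats to 6 so that no new node
 * ends on a NULL entry.
 */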
1853 | ||
1854 | /* | |
1855 | * mab_calc_split() - Calculate the split location and if there needs to be two | |
1856 | * splits. | |
96ae4c90 | 1857 | * @mas: The maple state |
54a611b6 LH |
1858 | * @bn: The maple_big_node with the data |
1859 | * @mid_split: The second split, if required. 0 otherwise. | |
1860 | * | |
1861 | * Return: The first split location. The middle split is set in @mid_split. | |
1862 | */ | |
1863 | static inline int mab_calc_split(struct ma_state *mas, | |
4f6a6bed | 1864 | struct maple_big_node *bn, unsigned char *mid_split) |
54a611b6 LH |
1865 | { |
1866 | unsigned char b_end = bn->b_end; | |
1867 | int split = b_end / 2; /* Assume equal split. */ | |
4f6a6bed | 1868 | unsigned char slot_count = mt_slots[bn->type]; |
54a611b6 LH |
1869 | |
1870 | /* | |
1871 | * To support gap tracking, all NULL entries are kept together and a node cannot | |
1872 | * end on a NULL entry, with the exception of the left-most leaf. The | |
1873 | * limitation means that the split of a node must be checked for this condition | |
1874 | * and be able to put more data in one direction or the other. | |
1875 | */ | |
1876 | if (unlikely((mas->mas_flags & MA_STATE_BULK))) { | |
1877 | *mid_split = 0; | |
1878 | split = b_end - mt_min_slots[bn->type]; | |
1879 | ||
1880 | if (!ma_is_leaf(bn->type)) | |
1881 | return split; | |
1882 | ||
1883 | mas->mas_flags |= MA_STATE_REBALANCE; | |
1884 | if (!bn->slot[split]) | |
1885 | split--; | |
1886 | return split; | |
1887 | } | |
1888 | ||
1889 | /* | |
1890 | * Although extremely rare, it is possible to enter what is known as the 3-way | |
1891 | * split scenario. The 3-way split comes about by means of a store of a range | |
1892 | * that overwrites the end and beginning of two full nodes. The result is a set | |
1893 | * of entries that cannot be stored in 2 nodes. Sometimes, these two nodes can | |
1894 | * also be located in different parent nodes which are also full. This can | |
1895 | * carry upwards all the way to the root in the worst case. | |
1896 | */ | |
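/*
 * Example (hypothetical values, for illustration only): two full
 * 16-slot leaves hold 32 entries; a store that partially overwrites
 * the last entry of one and the first entry of the other yields 33
 * entries, which cannot fit in two nodes, so the data is cut at
 * b_end / 3 and 2 * (b_end / 3).
 */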
1897 | if (unlikely(mab_middle_node(bn, split, slot_count))) { | |
1898 | split = b_end / 3; | |
1899 | *mid_split = split * 2; | |
1900 | } else { | |
54a611b6 | 1901 | *mid_split = 0; |
54a611b6 LH |
1902 | } |
1903 | ||
1904 | /* Avoid ending a node on a NULL entry */ | |
1905 | split = mab_no_null_split(bn, split, slot_count); | |
54a611b6 | 1906 | |
e11cb683 VY |
1907 | if (unlikely(*mid_split)) |
1908 | *mid_split = mab_no_null_split(bn, *mid_split, slot_count); | |
54a611b6 LH |
1909 | |
1910 | return split; | |
1911 | } | |
1912 | ||
1913 | /* | |
1914 | * mas_mab_cp() - Copy data from a maple state inclusively to a maple_big_node | |
1915 | * and set @b_node->b_end to the next free slot. | |
1916 | * @mas: The maple state | |
1917 | * @mas_start: The starting slot to copy | |
1918 | * @mas_end: The end slot to copy (inclusive) | |
1919 | * @b_node: The maple_big_node to place the data | |
1920 | * @mab_start: The starting location in maple_big_node to store the data. | |
1921 | */ | |
1922 | static inline void mas_mab_cp(struct ma_state *mas, unsigned char mas_start, | |
1923 | unsigned char mas_end, struct maple_big_node *b_node, | |
1924 | unsigned char mab_start) | |
1925 | { | |
1926 | enum maple_type mt; | |
1927 | struct maple_node *node; | |
1928 | void __rcu **slots; | |
1929 | unsigned long *pivots, *gaps; | |
1930 | int i = mas_start, j = mab_start; | |
1931 | unsigned char piv_end; | |
1932 | ||
1933 | node = mas_mn(mas); | |
1934 | mt = mte_node_type(mas->node); | |
1935 | pivots = ma_pivots(node, mt); | |
1936 | if (!i) { | |
1937 | b_node->pivot[j] = pivots[i++]; | |
1938 | if (unlikely(i > mas_end)) | |
1939 | goto complete; | |
1940 | j++; | |
1941 | } | |
1942 | ||
1943 | piv_end = min(mas_end, mt_pivots[mt]); | |
1944 | for (; i < piv_end; i++, j++) { | |
1945 | b_node->pivot[j] = pivots[i]; | |
1946 | if (unlikely(!b_node->pivot[j])) | |
1c148069 | 1947 | goto complete; |
54a611b6 LH |
1948 | |
1949 | if (unlikely(mas->max == b_node->pivot[j])) | |
1950 | goto complete; | |
1951 | } | |
1952 | ||
8c7904a8 | 1953 | b_node->pivot[j] = mas_safe_pivot(mas, pivots, i, mt); |
54a611b6 LH |
1954 | |
1955 | complete: | |
1956 | b_node->b_end = ++j; | |
1957 | j -= mab_start; | |
1958 | slots = ma_slots(node, mt); | |
1959 | memcpy(b_node->slot + mab_start, slots + mas_start, sizeof(void *) * j); | |
1960 | if (!ma_is_leaf(mt) && mt_is_alloc(mas->tree)) { | |
1961 | gaps = ma_gaps(node, mt); | |
1962 | memcpy(b_node->gap + mab_start, gaps + mas_start, | |
1963 | sizeof(unsigned long) * j); | |
1964 | } | |
1965 | } | |
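/*
 * Example (hypothetical values, for illustration only):
 * mas_mab_cp(mas, 0, 4, b_node, 0) on a leaf copies pivots 0-3, adds
 * the safe pivot for offset 4, copies slots 0-4, and leaves
 * b_node->b_end at 5.
 */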
1966 | ||
1967 | /* | |
1968 | * mas_leaf_set_meta() - Set the metadata of a leaf if possible. | |
54a611b6 | 1969 | * @node: The maple node |
54a611b6 | 1970 | * @mt: The maple type |
330018fe | 1971 | * @end: The node end |
54a611b6 | 1972 | */ |
330018fe | 1973 | static inline void mas_leaf_set_meta(struct maple_node *node, |
54a611b6 LH |
1974 | enum maple_type mt, unsigned char end) |
1975 | { | |
54a611b6 LH |
1976 | if (end < mt_slots[mt] - 1) |
1977 | ma_set_meta(node, mt, 0, end); | |
1978 | } | |
1979 | ||
1980 | /* | |
1981 | * mab_mas_cp() - Copy data from maple_big_node to a maple encoded node. | |
1982 | * @b_node: the maple_big_node that has the data | |
1983 | * @mab_start: the start location in @b_node. | |
1984 | * @mab_end: The end location in @b_node (inclusive) | |
1985 | * @mas: The maple state with the maple encoded node. | |
| * @new_max: If true, update @mas->max to the last pivot copied | |
1986 | */ | |
1987 | static inline void mab_mas_cp(struct maple_big_node *b_node, | |
1988 | unsigned char mab_start, unsigned char mab_end, | |
1989 | struct ma_state *mas, bool new_max) | |
1990 | { | |
1991 | int i, j = 0; | |
1992 | enum maple_type mt = mte_node_type(mas->node); | |
1993 | struct maple_node *node = mte_to_node(mas->node); | |
1994 | void __rcu **slots = ma_slots(node, mt); | |
1995 | unsigned long *pivots = ma_pivots(node, mt); | |
1996 | unsigned long *gaps = NULL; | |
1997 | unsigned char end; | |
1998 | ||
1999 | if (mab_end - mab_start > mt_pivots[mt]) | |
2000 | mab_end--; | |
2001 | ||
2002 | if (!pivots[mt_pivots[mt] - 1]) | |
2003 | slots[mt_pivots[mt]] = NULL; | |
2004 | ||
2005 | i = mab_start; | |
2006 | do { | |
2007 | pivots[j++] = b_node->pivot[i++]; | |
2008 | } while (i <= mab_end && likely(b_node->pivot[i])); | |
2009 | ||
2010 | memcpy(slots, b_node->slot + mab_start, | |
2011 | sizeof(void *) * (i - mab_start)); | |
2012 | ||
2013 | if (new_max) | |
2014 | mas->max = b_node->pivot[i - 1]; | |
2015 | ||
2016 | end = j - 1; | |
2017 | if (likely(!ma_is_leaf(mt) && mt_is_alloc(mas->tree))) { | |
2018 | unsigned long max_gap = 0; | |
d695c30a | 2019 | unsigned char offset = 0; |
54a611b6 LH |
2020 | |
2021 | gaps = ma_gaps(node, mt); | |
2022 | do { | |
2023 | gaps[--j] = b_node->gap[--i]; | |
2024 | if (gaps[j] > max_gap) { | |
2025 | offset = j; | |
2026 | max_gap = gaps[j]; | |
2027 | } | |
2028 | } while (j); | |
2029 | ||
2030 | ma_set_meta(node, mt, offset, end); | |
2031 | } else { | |
330018fe | 2032 | mas_leaf_set_meta(node, mt, end); |
54a611b6 LH |
2033 | } |
2034 | } | |
2035 | ||
54a611b6 LH |
2036 | /* |
2037 | * mas_bulk_rebalance() - Rebalance the end of a tree after a bulk insert. | |
2038 | * @mas: The maple state | |
2039 | * @end: The maple node end | |
2040 | * @mt: The maple node type | |
2041 | */ | |
2042 | static inline void mas_bulk_rebalance(struct ma_state *mas, unsigned char end, | |
2043 | enum maple_type mt) | |
2044 | { | |
2045 | if (!(mas->mas_flags & MA_STATE_BULK)) | |
2046 | return; | |
2047 | ||
2048 | if (mte_is_root(mas->node)) | |
2049 | return; | |
2050 | ||
2051 | if (end > mt_min_slots[mt]) { | |
2052 | mas->mas_flags &= ~MA_STATE_REBALANCE; | |
2053 | return; | |
2054 | } | |
2055 | } | |
2056 | ||
2057 | /* | |
2058 | * mas_store_b_node() - Store an @entry into the b_node while also copying the | |
2059 | * data from a maple encoded node. | |
2060 | * @wr_mas: the maple write state | |
2061 | * @b_node: the maple_big_node to fill with data | |
2062 | * @offset_end: the offset to end copying | |
2063 | * | |
2064 | * Sets @b_node->b_end to the actual end of the data stored in @b_node. | |
2065 | */ | |
44081c77 | 2066 | static noinline_for_kasan void mas_store_b_node(struct ma_wr_state *wr_mas, |
54a611b6 LH |
2067 | struct maple_big_node *b_node, unsigned char offset_end) |
2068 | { | |
2069 | unsigned char slot; | |
2070 | unsigned char b_end; | |
2071 | /* Possible underflow of piv will wrap back to 0 before use. */ | |
2072 | unsigned long piv; | |
2073 | struct ma_state *mas = wr_mas->mas; | |
2074 | ||
2075 | b_node->type = wr_mas->type; | |
2076 | b_end = 0; | |
2077 | slot = mas->offset; | |
2078 | if (slot) { | |
2079 | /* Copy start data up to insert. */ | |
2080 | mas_mab_cp(mas, 0, slot - 1, b_node, 0); | |
2081 | b_end = b_node->b_end; | |
2082 | piv = b_node->pivot[b_end - 1]; | |
2083 | } else | |
2084 | piv = mas->min - 1; | |
2085 | ||
2086 | if (piv + 1 < mas->index) { | |
2087 | /* Handle range starting after old range */ | |
2088 | b_node->slot[b_end] = wr_mas->content; | |
2089 | if (!wr_mas->content) | |
2090 | b_node->gap[b_end] = mas->index - 1 - piv; | |
2091 | b_node->pivot[b_end++] = mas->index - 1; | |
2092 | } | |
2093 | ||
2094 | /* Store the new entry. */ | |
2095 | mas->offset = b_end; | |
2096 | b_node->slot[b_end] = wr_mas->entry; | |
2097 | b_node->pivot[b_end] = mas->last; | |
2098 | ||
2099 | /* Appended. */ | |
2100 | if (mas->last >= mas->max) | |
2101 | goto b_end; | |
2102 | ||
2103 | /* Handle new range ending before old range ends */ | |
29b2681f | 2104 | piv = mas_safe_pivot(mas, wr_mas->pivots, offset_end, wr_mas->type); |
54a611b6 LH |
2105 | if (piv > mas->last) { |
2106 | if (piv == ULONG_MAX) | |
2107 | mas_bulk_rebalance(mas, b_node->b_end, wr_mas->type); | |
2108 | ||
2109 | if (offset_end != slot) | |
2110 | wr_mas->content = mas_slot_locked(mas, wr_mas->slots, | |
2111 | offset_end); | |
2112 | ||
2113 | b_node->slot[++b_end] = wr_mas->content; | |
2114 | if (!wr_mas->content) | |
2115 | b_node->gap[b_end] = piv - mas->last + 1; | |
2116 | b_node->pivot[b_end] = piv; | |
2117 | } | |
2118 | ||
2119 | slot = offset_end + 1; | |
0de56e38 | 2120 | if (slot > mas->end) |
54a611b6 LH |
2121 | goto b_end; |
2122 | ||
2123 | /* Copy end data to the end of the node. */ | |
0de56e38 | 2124 | mas_mab_cp(mas, slot, mas->end + 1, b_node, ++b_end); |
54a611b6 LH |
2125 | b_node->b_end--; |
2126 | return; | |
2127 | ||
2128 | b_end: | |
2129 | b_node->b_end = b_end; | |
2130 | } | |
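/*
 * Example (hypothetical values, for illustration only): storing 20-25
 * over a node holding {[10-19] = A, [20-30] = B} copies A with pivot
 * 19, writes the new entry with pivot 25, then re-adds the trimmed
 * remainder [26-30] = B from the old slot.
 */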
2131 | ||
2132 | /* | |
2133 | * mas_prev_sibling() - Find the previous node with the same parent. | |
2134 | * @mas: the maple state | |
2135 | * | |
2136 | * Return: True if there is a previous sibling, false otherwise. | |
2137 | */ | |
2138 | static inline bool mas_prev_sibling(struct ma_state *mas) | |
2139 | { | |
2140 | unsigned int p_slot = mte_parent_slot(mas->node); | |
2141 | ||
0cc8d68a | 2142 | /* For root node, p_slot is set to 0 by mte_parent_slot(). */ |
54a611b6 LH |
2143 | if (!p_slot) |
2144 | return false; | |
2145 | ||
2146 | mas_ascend(mas); | |
2147 | mas->offset = p_slot - 1; | |
2148 | mas_descend(mas); | |
2149 | return true; | |
2150 | } | |
2151 | ||
2152 | /* | |
2153 | * mas_next_sibling() - Find the next node with the same parent. | |
2154 | * @mas: the maple state | |
2155 | * | |
2156 | * Return: true if there is a next sibling, false otherwise. | |
2157 | */ | |
2158 | static inline bool mas_next_sibling(struct ma_state *mas) | |
2159 | { | |
2160 | MA_STATE(parent, mas->tree, mas->index, mas->last); | |
2161 | ||
2162 | if (mte_is_root(mas->node)) | |
2163 | return false; | |
2164 | ||
2165 | parent = *mas; | |
2166 | mas_ascend(&parent); | |
2167 | parent.offset = mte_parent_slot(mas->node) + 1; | |
2168 | if (parent.offset > mas_data_end(&parent)) | |
2169 | return false; | |
2170 | ||
2171 | *mas = parent; | |
2172 | mas_descend(mas); | |
2173 | return true; | |
2174 | } | |
2175 | ||
2176 | /* | |
96ae4c90 WY |
2177 | * mas_node_or_none() - Set the enode and state. |
2178 | * @mas: the maple state | |
54a611b6 LH |
2179 | * @enode: The encoded maple node. |
2180 | * | |
067311d3 | 2181 | * Set the node to the enode and the status. |
54a611b6 | 2182 | */ |
067311d3 LH |
2183 | static inline void mas_node_or_none(struct ma_state *mas, |
2184 | struct maple_enode *enode) | |
54a611b6 | 2185 | { |
067311d3 LH |
2186 | if (enode) { |
2187 | mas->node = enode; | |
2188 | mas->status = ma_active; | |
2189 | } else { | |
2190 | mas->node = NULL; | |
2191 | mas->status = ma_none; | |
2192 | } | |
54a611b6 LH |
2193 | } |
2194 | ||
2195 | /* | |
2196 | * mas_wr_node_walk() - Find the correct offset for the index in the @mas. | |
bea07fd6 LS |
2197 | * If @mas->index cannot be found within the containing |
2198 | * node, we traverse to the last entry in the node. | |
54a611b6 LH |
2199 | * @wr_mas: The maple write state |
2200 | * | |
2201 | * Uses mas_slot_locked() and does not need to worry about dead nodes. | |
2202 | */ | |
2203 | static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas) | |
2204 | { | |
2205 | struct ma_state *mas = wr_mas->mas; | |
97f7e094 | 2206 | unsigned char count, offset; |
54a611b6 LH |
2207 | |
2208 | if (unlikely(ma_is_dense(wr_mas->type))) { | |
2209 | wr_mas->r_max = wr_mas->r_min = mas->index; | |
2210 | mas->offset = mas->index = mas->min; | |
2211 | return; | |
2212 | } | |
2213 | ||
2214 | wr_mas->node = mas_mn(wr_mas->mas); | |
2215 | wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type); | |
0de56e38 LH |
2216 | count = mas->end = ma_data_end(wr_mas->node, wr_mas->type, |
2217 | wr_mas->pivots, mas->max); | |
54a611b6 | 2218 | offset = mas->offset; |
54a611b6 | 2219 | |
97f7e094 PZ |
2220 | while (offset < count && mas->index > wr_mas->pivots[offset]) |
2221 | offset++; | |
54a611b6 | 2222 | |
97f7e094 PZ |
2223 | wr_mas->r_max = offset < count ? wr_mas->pivots[offset] : mas->max; |
2224 | wr_mas->r_min = mas_safe_min(mas, wr_mas->pivots, offset); | |
54a611b6 LH |
2225 | wr_mas->offset_end = mas->offset = offset; |
2226 | } | |
2227 | ||
54a611b6 LH |
2228 | /* |
2229 | * mast_rebalance_next() - Rebalance against the next node | |
2230 | * @mast: The maple subtree state | |
54a611b6 LH |
2231 | */ |
2232 | static inline void mast_rebalance_next(struct maple_subtree_state *mast) | |
2233 | { | |
2234 | unsigned char b_end = mast->bn->b_end; | |
2235 | ||
2236 | mas_mab_cp(mast->orig_r, 0, mt_slot_count(mast->orig_r->node), | |
2237 | mast->bn, b_end); | |
2238 | mast->orig_r->last = mast->orig_r->max; | |
2239 | } | |
2240 | ||
2241 | /* | |
2242 | * mast_rebalance_prev() - Rebalance against the previous node | |
2243 | * @mast: The maple subtree state | |
54a611b6 LH |
2244 | */ |
2245 | static inline void mast_rebalance_prev(struct maple_subtree_state *mast) | |
2246 | { | |
2247 | unsigned char end = mas_data_end(mast->orig_l) + 1; | |
2248 | unsigned char b_end = mast->bn->b_end; | |
2249 | ||
2250 | mab_shift_right(mast->bn, end); | |
2251 | mas_mab_cp(mast->orig_l, 0, end - 1, mast->bn, 0); | |
2252 | mast->l->min = mast->orig_l->min; | |
2253 | mast->orig_l->index = mast->orig_l->min; | |
2254 | mast->bn->b_end = end + b_end; | |
2255 | mast->l->offset += end; | |
2256 | } | |
2257 | ||
2258 | /* | |
2259 | * mast_spanning_rebalance() - Rebalance nodes with nearest neighbour favouring | |
2260 | * the node to the right. Check the nodes to the right, then the left, at each | |
530f745c | 2261 | * level upwards until root is reached. |
54a611b6 LH |
2262 | * Data is copied into the @mast->bn. |
2263 | * @mast: The maple_subtree_state. | |
2264 | */ | |
2265 | static inline | |
2266 | bool mast_spanning_rebalance(struct maple_subtree_state *mast) | |
2267 | { | |
2268 | struct ma_state r_tmp = *mast->orig_r; | |
2269 | struct ma_state l_tmp = *mast->orig_l; | |
54a611b6 LH |
2270 | unsigned char depth = 0; |
2271 | ||
54a611b6 LH |
2272 | do { |
2273 | mas_ascend(mast->orig_r); | |
2274 | mas_ascend(mast->orig_l); | |
2275 | depth++; | |
54a611b6 | 2276 | if (mast->orig_r->offset < mas_data_end(mast->orig_r)) { |
54a611b6 LH |
2277 | mast->orig_r->offset++; |
2278 | do { | |
2279 | mas_descend(mast->orig_r); | |
2280 | mast->orig_r->offset = 0; | |
530f745c | 2281 | } while (--depth); |
54a611b6 LH |
2282 | |
2283 | mast_rebalance_next(mast); | |
54a611b6 LH |
2284 | *mast->orig_l = l_tmp; |
2285 | return true; | |
54a611b6 | 2286 | } else if (mast->orig_l->offset != 0) { |
54a611b6 LH |
2287 | mast->orig_l->offset--; |
2288 | do { | |
2289 | mas_descend(mast->orig_l); | |
2290 | mast->orig_l->offset = | |
2291 | mas_data_end(mast->orig_l); | |
530f745c | 2292 | } while (--depth); |
54a611b6 LH |
2293 | |
2294 | mast_rebalance_prev(mast); | |
54a611b6 LH |
2295 | *mast->orig_r = r_tmp; |
2296 | return true; | |
2297 | } | |
2298 | } while (!mte_is_root(mast->orig_r->node)); | |
2299 | ||
2300 | *mast->orig_r = r_tmp; | |
2301 | *mast->orig_l = l_tmp; | |
2302 | return false; | |
2303 | } | |
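/*
 * Example (hypothetical values, for illustration only): if neither side
 * has a usable sibling at this level, the walk ascends and retries;
 * after ascending three levels to find a next sibling, it descends
 * three levels at offset 0 before copying that node into the big node.
 */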
2304 | ||
2305 | /* | |
530f745c | 2306 | * mast_ascend() - Ascend the original left and right maple states. |
54a611b6 LH |
2307 | * @mast: the maple subtree state. |
2308 | * | |
530f745c LH |
2309 | * Ascend the original left and right sides. Set the offsets to point to the |
2310 | * data already in the new tree (@mast->l and @mast->r). | |
54a611b6 | 2311 | */ |
530f745c | 2312 | static inline void mast_ascend(struct maple_subtree_state *mast) |
54a611b6 LH |
2313 | { |
2314 | MA_WR_STATE(wr_mas, mast->orig_r, NULL); | |
54a611b6 LH |
2315 | mas_ascend(mast->orig_l); |
2316 | mas_ascend(mast->orig_r); | |
54a611b6 LH |
2317 | |
2318 | mast->orig_r->offset = 0; | |
2319 | mast->orig_r->index = mast->r->max; | |
2320 | /* last should be larger than or equal to index */ | |
2321 | if (mast->orig_r->last < mast->orig_r->index) | |
2322 | mast->orig_r->last = mast->orig_r->index; | |
530f745c | 2323 | |
54a611b6 LH |
2324 | wr_mas.type = mte_node_type(mast->orig_r->node); |
2325 | mas_wr_node_walk(&wr_mas); | |
2326 | /* Set up the left side of things */ | |
2327 | mast->orig_l->offset = 0; | |
2328 | mast->orig_l->index = mast->l->min; | |
2329 | wr_mas.mas = mast->orig_l; | |
2330 | wr_mas.type = mte_node_type(mast->orig_l->node); | |
2331 | mas_wr_node_walk(&wr_mas); | |
2332 | ||
2333 | mast->bn->type = wr_mas.type; | |
2334 | } | |
2335 | ||
2336 | /* | |
2337 | * mas_new_ma_node() - Create and return a new maple node. Helper function. | |
2338 | * @mas: the maple state with the allocations. | |
2339 | * @b_node: the maple_big_node with the type encoding. | |
2340 | * | |
2341 | * Use the node type from the maple_big_node to allocate a new node from the | |
2342 | * ma_state. This function exists mainly for code readability. | |
2343 | * | |
2344 | * Return: A new maple encoded node | |
2345 | */ | |
2346 | static inline struct maple_enode | |
2347 | *mas_new_ma_node(struct ma_state *mas, struct maple_big_node *b_node) | |
2348 | { | |
2349 | return mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), b_node->type); | |
2350 | } | |
2351 | ||
2352 | /* | |
2353 | * mas_mab_to_node() - Set up right and middle nodes | |
2354 | * | |
2355 | * @mas: the maple state that contains the allocations. | |
2356 | * @b_node: the node which contains the data. | |
2357 | * @left: The pointer which will have the left node | |
2358 | * @right: The pointer which may have the right node | |
2359 | * @middle: the pointer which may have the middle node (rare) | |
2360 | * @mid_split: the split location for the middle node | |
2361 | * | |
2362 | * Return: the split of left. | |
2363 | */ | |
2364 | static inline unsigned char mas_mab_to_node(struct ma_state *mas, | |
2365 | struct maple_big_node *b_node, struct maple_enode **left, | |
2366 | struct maple_enode **right, struct maple_enode **middle, | |
4f6a6bed | 2367 | unsigned char *mid_split) |
54a611b6 LH |
2368 | { |
2369 | unsigned char split = 0; | |
2370 | unsigned char slot_count = mt_slots[b_node->type]; | |
2371 | ||
2372 | *left = mas_new_ma_node(mas, b_node); | |
2373 | *right = NULL; | |
2374 | *middle = NULL; | |
2375 | *mid_split = 0; | |
2376 | ||
2377 | if (b_node->b_end < slot_count) { | |
2378 | split = b_node->b_end; | |
2379 | } else { | |
4f6a6bed | 2380 | split = mab_calc_split(mas, b_node, mid_split); |
54a611b6 LH |
2381 | *right = mas_new_ma_node(mas, b_node); |
2382 | } | |
2383 | ||
2384 | if (*mid_split) | |
2385 | *middle = mas_new_ma_node(mas, b_node); | |
2386 | ||
2387 | return split; | |
2388 | ||
2389 | } | |
2390 | ||
2391 | /* | |
2392 | * mab_set_b_end() - Add entry to b_node at b_node->b_end and increment the end | |
2393 | * pointer. | |
96ae4c90 WY |
2394 | * @b_node: the big node to add the entry |
2395 | * @mas: the maple state to get the pivot (mas->max) | |
2396 | * @entry: the entry to add, if NULL nothing happens. | |
54a611b6 LH |
2397 | */ |
2398 | static inline void mab_set_b_end(struct maple_big_node *b_node, | |
2399 | struct ma_state *mas, | |
2400 | void *entry) | |
2401 | { | |
2402 | if (!entry) | |
2403 | return; | |
2404 | ||
2405 | b_node->slot[b_node->b_end] = entry; | |
2406 | if (mt_is_alloc(mas->tree)) | |
2407 | b_node->gap[b_node->b_end] = mas_max_gap(mas); | |
2408 | b_node->pivot[b_node->b_end++] = mas->max; | |
2409 | } | |
2410 | ||
2411 | /* | |
2412 | * mas_set_split_parent() - combine_then_separate helper function. Sets the parent | |
2413 | * of @mas->node to either @left or @right, depending on @slot and @split | |
2414 | * | |
96ae4c90 WY |
2415 | * @mas: the maple state with the node that needs a parent |
2416 | * @left: possible parent 1 | |
2417 | * @right: possible parent 2 | |
2418 | * @slot: the slot the mas->node was placed | |
2419 | * @split: the split location between @left and @right | |
54a611b6 LH |
2420 | */ |
2421 | static inline void mas_set_split_parent(struct ma_state *mas, | |
2422 | struct maple_enode *left, | |
2423 | struct maple_enode *right, | |
2424 | unsigned char *slot, unsigned char split) | |
2425 | { | |
2426 | if (mas_is_none(mas)) | |
2427 | return; | |
2428 | ||
2429 | if ((*slot) <= split) | |
bf96715e | 2430 | mas_set_parent(mas, mas->node, left, *slot); |
54a611b6 | 2431 | else if (right) |
bf96715e | 2432 | mas_set_parent(mas, mas->node, right, (*slot) - split - 1); |
54a611b6 LH |
2433 | |
2434 | (*slot)++; | |
2435 | } | |
2436 | ||
2437 | /* | |
2438 | * mte_mid_split_check() - Check if the next node passes the mid-split | |
96ae4c90 WY |
2439 | * @l: Pointer to left encoded maple node. |
2440 | * @r: Pointer to the current right encoded maple node. | |
2441 | * @right: The encoded maple node to the right of the mid-split. | |
54a611b6 | 2442 | * @slot: The offset |
96ae4c90 | 2443 | * @split: The split location. |
54a611b6 LH |
2444 | * @mid_split: The middle split. |
2445 | */ | |
2446 | static inline void mte_mid_split_check(struct maple_enode **l, | |
2447 | struct maple_enode **r, | |
2448 | struct maple_enode *right, | |
2449 | unsigned char slot, | |
2450 | unsigned char *split, | |
2451 | unsigned char mid_split) | |
2452 | { | |
2453 | if (*r == right) | |
2454 | return; | |
2455 | ||
2456 | if (slot < mid_split) | |
2457 | return; | |
2458 | ||
2459 | *l = *r; | |
2460 | *r = right; | |
2461 | *split = mid_split; | |
2462 | } | |
2463 | ||
2464 | /* | |
2465 | * mast_set_split_parents() - Helper function to set three nodes parents. Slot | |
2466 | * is taken from @mast->l. | |
96ae4c90 WY |
2467 | * @mast: the maple subtree state |
2468 | * @left: the left node | |
2469 | * @right: the right node | |
2470 | * @split: the split location. | |
54a611b6 LH |
2471 | */ |
2472 | static inline void mast_set_split_parents(struct maple_subtree_state *mast, | |
2473 | struct maple_enode *left, | |
2474 | struct maple_enode *middle, | |
2475 | struct maple_enode *right, | |
2476 | unsigned char split, | |
2477 | unsigned char mid_split) | |
2478 | { | |
2479 | unsigned char slot; | |
2480 | struct maple_enode *l = left; | |
2481 | struct maple_enode *r = right; | |
2482 | ||
2483 | if (mas_is_none(mast->l)) | |
2484 | return; | |
2485 | ||
2486 | if (middle) | |
2487 | r = middle; | |
2488 | ||
2489 | slot = mast->l->offset; | |
2490 | ||
2491 | mte_mid_split_check(&l, &r, right, slot, &split, mid_split); | |
2492 | mas_set_split_parent(mast->l, l, r, &slot, split); | |
2493 | ||
2494 | mte_mid_split_check(&l, &r, right, slot, &split, mid_split); | |
2495 | mas_set_split_parent(mast->m, l, r, &slot, split); | |
2496 | ||
2497 | mte_mid_split_check(&l, &r, right, slot, &split, mid_split); | |
2498 | mas_set_split_parent(mast->r, l, r, &slot, split); | |
2499 | } | |
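/*
 * Example (hypothetical values, for illustration only): with no middle
 * node, slot starting at 6 and split = 7, the three states land at
 * slots 6, 7 and 8; the first two adopt the left node as parent and
 * the third adopts the right node at offset 8 - 7 - 1 = 0.
 */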
2500 | ||
2501 | /* | |
d5f6057c | 2502 | * mas_topiary_node() - Dispose of a single node |
530f745c | 2503 | * @mas: The maple state for pushing nodes |
| * @tmp_mas: The maple state holding the node to dispose of
530f745c | 2504 | * @in_rcu: If the tree is in rcu mode
54a611b6 | 2505 | * |
530f745c | 2506 | * The node will either be RCU freed or pushed back on the maple state. |
54a611b6 | 2507 | */ |
530f745c | 2508 | static inline void mas_topiary_node(struct ma_state *mas, |
067311d3 | 2509 | struct ma_state *tmp_mas, bool in_rcu) |
54a611b6 | 2510 | { |
530f745c | 2511 | struct maple_node *tmp; |
067311d3 | 2512 | struct maple_enode *enode; |
1238f6a2 | 2513 | |
067311d3 | 2514 | if (mas_is_none(tmp_mas)) |
530f745c | 2515 | return; |
1238f6a2 | 2516 | |
067311d3 | 2517 | enode = tmp_mas->node; |
530f745c LH |
2518 | tmp = mte_to_node(enode); |
2519 | mte_set_node_dead(enode); | |
2520 | if (in_rcu) | |
2521 | ma_free_rcu(tmp); | |
2522 | else | |
2523 | mas_push_node(mas, tmp); | |
2524 | } | |
54a611b6 | 2525 | |
530f745c LH |
2526 | /* |
2527 | * mas_topiary_replace() - Replace the data with new data, then repair the | |
2528 | * parent links within the new tree. Iterate over the dead sub-tree, collect | |
2529 | * the dead subtrees, and topiary the nodes that are no longer of use. | |
2530 | * | |
2531 | * The new tree will have up to three children with the correct parent. Keep | |
2532 | * track of the new entries as they need to be followed to find the next level | |
2533 | * of new entries. | |
2534 | * | |
2535 | * The old tree will have up to three children with the old parent. Keep track | |
2536 | * of the old entries as they may have more nodes below replaced. Nodes within | |
2537 | * [index, last] are dead subtrees, others need to be freed and followed. | |
2538 | * | |
2539 | * @mas: The maple state pointing at the new data | |
2540 | * @old_enode: The maple encoded node being replaced | |
f9d3a963 | 2541 | * @new_height: The new height of the tree as a result of the operation |
530f745c LH |
2542 | * |
2543 | */ | |
2544 | static inline void mas_topiary_replace(struct ma_state *mas, | |
f9d3a963 | 2545 | struct maple_enode *old_enode, unsigned char new_height) |
530f745c LH |
2546 | { |
2547 | struct ma_state tmp[3], tmp_next[3]; | |
2548 | MA_TOPIARY(subtrees, mas->tree); | |
2549 | bool in_rcu; | |
2550 | int i, n; | |
2551 | ||
2552 | /* Place data in tree & then mark node as old */ | |
f9d3a963 | 2553 | mas_put_in_tree(mas, old_enode, new_height); |
54a611b6 | 2554 | |
530f745c LH |
2555 | /* Update the parent pointers in the tree */ |
2556 | tmp[0] = *mas; | |
2557 | tmp[0].offset = 0; | |
067311d3 LH |
2558 | tmp[1].status = ma_none; |
2559 | tmp[2].status = ma_none; | |
530f745c LH |
2560 | while (!mte_is_leaf(tmp[0].node)) { |
2561 | n = 0; | |
2562 | for (i = 0; i < 3; i++) { | |
2563 | if (mas_is_none(&tmp[i])) | |
2564 | continue; | |
54a611b6 | 2565 | |
530f745c LH |
2566 | while (n < 3) { |
2567 | if (!mas_find_child(&tmp[i], &tmp_next[n])) | |
2568 | break; | |
2569 | n++; | |
2570 | } | |
54a611b6 | 2571 | |
530f745c LH |
2572 | mas_adopt_children(&tmp[i], tmp[i].node); |
2573 | } | |
54a611b6 | 2574 | |
530f745c LH |
2575 | if (MAS_WARN_ON(mas, n == 0)) |
2576 | break; | |
54a611b6 | 2577 | |
530f745c | 2578 | while (n < 3) |
067311d3 | 2579 | tmp_next[n++].status = ma_none; |
530f745c LH |
2580 | |
2581 | for (i = 0; i < 3; i++) | |
2582 | tmp[i] = tmp_next[i]; | |
2583 | } | |
2584 | ||
2585 | /* Collect the old nodes that need to be discarded */ | |
2586 | if (mte_is_leaf(old_enode)) | |
2587 | return mas_free(mas, old_enode); | |
2588 | ||
2589 | tmp[0] = *mas; | |
2590 | tmp[0].offset = 0; | |
2591 | tmp[0].node = old_enode; | |
067311d3 LH |
2592 | tmp[1].status = ma_none; |
2593 | tmp[2].status = ma_none; | |
530f745c LH |
2594 | in_rcu = mt_in_rcu(mas->tree); |
2595 | do { | |
2596 | n = 0; | |
2597 | for (i = 0; i < 3; i++) { | |
2598 | if (mas_is_none(&tmp[i])) | |
2599 | continue; | |
2600 | ||
2601 | while (n < 3) { | |
2602 | if (!mas_find_child(&tmp[i], &tmp_next[n])) | |
2603 | break; | |
2604 | ||
2605 | if ((tmp_next[n].min >= tmp_next->index) && | |
2606 | (tmp_next[n].max <= tmp_next->last)) { | |
2607 | mat_add(&subtrees, tmp_next[n].node); | |
067311d3 | 2608 | tmp_next[n].status = ma_none; |
530f745c LH |
2609 | } else { |
2610 | n++; | |
2611 | } | |
2612 | } | |
2613 | } | |
2614 | ||
2615 | if (MAS_WARN_ON(mas, n == 0)) | |
2616 | break; | |
2617 | ||
2618 | while (n < 3) | |
067311d3 | 2619 | tmp_next[n++].status = ma_none; |
530f745c LH |
2620 | |
2621 | for (i = 0; i < 3; i++) { | |
067311d3 | 2622 | mas_topiary_node(mas, &tmp[i], in_rcu); |
530f745c LH |
2623 | tmp[i] = tmp_next[i]; |
2624 | } | |
2625 | } while (!mte_is_leaf(tmp[0].node)); | |
2626 | ||
2627 | for (i = 0; i < 3; i++) | |
067311d3 | 2628 | mas_topiary_node(mas, &tmp[i], in_rcu); |
530f745c LH |
2629 | |
2630 | mas_mat_destroy(mas, &subtrees); | |
54a611b6 LH |
2631 | } |
2632 | ||
2633 | /* | |
530f745c | 2634 | * mas_wmb_replace() - Write memory barrier and replace |
54a611b6 | 2635 | * @mas: The maple state |
96ae4c90 | 2636 | * @old_enode: The old maple encoded node that is being replaced. |
f9d3a963 | 2637 | * @new_height: The new height of the tree as a result of the operation |
530f745c LH |
2638 | * |
2639 | * Updates gap as necessary. | |
54a611b6 | 2640 | */ |
530f745c | 2641 | static inline void mas_wmb_replace(struct ma_state *mas, |
f9d3a963 | 2642 | struct maple_enode *old_enode, unsigned char new_height) |
54a611b6 | 2643 | { |
530f745c | 2644 | /* Insert the new data in the tree */ |
f9d3a963 | 2645 | mas_topiary_replace(mas, old_enode, new_height); |
530f745c LH |
2646 | |
2647 | if (mte_is_leaf(mas->node)) | |
2648 | return; | |
2649 | ||
2650 | mas_update_gap(mas); | |
54a611b6 LH |
2651 | } |
2652 | ||
2653 | /* | |
2654 | * mast_cp_to_nodes() - Copy data out to nodes. | |
2655 | * @mast: The maple subtree state | |
2656 | * @left: The left encoded maple node | |
2657 | * @middle: The middle encoded maple node | |
2658 | * @right: The right encoded maple node | |
2659 | * @split: The location to split between left and (middle ? middle : right) | |
2660 | * @mid_split: The location to split between middle and right. | |
2661 | */ | |
2662 | static inline void mast_cp_to_nodes(struct maple_subtree_state *mast, | |
2663 | struct maple_enode *left, struct maple_enode *middle, | |
2664 | struct maple_enode *right, unsigned char split, unsigned char mid_split) | |
2665 | { | |
2666 | bool new_lmax = true; | |
2667 | ||
067311d3 LH |
2668 | mas_node_or_none(mast->l, left); |
2669 | mas_node_or_none(mast->m, middle); | |
2670 | mas_node_or_none(mast->r, right); | |
54a611b6 LH |
2671 | |
2672 | mast->l->min = mast->orig_l->min; | |
2673 | if (split == mast->bn->b_end) { | |
2674 | mast->l->max = mast->orig_r->max; | |
2675 | new_lmax = false; | |
2676 | } | |
2677 | ||
2678 | mab_mas_cp(mast->bn, 0, split, mast->l, new_lmax); | |
2679 | ||
2680 | if (middle) { | |
2681 | mab_mas_cp(mast->bn, 1 + split, mid_split, mast->m, true); | |
2682 | mast->m->min = mast->bn->pivot[split] + 1; | |
2683 | split = mid_split; | |
2684 | } | |
2685 | ||
2686 | mast->r->max = mast->orig_r->max; | |
2687 | if (right) { | |
2688 | mab_mas_cp(mast->bn, 1 + split, mast->bn->b_end, mast->r, false); | |
2689 | mast->r->min = mast->bn->pivot[split] + 1; | |
2690 | } | |
2691 | } | |
2692 | ||
2693 | /* | |
2694 | * mast_combine_cp_left - Copy in the original left side of the tree into the | |
2695 | * combined data set in the maple subtree state big node. | |
2696 | * @mast: The maple subtree state | |
2697 | */ | |
2698 | static inline void mast_combine_cp_left(struct maple_subtree_state *mast) | |
2699 | { | |
2700 | unsigned char l_slot = mast->orig_l->offset; | |
2701 | ||
2702 | if (!l_slot) | |
2703 | return; | |
2704 | ||
2705 | mas_mab_cp(mast->orig_l, 0, l_slot - 1, mast->bn, 0); | |
2706 | } | |
2707 | ||
2708 | /* | |
2709 | * mast_combine_cp_right() - Copy in the original right side of the tree into the | |
2710 | * combined data set in the maple subtree state big node. | |
2711 | * @mast: The maple subtree state | |
2712 | */ | |
2713 | static inline void mast_combine_cp_right(struct maple_subtree_state *mast) | |
2714 | { | |
2715 | if (mast->bn->pivot[mast->bn->b_end - 1] >= mast->orig_r->max) | |
2716 | return; | |
2717 | ||
2718 | mas_mab_cp(mast->orig_r, mast->orig_r->offset + 1, | |
2719 | mt_slot_count(mast->orig_r->node), mast->bn, | |
2720 | mast->bn->b_end); | |
2721 | mast->orig_r->last = mast->orig_r->max; | |
2722 | } | |
2723 | ||
2724 | /* | |
2725 | * mast_sufficient() - Check if the maple subtree state has enough data in the big | |
2726 | * node to create at least one sufficient node | |
2727 | * @mast: the maple subtree state | |
2728 | */ | |
2729 | static inline bool mast_sufficient(struct maple_subtree_state *mast) | |
2730 | { | |
2731 | if (mast->bn->b_end > mt_min_slot_count(mast->orig_l->node)) | |
2732 | return true; | |
2733 | ||
2734 | return false; | |
2735 | } | |
2736 | ||
2737 | /* | |
2738 | * mast_overflow() - Check if there is too much data in the subtree state for a | |
2739 | * single node. | |
2740 | * @mast: The maple subtree state | |
2741 | */ | |
2742 | static inline bool mast_overflow(struct maple_subtree_state *mast) | |
2743 | { | |
271152a9 | 2744 | if (mast->bn->b_end > mt_slot_count(mast->orig_l->node)) |
54a611b6 LH |
2745 | return true; |
2746 | ||
2747 | return false; | |
2748 | } | |
2749 | ||
2750 | static inline void *mtree_range_walk(struct ma_state *mas) | |
2751 | { | |
2752 | unsigned long *pivots; | |
2753 | unsigned char offset; | |
2754 | struct maple_node *node; | |
2755 | struct maple_enode *next, *last; | |
2756 | enum maple_type type; | |
2757 | void __rcu **slots; | |
2758 | unsigned char end; | |
2759 | unsigned long max, min; | |
2760 | unsigned long prev_max, prev_min; | |
2761 | ||
1b9c9183 LB |
2762 | next = mas->node; |
2763 | min = mas->min; | |
54a611b6 LH |
2764 | max = mas->max; |
2765 | do { | |
54a611b6 LH |
2766 | last = next; |
2767 | node = mte_to_node(next); | |
2768 | type = mte_node_type(next); | |
2769 | pivots = ma_pivots(node, type); | |
2770 | end = ma_data_end(node, type, pivots, max); | |
a3c63c8c LH |
2771 | prev_min = min; |
2772 | prev_max = max; | |
2773 | if (pivots[0] >= mas->index) { | |
2774 | offset = 0; | |
2775 | max = pivots[0]; | |
54a611b6 LH |
2776 | goto next; |
2777 | } | |
2778 | ||
a3c63c8c LH |
2779 | offset = 1; |
2780 | while (offset < end) { | |
2781 | if (pivots[offset] >= mas->index) { | |
2782 | max = pivots[offset]; | |
2783 | break; | |
2784 | } | |
54a611b6 | 2785 | offset++; |
a3c63c8c | 2786 | } |
54a611b6 | 2787 | |
54a611b6 | 2788 | min = pivots[offset - 1] + 1; |
54a611b6 LH |
2789 | next: |
2790 | slots = ma_slots(node, type); | |
2791 | next = mt_slot(mas->tree, slots, offset); | |
2792 | if (unlikely(ma_dead_node(node))) | |
2793 | goto dead_node; | |
2794 | } while (!ma_is_leaf(type)); | |
2795 | ||
31c532a8 | 2796 | mas->end = end; |
54a611b6 LH |
2797 | mas->offset = offset; |
2798 | mas->index = min; | |
2799 | mas->last = max; | |
2800 | mas->min = prev_min; | |
2801 | mas->max = prev_max; | |
2802 | mas->node = last; | |
831978e3 | 2803 | return (void *)next; |
54a611b6 LH |
2804 | |
2805 | dead_node: | |
2806 | mas_reset(mas); | |
2807 | return NULL; | |
2808 | } | |
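/*
 * Example (hypothetical values, for illustration only): walking index
 * 17 from a root with pivots {10, 20, 30} descends at offset 1 and
 * narrows the range to [11, 20]; at the leaf, mas->index and mas->last
 * are set to the range of the returned entry.
 */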
2809 | ||
2810 | /* | |
2811 | * mas_spanning_rebalance() - Rebalance across two nodes which may not be peers. | |
2812 | * @mas: The starting maple state | |
2813 | * @mast: The maple_subtree_state, keeps track of 4 maple states. | |
2814 | * @count: The estimated count of iterations needed. | |
2815 | * | |
2816 | * Follow the tree upwards from @l_mas and @r_mas for @count, or until the root | |
2817 | * is hit. First @b_node is split into two entries which are inserted into the | |
2818 | * next iteration of the loop. @b_node is returned populated with the final | |
2819 | * iteration. @mas is used to obtain allocations. orig_l_mas keeps track of the | |
2820 | * nodes that will remain active by using orig_l_mas->index and orig_l_mas->last | |
2821 | * to account for what has been copied into the new sub-tree. The update of | |
2822 | * orig_l_mas->last is used in mas_consume to find the slots that will need to | |
2823 | * be either freed or destroyed. orig_l_mas->depth keeps track of the height of | |
2824 | * the new sub-tree in case the sub-tree becomes the full tree. | |
54a611b6 | 2825 | */ |
ed4dfd9a | 2826 | static void mas_spanning_rebalance(struct ma_state *mas, |
54a611b6 LH |
2827 | struct maple_subtree_state *mast, unsigned char count) |
2828 | { | |
2829 | unsigned char split, mid_split; | |
2830 | unsigned char slot = 0; | |
f9d3a963 | 2831 | unsigned char new_height = 0; /* used if node is a new root */ |
54a611b6 | 2832 | struct maple_enode *left = NULL, *middle = NULL, *right = NULL; |
530f745c | 2833 | struct maple_enode *old_enode; |
54a611b6 LH |
2834 | |
2835 | MA_STATE(l_mas, mas->tree, mas->index, mas->index); | |
2836 | MA_STATE(r_mas, mas->tree, mas->index, mas->last); | |
2837 | MA_STATE(m_mas, mas->tree, mas->index, mas->index); | |
54a611b6 LH |
2838 | |
2839 | /* | |
2840 | * The tree needs to be rebalanced and leaves need to be kept at the same level. | |
2841 | * Rebalancing is done by use of the ``struct maple_topiary``. | |
2842 | */ | |
2843 | mast->l = &l_mas; | |
2844 | mast->m = &m_mas; | |
2845 | mast->r = &r_mas; | |
067311d3 | 2846 | l_mas.status = r_mas.status = m_mas.status = ma_none; |
0abb964a LH |
2847 | |
2848 | /* Check if this is not root and has sufficient data. */ | |
2849 | if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) && | |
54a611b6 LH |
2850 | unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type])) |
2851 | mast_spanning_rebalance(mast); | |
2852 | ||
54a611b6 LH |
2853 | /* |
2854 | * Each level of the tree is examined and balanced, pushing data to the left or | |
2855 | * right, or rebalancing against left or right nodes is employed to avoid | |
2856 | * rippling up the tree to limit the amount of churn. Once a new sub-section of | |
2857 | * the tree is created, there may be a mix of new and old nodes. The old nodes | |
2858 | * will have the incorrect parent pointers and currently be in two trees: the | |
2859 | * original tree and the partially new tree. To remedy the parent pointers in | |
2860 | * the old tree, the new data is swapped into the active tree and a walk down | |
2861 | * the tree is performed and the parent pointers are updated. | |
530f745c | 2862 | * See mas_topiary_replace() for more information. |
54a611b6 LH |
2863 | */ |
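/*
 * Illustrative sketch (hypothetical): a store spanning two leaves in a
 * three-level tree typically runs this loop once per level below the
 * shared ancestor, building up to three new nodes per pass while
 * new_height counts the levels created for the eventual root update.
 */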
2864 | while (count--) { | |
2865 | mast->bn->b_end--; | |
2866 | mast->bn->type = mte_node_type(mast->orig_l->node); | |
2867 | split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle, | |
4f6a6bed | 2868 | &mid_split); |
54a611b6 LH |
2869 | mast_set_split_parents(mast, left, middle, right, split, |
2870 | mid_split); | |
2871 | mast_cp_to_nodes(mast, left, middle, right, split, mid_split); | |
f9d3a963 | 2872 | new_height++; |
54a611b6 LH |
2873 | |
2874 | /* | |
2875 | * Copy data from next level in the tree to mast->bn from next | |
2876 | * iteration | |
2877 | */ | |
2878 | memset(mast->bn, 0, sizeof(struct maple_big_node)); | |
2879 | mast->bn->type = mte_node_type(left); | |
54a611b6 LH |
2880 | |
2881 | /* Root already stored in l->node. */ | |
2882 | if (mas_is_root_limits(mast->l)) | |
2883 | goto new_root; | |
2884 | ||
530f745c | 2885 | mast_ascend(mast); |
54a611b6 LH |
2886 | mast_combine_cp_left(mast); |
2887 | l_mas.offset = mast->bn->b_end; | |
2888 | mab_set_b_end(mast->bn, &l_mas, left); | |
2889 | mab_set_b_end(mast->bn, &m_mas, middle); | |
2890 | mab_set_b_end(mast->bn, &r_mas, right); | |
2891 | ||
2892 | /* Copy anything necessary out of the right node. */ | |
2893 | mast_combine_cp_right(mast); | |
54a611b6 LH |
2894 | mast->orig_l->last = mast->orig_l->max; |
2895 | ||
300a5b4f SK |
2896 | if (mast_sufficient(mast)) { |
2897 | if (mast_overflow(mast)) | |
2898 | continue; | |
2899 | ||
2900 | if (mast->orig_l->node == mast->orig_r->node) { | |
2901 | /* | |
2902 | * The data in b_node should be stored in one | |
2903 | * node and in the tree | |
2904 | */ | |
2905 | slot = mast->l->offset; | |
2906 | break; | |
2907 | } | |
54a611b6 | 2908 | |
54a611b6 | 2909 | continue; |
300a5b4f | 2910 | } |
54a611b6 LH |
2911 | |
2912 | /* May be a new root stored in mast->bn */ | |
2913 | if (mas_is_root_limits(mast->orig_l)) | |
2914 | break; | |
2915 | ||
2916 | mast_spanning_rebalance(mast); | |
2917 | ||
2918 | /* rebalancing from other nodes may require another loop. */ | |
2919 | if (!count) | |
2920 | count++; | |
2921 | } | |
2922 | ||
2923 | l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), | |
2924 | mte_node_type(mast->orig_l->node)); | |
f9d3a963 | 2925 | |
54a611b6 | 2926 | mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas, true); |
f9d3a963 | 2927 | new_height++; |
bf96715e | 2928 | mas_set_parent(mas, left, l_mas.node, slot); |
54a611b6 | 2929 | if (middle) |
bf96715e | 2930 | mas_set_parent(mas, middle, l_mas.node, ++slot); |
54a611b6 LH |
2931 | |
2932 | if (right) | |
bf96715e | 2933 | mas_set_parent(mas, right, l_mas.node, ++slot); |
54a611b6 LH |
2934 | |
2935 | if (mas_is_root_limits(mast->l)) { | |
2936 | new_root: | |
530f745c LH |
2937 | mas_mn(mast->l)->parent = ma_parent_ptr(mas_tree_parent(mas)); |
2938 | while (!mte_is_root(mast->orig_l->node)) | |
2939 | mast_ascend(mast); | |
54a611b6 LH |
2940 | } else { |
2941 | mas_mn(&l_mas)->parent = mas_mn(mast->orig_l)->parent; | |
2942 | } | |
2943 | ||
530f745c LH |
2944 | old_enode = mast->orig_l->node; |
2945 | mas->depth = l_mas.depth; | |
2946 | mas->node = l_mas.node; | |
2947 | mas->min = l_mas.min; | |
2948 | mas->max = l_mas.max; | |
2949 | mas->offset = l_mas.offset; | |
f9d3a963 | 2950 | mas_wmb_replace(mas, old_enode, new_height); |
54a611b6 | 2951 | mtree_range_walk(mas); |
ed4dfd9a | 2952 | return; |
54a611b6 LH |
2953 | } |
2954 | ||
2955 | /* | |
2956 | * mas_rebalance() - Rebalance a given node. | |
2957 | * @mas: The maple state | |
2958 | * @b_node: The big maple node. | |
2959 | * | |
2960 | * Rebalance two nodes into a single node or two new nodes that are sufficient. | |
2961 | * Continue upwards until tree is sufficient. | |
54a611b6 | 2962 | */ |
ed4dfd9a | 2963 | static inline void mas_rebalance(struct ma_state *mas, |
54a611b6 LH |
2964 | struct maple_big_node *b_node) |
2965 | { | |
2966 | char empty_count = mas_mt_height(mas); | |
2967 | struct maple_subtree_state mast; | |
2968 | unsigned char shift, b_end = ++b_node->b_end; | |
2969 | ||
2970 | MA_STATE(l_mas, mas->tree, mas->index, mas->last); | |
2971 | MA_STATE(r_mas, mas->tree, mas->index, mas->last); | |
2972 | ||
2973 | trace_ma_op(__func__, mas); | |
2974 | ||
2975 | /* | |
2976 | * Rebalancing occurs if a node is insufficient. Data is rebalanced | |
2977 | * against the node to the right if it exists, otherwise the node to the | |
2978 | * left of this node is rebalanced against this node. If rebalancing | |
2979 | * causes just one node to be produced instead of two, then the parent | |
2980 | * is also examined and rebalanced if it is insufficient. Every level | |
2981 | * tries to combine the data in the same way. If one node contains the | |
2982 | * entire range of the tree, then that node is used as a new root node. | |
2983 | */ | |
54a611b6 LH |
2984 | |
2985 | mast.orig_l = &l_mas; | |
2986 | mast.orig_r = &r_mas; | |
2987 | mast.bn = b_node; | |
2988 | mast.bn->type = mte_node_type(mas->node); | |
2989 | ||
2990 | l_mas = r_mas = *mas; | |
2991 | ||
2992 | if (mas_next_sibling(&r_mas)) { | |
2993 | mas_mab_cp(&r_mas, 0, mt_slot_count(r_mas.node), b_node, b_end); | |
2994 | r_mas.last = r_mas.index = r_mas.max; | |
2995 | } else { | |
2996 | mas_prev_sibling(&l_mas); | |
2997 | shift = mas_data_end(&l_mas) + 1; | |
2998 | mab_shift_right(b_node, shift); | |
2999 | mas->offset += shift; | |
3000 | mas_mab_cp(&l_mas, 0, shift - 1, b_node, 0); | |
3001 | b_node->b_end = shift + b_end; | |
3002 | l_mas.index = l_mas.last = l_mas.min; | |
3003 | } | |
3004 | ||
3005 | return mas_spanning_rebalance(mas, &mast, empty_count); | |
3006 | } | |
3007 | ||
3008 | /* | |
3009 | * mas_destroy_rebalance() - Rebalance left-most node while destroying the maple | |
3010 | * state. | |
3011 | * @mas: The maple state | |
3012 | * @end: The end of the left-most node. | |
3013 | * | |
3014 | * During a mass-insert event (such as forking), it may be necessary to | |
3015 | * rebalance the left-most node when it is not sufficient. | |
3016 | */ | |
3017 | static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end) | |
3018 | { | |
3019 | enum maple_type mt = mte_node_type(mas->node); | |
3020 | struct maple_node reuse, *newnode, *parent, *new_left, *left, *node; | |
72bcf4aa | 3021 | struct maple_enode *eparent, *old_eparent; |
54a611b6 LH |
3022 | unsigned char offset, tmp, split = mt_slots[mt] / 2; |
3023 | void __rcu **l_slots, **slots; | |
3024 | unsigned long *l_pivs, *pivs, gap; | |
3025 | bool in_rcu = mt_in_rcu(mas->tree); | |
f9d3a963 | 3026 | unsigned char new_height = mas_mt_height(mas); |
54a611b6 LH |
3027 | |
3028 | MA_STATE(l_mas, mas->tree, mas->index, mas->last); | |
3029 | ||
3030 | l_mas = *mas; | |
3031 | mas_prev_sibling(&l_mas); | |
3032 | ||
3033 | /* set up node. */ | |
3034 | if (in_rcu) { | |
54a611b6 LH |
3035 | newnode = mas_pop_node(mas); |
3036 | } else { | |
3037 | newnode = &reuse; | |
3038 | } | |
3039 | ||
3040 | node = mas_mn(mas); | |
3041 | newnode->parent = node->parent; | |
3042 | slots = ma_slots(newnode, mt); | |
3043 | pivs = ma_pivots(newnode, mt); | |
3044 | left = mas_mn(&l_mas); | |
3045 | l_slots = ma_slots(left, mt); | |
3046 | l_pivs = ma_pivots(left, mt); | |
3047 | if (!l_slots[split]) | |
3048 | split++; | |
3049 | tmp = mas_data_end(&l_mas) - split; | |
3050 | ||
3051 | memcpy(slots, l_slots + split + 1, sizeof(void *) * tmp); | |
3052 | memcpy(pivs, l_pivs + split + 1, sizeof(unsigned long) * tmp); | |
3053 | pivs[tmp] = l_mas.max; | |
3054 | memcpy(slots + tmp, ma_slots(node, mt), sizeof(void *) * end); | |
3055 | memcpy(pivs + tmp, ma_pivots(node, mt), sizeof(unsigned long) * end); | |
3056 | ||
3057 | l_mas.max = l_pivs[split]; | |
3058 | mas->min = l_mas.max + 1; | |
72bcf4aa | 3059 | old_eparent = mt_mk_node(mte_parent(l_mas.node), |
afc754c6 | 3060 | mas_parent_type(&l_mas, l_mas.node)); |
54a611b6 LH |
3061 | tmp += end; |
3062 | if (!in_rcu) { | |
3063 | unsigned char max_p = mt_pivots[mt]; | |
3064 | unsigned char max_s = mt_slots[mt]; | |
3065 | ||
3066 | if (tmp < max_p) | |
3067 | memset(pivs + tmp, 0, | |
fb20e99a | 3068 | sizeof(unsigned long) * (max_p - tmp)); |
54a611b6 LH |
3069 | |
3070 | if (tmp < mt_slots[mt]) | |
3071 | memset(slots + tmp, 0, sizeof(void *) * (max_s - tmp)); | |
3072 | ||
3073 | memcpy(node, newnode, sizeof(struct maple_node)); | |
3074 | ma_set_meta(node, mt, 0, tmp - 1); | |
72bcf4aa | 3075 | mte_set_pivot(old_eparent, mte_parent_slot(l_mas.node), |
54a611b6 LH |
3076 | l_pivs[split]); |
3077 | ||
3078 | /* Remove data from l_pivs. */ | |
3079 | tmp = split + 1; | |
3080 | memset(l_pivs + tmp, 0, sizeof(unsigned long) * (max_p - tmp)); | |
3081 | memset(l_slots + tmp, 0, sizeof(void *) * (max_s - tmp)); | |
3082 | ma_set_meta(left, mt, 0, split); | |
72bcf4aa | 3083 | eparent = old_eparent; |
54a611b6 LH |
3084 | |
3085 | goto done; | |
3086 | } | |
3087 | ||
3088 | /* RCU requires replacing both l_mas, mas, and parent. */ | |
3089 | mas->node = mt_mk_node(newnode, mt); | |
3090 | ma_set_meta(newnode, mt, 0, tmp); | |
3091 | ||
3092 | new_left = mas_pop_node(mas); | |
3093 | new_left->parent = left->parent; | |
3094 | mt = mte_node_type(l_mas.node); | |
3095 | slots = ma_slots(new_left, mt); | |
3096 | pivs = ma_pivots(new_left, mt); | |
3097 | memcpy(slots, l_slots, sizeof(void *) * split); | |
3098 | memcpy(pivs, l_pivs, sizeof(unsigned long) * split); | |
3099 | ma_set_meta(new_left, mt, 0, split); | |
3100 | l_mas.node = mt_mk_node(new_left, mt); | |
3101 | ||
3102 | /* replace parent. */ | |
3103 | offset = mte_parent_slot(mas->node); | |
afc754c6 | 3104 | mt = mas_parent_type(&l_mas, l_mas.node); |
54a611b6 LH |
3105 | parent = mas_pop_node(mas); |
3106 | slots = ma_slots(parent, mt); | |
3107 | pivs = ma_pivots(parent, mt); | |
72bcf4aa | 3108 | memcpy(parent, mte_to_node(old_eparent), sizeof(struct maple_node)); |
54a611b6 LH |
3109 | rcu_assign_pointer(slots[offset], mas->node); |
3110 | rcu_assign_pointer(slots[offset - 1], l_mas.node); | |
3111 | pivs[offset - 1] = l_mas.max; | |
3112 | eparent = mt_mk_node(parent, mt); | |
3113 | done: | |
3114 | gap = mas_leaf_max_gap(mas); | |
3115 | mte_set_gap(eparent, mte_parent_slot(mas->node), gap); | |
3116 | gap = mas_leaf_max_gap(&l_mas); | |
3117 | mte_set_gap(eparent, mte_parent_slot(l_mas.node), gap); | |
3118 | mas_ascend(mas); | |
3119 | ||
72bcf4aa | 3120 | if (in_rcu) { |
f9d3a963 | 3121 | mas_replace_node(mas, old_eparent, new_height); |
72bcf4aa LH |
3122 | mas_adopt_children(mas, mas->node); |
3123 | } | |
54a611b6 LH |
3124 | |
3125 | mas_update_gap(mas); | |
3126 | } | |
3127 | ||
3128 | /* | |
3129 | * mas_split_final_node() - Split the final node in a subtree operation. | |
3130 | * @mast: the maple subtree state | |
3131 | * @mas: The maple state | |
54a611b6 | 3132 | */ |
d9d9bd97 | 3133 | static inline void mas_split_final_node(struct maple_subtree_state *mast, |
f9d3a963 | 3134 | struct ma_state *mas) |
54a611b6 LH |
3135 | { |
3136 | struct maple_enode *ancestor; | |
3137 | ||
3138 | if (mte_is_root(mas->node)) { | |
3139 | if (mt_is_alloc(mas->tree)) | |
3140 | mast->bn->type = maple_arange_64; | |
3141 | else | |
3142 | mast->bn->type = maple_range_64; | |
54a611b6 LH |
3143 | } |
3144 | /* | |
3145 | * Only a single node is used here, which could be the root. |
3146 | * The big_node data should just fit in a single node. |
3147 | */ | |
3148 | ancestor = mas_new_ma_node(mas, mast->bn); | |
bf96715e LH |
3149 | mas_set_parent(mas, mast->l->node, ancestor, mast->l->offset); |
3150 | mas_set_parent(mas, mast->r->node, ancestor, mast->r->offset); | |
54a611b6 LH |
3151 | mte_to_node(ancestor)->parent = mas_mn(mas)->parent; |
3152 | ||
3153 | mast->l->node = ancestor; | |
3154 | mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, mast->l, true); | |
3155 | mas->offset = mast->bn->b_end - 1; | |
54a611b6 LH |
3156 | } |
3157 | ||
3158 | /* | |
3159 | * mast_fill_bnode() - Copy data into the big node in the subtree state | |
3160 | * @mast: The maple subtree state | |
3161 | * @mas: the maple state | |
3162 | * @skip: The number of entries to skip when inserting the new nodes. |
3163 | */ | |
3164 | static inline void mast_fill_bnode(struct maple_subtree_state *mast, | |
3165 | struct ma_state *mas, | |
3166 | unsigned char skip) | |
3167 | { | |
3168 | bool cp = true; | |
54a611b6 LH |
3169 | unsigned char split; |
3170 | ||
5059aa63 | 3171 | memset(mast->bn, 0, sizeof(struct maple_big_node)); |
54a611b6 LH |
3172 | |
3173 | if (mte_is_root(mas->node)) { | |
3174 | cp = false; | |
3175 | } else { | |
3176 | mas_ascend(mas); | |
54a611b6 LH |
3177 | mas->offset = mte_parent_slot(mas->node); |
3178 | } | |
3179 | ||
3180 | if (cp && mast->l->offset) | |
3181 | mas_mab_cp(mas, 0, mast->l->offset - 1, mast->bn, 0); | |
3182 | ||
3183 | split = mast->bn->b_end; | |
3184 | mab_set_b_end(mast->bn, mast->l, mast->l->node); | |
3185 | mast->r->offset = mast->bn->b_end; | |
3186 | mab_set_b_end(mast->bn, mast->r, mast->r->node); | |
3187 | if (mast->bn->pivot[mast->bn->b_end - 1] == mas->max) | |
3188 | cp = false; | |
3189 | ||
3190 | if (cp) | |
3191 | mas_mab_cp(mas, split + skip, mt_slot_count(mas->node) - 1, | |
3192 | mast->bn, mast->bn->b_end); | |
3193 | ||
3194 | mast->bn->b_end--; | |
3195 | mast->bn->type = mte_node_type(mas->node); | |
3196 | } | |
3197 | ||
3198 | /* | |
3199 | * mast_split_data() - Split the data in the subtree state big node into regular | |
3200 | * nodes. | |
3201 | * @mast: The maple subtree state | |
3202 | * @mas: The maple state | |
3203 | * @split: The location to split the big node | |
3204 | */ | |
3205 | static inline void mast_split_data(struct maple_subtree_state *mast, | |
3206 | struct ma_state *mas, unsigned char split) | |
3207 | { | |
3208 | unsigned char p_slot; | |
3209 | ||
3210 | mab_mas_cp(mast->bn, 0, split, mast->l, true); | |
3211 | mte_set_pivot(mast->r->node, 0, mast->r->max); | |
3212 | mab_mas_cp(mast->bn, split + 1, mast->bn->b_end, mast->r, false); | |
3213 | mast->l->offset = mte_parent_slot(mas->node); | |
3214 | mast->l->max = mast->bn->pivot[split]; | |
3215 | mast->r->min = mast->l->max + 1; | |
3216 | if (mte_is_leaf(mas->node)) | |
3217 | return; | |
3218 | ||
3219 | p_slot = mast->orig_l->offset; | |
3220 | mas_set_split_parent(mast->orig_l, mast->l->node, mast->r->node, | |
3221 | &p_slot, split); | |
3222 | mas_set_split_parent(mast->orig_r, mast->l->node, mast->r->node, | |
3223 | &p_slot, split); | |
3224 | } | |
3225 | ||
3226 | /* | |
3227 | * mas_push_data() - Push data to the left or right sibling instead of |
3228 | * splitting a node, if there is room. |
3229 | * @mas: The maple state | |
54a611b6 LH |
3230 | * @mast: The maple subtree state |
3231 | * @left: Push left or not. | |
3232 | * | |
3233 | * Keeping the height of the tree low means faster lookups. | |
3234 | * | |
3235 | * Return: True if pushed, false otherwise. | |
3236 | */ | |
f9d3a963 SK |
3237 | static inline bool mas_push_data(struct ma_state *mas, |
3238 | struct maple_subtree_state *mast, bool left) | |
54a611b6 LH |
3239 | { |
3240 | unsigned char slot_total = mast->bn->b_end; | |
3241 | unsigned char end, space, split; | |
3242 | ||
3243 | MA_STATE(tmp_mas, mas->tree, mas->index, mas->last); | |
3244 | tmp_mas = *mas; | |
3245 | tmp_mas.depth = mast->l->depth; | |
3246 | ||
3247 | if (left && !mas_prev_sibling(&tmp_mas)) | |
3248 | return false; | |
3249 | else if (!left && !mas_next_sibling(&tmp_mas)) | |
3250 | return false; | |
3251 | ||
3252 | end = mas_data_end(&tmp_mas); | |
3253 | slot_total += end; | |
3254 | space = 2 * mt_slot_count(mas->node) - 2; | |
3255 | /* -2 instead of -1 to ensure there isn't a triple split */ | |
3256 | if (ma_is_leaf(mast->bn->type)) | |
3257 | space--; | |
3258 | ||
3259 | if (mas->max == ULONG_MAX) | |
3260 | space--; | |
3261 | ||
3262 | if (slot_total >= space) | |
3263 | return false; | |
3264 | ||
3265 | /* Get the data; Fill mast->bn */ | |
3266 | mast->bn->b_end++; | |
3267 | if (left) { | |
3268 | mab_shift_right(mast->bn, end + 1); | |
3269 | mas_mab_cp(&tmp_mas, 0, end, mast->bn, 0); | |
3270 | mast->bn->b_end = slot_total + 1; | |
3271 | } else { | |
3272 | mas_mab_cp(&tmp_mas, 0, end, mast->bn, mast->bn->b_end); | |
3273 | } | |
3274 | ||
3275 | /* Configure mast for splitting of mast->bn */ | |
3276 | split = mt_slots[mast->bn->type] - 2; | |
3277 | if (left) { | |
3278 | /* Switch mas to prev node */ | |
54a611b6 LH |
3279 | *mas = tmp_mas; |
3280 | /* Start using mast->l for the left side. */ | |
3281 | tmp_mas.node = mast->l->node; | |
3282 | *mast->l = tmp_mas; | |
3283 | } else { | |
54a611b6 LH |
3284 | tmp_mas.node = mast->r->node; |
3285 | *mast->r = tmp_mas; | |
3286 | split = slot_total - split; | |
3287 | } | |
3288 | split = mab_no_null_split(mast->bn, split, mt_slots[mast->bn->type]); | |
3289 | /* Update parent slot for split calculation. */ | |
3290 | if (left) | |
3291 | mast->orig_l->offset += end + 1; | |
3292 | ||
3293 | mast_split_data(mast, mas, split); | |
3294 | mast_fill_bnode(mast, mas, 2); | |
f9d3a963 | 3295 | mas_split_final_node(mast, mas); |
54a611b6 LH |
3296 | return true; |
3297 | } | |
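The push-vs-split decision above boils down to a capacity check with slack reserved. A hedged, standalone rendering of that arithmetic (slot counts and entry counts are illustrative; the real values come from mt_slot_count() and the big node):

#include <stdbool.h>
#include <stdio.h>

/*
 * Shape of the space check: pushing only pays off when this node's data
 * plus the sibling's fits in two nodes with slack, so a later store
 * cannot force a triple split.
 */
static bool can_push(int slot_count, int b_end, int sibling_end,
		     bool leaf, bool rightmost)
{
	int slot_total = b_end + sibling_end;
	int space = 2 * slot_count - 2;	/* -2 avoids a triple split */

	if (leaf)
		space--;	/* leaves may need a trailing NULL */
	if (rightmost)
		space--;	/* mas->max == ULONG_MAX costs a slot */

	return slot_total < space;
}

int main(void)
{
	/* 16-slot nodes: 15 + 12 = 27 entries fit below 2*16 - 2 = 30. */
	printf("%d\n", can_push(16, 15, 12, false, false));	/* 1 */
	/* 16 + 15 = 31 does not fit; fall through to a real split. */
	printf("%d\n", can_push(16, 16, 15, false, false));	/* 0 */
	return 0;
}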
3298 | ||
3299 | /* | |
3300 | * mas_split() - Split data that is too big for one node into two. | |
3301 | * @mas: The maple state | |
3302 | * @b_node: The maple big node | |
54a611b6 | 3303 | */ |
ed4dfd9a | 3304 | static void mas_split(struct ma_state *mas, struct maple_big_node *b_node) |
54a611b6 | 3305 | { |
54a611b6 LH |
3306 | struct maple_subtree_state mast; |
3307 | int height = 0; | |
f9d3a963 | 3308 | unsigned int orig_height = mas_mt_height(mas); |
54a611b6 | 3309 | unsigned char mid_split, split = 0; |
530f745c | 3310 | struct maple_enode *old; |
54a611b6 LH |
3311 | |
3312 | /* | |
3313 | * Splitting is handled differently from any other B-tree; the Maple | |
3314 | * Tree splits upwards. Splitting up means that the split operation | |
3315 | * occurs when the walk of the tree hits the leaves and not on the way | |
3316 | * down. The reason for splitting up is that it is impossible to know | |
3317 | * how much space will be needed until the leaf is (or leaves are) | |
3318 | * reached. Since overwriting data is allowed and a range could | |
3319 | * overwrite more than one range or result in changing one entry into 3 | |
3320 | * entries, it is impossible to know if a split is required until the | |
3321 | * data is examined. | |
3322 | * | |
3323 | * Splitting is a balancing act between keeping allocations to a minimum | |
3324 | * and avoiding a 'jitter' event where a tree is expanded to make room | |
3325 | * for an entry followed by a contraction when the entry is removed. To | |
3326 | * accomplish the balance, there are empty slots remaining in both left | |
3327 | * and right nodes after a split. | |
3328 | */ | |
3329 | MA_STATE(l_mas, mas->tree, mas->index, mas->last); | |
3330 | MA_STATE(r_mas, mas->tree, mas->index, mas->last); | |
3331 | MA_STATE(prev_l_mas, mas->tree, mas->index, mas->last); | |
3332 | MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last); | |
54a611b6 LH |
3333 | |
3334 | trace_ma_op(__func__, mas); | |
54a611b6 LH |
3335 | |
3336 | mast.l = &l_mas; | |
3337 | mast.r = &r_mas; | |
3338 | mast.orig_l = &prev_l_mas; | |
3339 | mast.orig_r = &prev_r_mas; | |
54a611b6 LH |
3340 | mast.bn = b_node; |
3341 | ||
f9d3a963 | 3342 | while (height++ <= orig_height) { |
54a611b6 | 3343 | if (mt_slots[b_node->type] > b_node->b_end) { |
f9d3a963 | 3344 | mas_split_final_node(&mast, mas); |
54a611b6 LH |
3345 | break; |
3346 | } | |
3347 | ||
3348 | l_mas = r_mas = *mas; | |
3349 | l_mas.node = mas_new_ma_node(mas, b_node); | |
3350 | r_mas.node = mas_new_ma_node(mas, b_node); | |
3351 | /* | |
3352 | * Another way that 'jitter' is avoided is to terminate the upward split early |
3353 | * if the left or right node has space to spare. This is referred to as |
3354 | * "pushing left" or "pushing right" and is similar to the B* tree, except that |
3355 | * the left or right node can rarely be reused in place due to RCU. The ripple |
3356 | * upwards is still halted, which is a significant saving. |
3357 | */ | |
3358 | /* Try to push left. */ | |
f9d3a963 SK |
3359 | if (mas_push_data(mas, &mast, true)) { |
3360 | height++; | |
54a611b6 | 3361 | break; |
f9d3a963 | 3362 | } |
54a611b6 | 3363 | /* Try to push right. */ |
f9d3a963 SK |
3364 | if (mas_push_data(mas, &mast, false)) { |
3365 | height++; | |
54a611b6 | 3366 | break; |
f9d3a963 | 3367 | } |
54a611b6 | 3368 | |
4f6a6bed | 3369 | split = mab_calc_split(mas, b_node, &mid_split); |
54a611b6 LH |
3370 | mast_split_data(&mast, mas, split); |
3371 | /* | |
3372 | * Usually correct, mab_mas_cp in the above call overwrites | |
3373 | * r->max. | |
3374 | */ | |
3375 | mast.r->max = mas->max; | |
3376 | mast_fill_bnode(&mast, mas, 1); | |
3377 | prev_l_mas = *mast.l; | |
3378 | prev_r_mas = *mast.r; | |
3379 | } | |
3380 | ||
3381 | /* Set the original node as dead */ | |
530f745c | 3382 | old = mas->node; |
54a611b6 | 3383 | mas->node = l_mas.node; |
f9d3a963 | 3384 | mas_wmb_replace(mas, old, height); |
54a611b6 | 3385 | mtree_range_walk(mas); |
ed4dfd9a | 3386 | return; |
54a611b6 LH |
3387 | } |
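The anti-jitter slack described above means a split never packs either result node full. A toy calculation of the split point (illustrative only; the real choice comes from mab_calc_split(), which also respects minimum occupancy and NULL placement):

#include <stdio.h>

/* Toy split point: divide b_end so both halves keep some slack. */
static int toy_split(int b_end, int slot_count)
{
	int split = b_end / 2;

	/* Neither side may end up full, or the next store splits again. */
	if (b_end - split > slot_count - 1)
		split = b_end - (slot_count - 1);
	return split;
}

int main(void)
{
	/* 17 entries overflow a 16-slot node: offsets 0-7 left, 8-16 right. */
	int split = toy_split(17, 16);

	printf("left: 0..%d, right: %d..16\n", split - 1, split);
	return 0;
}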
3388 | ||
3389 | /* | |
3390 | * mas_commit_b_node() - Commit the big node into the tree. | |
3391 | * @wr_mas: The maple write state | |
3392 | * @b_node: The maple big node | |
54a611b6 | 3393 | */ |
ed4dfd9a | 3394 | static noinline_for_kasan void mas_commit_b_node(struct ma_wr_state *wr_mas, |
62c7b2b9 | 3395 | struct maple_big_node *b_node) |
54a611b6 | 3396 | { |
62c7b2b9 | 3397 | enum store_type type = wr_mas->mas->store_type; |
54a611b6 | 3398 | |
62c7b2b9 | 3399 | WARN_ON_ONCE(type != wr_rebalance && type != wr_split_store); |
54a611b6 | 3400 | |
62c7b2b9 SK |
3401 | if (type == wr_rebalance) |
3402 | return mas_rebalance(wr_mas->mas, b_node); | |
54a611b6 | 3403 | |
62c7b2b9 | 3404 | return mas_split(wr_mas->mas, b_node); |
54a611b6 LH |
3405 | } |
3406 | ||
3407 | /* | |
3408 | * mas_root_expand() - Expand a root to a node | |
3409 | * @mas: The maple state | |
3410 | * @entry: The entry to store into the tree | |
3411 | */ | |
cefbcf20 | 3412 | static inline void mas_root_expand(struct ma_state *mas, void *entry) |
54a611b6 LH |
3413 | { |
3414 | void *contents = mas_root_locked(mas); | |
3415 | enum maple_type type = maple_leaf_64; | |
3416 | struct maple_node *node; | |
3417 | void __rcu **slots; | |
3418 | unsigned long *pivots; | |
3419 | int slot = 0; | |
3420 | ||
54a611b6 LH |
3421 | node = mas_pop_node(mas); |
3422 | pivots = ma_pivots(node, type); | |
3423 | slots = ma_slots(node, type); | |
4ffc2ee2 | 3424 | node->parent = ma_parent_ptr(mas_tree_parent(mas)); |
54a611b6 | 3425 | mas->node = mt_mk_node(node, type); |
067311d3 | 3426 | mas->status = ma_active; |
54a611b6 LH |
3427 | |
3428 | if (mas->index) { | |
3429 | if (contents) { | |
3430 | rcu_assign_pointer(slots[slot], contents); | |
3431 | if (likely(mas->index > 1)) | |
3432 | slot++; | |
3433 | } | |
3434 | pivots[slot++] = mas->index - 1; | |
3435 | } | |
3436 | ||
3437 | rcu_assign_pointer(slots[slot], entry); | |
3438 | mas->offset = slot; | |
3439 | pivots[slot] = mas->last; | |
3440 | if (mas->last != ULONG_MAX) | |
3c769fd8 PZ |
3441 | pivots[++slot] = ULONG_MAX; |
3442 | ||
f9d3a963 | 3443 | mt_set_height(mas->tree, 1); |
c45ea315 | 3444 | ma_set_meta(node, maple_leaf_64, 0, slot); |
54a611b6 LH |
3445 | /* swap the new root into the tree */ |
3446 | rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node)); | |
cefbcf20 | 3447 | return; |
54a611b6 LH |
3448 | } |
3449 | ||
0ea120b2 WY |
3450 | /* |
3451 | * mas_store_root() - Store a value into the root. |
3452 | * @mas: The maple state | |
3453 | * @entry: The entry to store. | |
3454 | * | |
3455 | * There is no root node now and we are storing a value into the root - this | |
3456 | * function either assigns the pointer or expands into a node. | |
3457 | */ | |
54a611b6 LH |
3458 | static inline void mas_store_root(struct ma_state *mas, void *entry) |
3459 | { | |
0ea120b2 WY |
3460 | if (!entry) { |
3461 | if (!mas->index) | |
3462 | rcu_assign_pointer(mas->tree->ma_root, NULL); | |
3463 | } else if (likely((mas->last != 0) || (mas->index != 0))) | |
54a611b6 LH |
3464 | mas_root_expand(mas, entry); |
3465 | else if (((unsigned long) (entry) & 3) == 2) | |
3466 | mas_root_expand(mas, entry); | |
3467 | else { | |
3468 | rcu_assign_pointer(mas->tree->ma_root, entry); | |
067311d3 | 3469 | mas->status = ma_start; |
54a611b6 LH |
3470 | } |
3471 | } | |
3472 | ||
3473 | /* | |
3474 | * mas_is_span_wr() - Check if the write needs to be treated as a write that | |
3475 | * spans the node. | |
96ae4c90 | 3476 | * @wr_mas: The maple write state |
54a611b6 LH |
3477 | * |
3478 | * Spanning writes are writes that start in one node and end in another, or |
3479 | * writes of a %NULL that will cause the node to end with a %NULL. |
3480 | * | |
3481 | * Return: True if this is a spanning write, false otherwise. | |
3482 | */ | |
3483 | static bool mas_is_span_wr(struct ma_wr_state *wr_mas) | |
3484 | { | |
bc147f0f | 3485 | unsigned long max = wr_mas->r_max; |
54a611b6 | 3486 | unsigned long last = wr_mas->mas->last; |
54a611b6 LH |
3487 | enum maple_type type = wr_mas->type; |
3488 | void *entry = wr_mas->entry; | |
3489 | ||
bc147f0f PZ |
3490 | /* Contained in this pivot, fast path */ |
3491 | if (last < max) | |
54a611b6 LH |
3492 | return false; |
3493 | ||
bc147f0f PZ |
3494 | if (ma_is_leaf(type)) { |
3495 | max = wr_mas->mas->max; | |
54a611b6 LH |
3496 | if (last < max) |
3497 | return false; | |
bc147f0f | 3498 | } |
54a611b6 | 3499 | |
bc147f0f | 3500 | if (last == max) { |
54a611b6 | 3501 | /* |
bc147f0f PZ |
3502 | * The last entry of leaf node cannot be NULL unless it is the |
3503 | * rightmost node (writing ULONG_MAX), otherwise it spans slots. | |
54a611b6 | 3504 | */ |
bc147f0f | 3505 | if (entry || last == ULONG_MAX) |
54a611b6 LH |
3506 | return false; |
3507 | } | |
3508 | ||
bc147f0f | 3509 | trace_ma_write(__func__, wr_mas->mas, wr_mas->r_max, entry); |
54a611b6 LH |
3510 | return true; |
3511 | } | |
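The conditions above are easiest to check with concrete numbers. A standalone predicate with the same shape, simplified to model only a leaf node (r_max, node_max, and null_entry are the inputs of interest):

#include <stdbool.h>
#include <stdio.h>
#include <limits.h>

/* Simplified leaf-only model of mas_is_span_wr(). */
static bool span_wr(unsigned long last, unsigned long r_max,
		    unsigned long node_max, bool null_entry)
{
	if (last < r_max)		/* fits in this pivot */
		return false;
	if (last < node_max)		/* fits in this node */
		return false;
	if (last == node_max)		/* ends exactly on the node max: */
		return null_entry && node_max != ULONG_MAX;
	return true;			/* runs past the node */
}

int main(void)
{
	/* Node covers 0-99, current pivot ends at 49. */
	printf("%d\n", span_wr(30, 49, 99, false));	/* 0: inside pivot */
	printf("%d\n", span_wr(99, 49, 99, false));	/* 0: ends on node max */
	printf("%d\n", span_wr(99, 49, 99, true));	/* 1: NULL at node end */
	printf("%d\n", span_wr(120, 49, 99, false));	/* 1: crosses the node */
	return 0;
}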
3512 | ||
3513 | static inline void mas_wr_walk_descend(struct ma_wr_state *wr_mas) | |
3514 | { | |
54a611b6 LH |
3515 | wr_mas->type = mte_node_type(wr_mas->mas->node); |
3516 | mas_wr_node_walk(wr_mas); | |
3517 | wr_mas->slots = ma_slots(wr_mas->node, wr_mas->type); | |
3518 | } | |
3519 | ||
3520 | static inline void mas_wr_walk_traverse(struct ma_wr_state *wr_mas) | |
3521 | { | |
3522 | wr_mas->mas->max = wr_mas->r_max; | |
3523 | wr_mas->mas->min = wr_mas->r_min; | |
3524 | wr_mas->mas->node = wr_mas->content; | |
3525 | wr_mas->mas->offset = 0; | |
9bbba563 | 3526 | wr_mas->mas->depth++; |
54a611b6 LH |
3527 | } |
3528 | /* | |
3529 | * mas_wr_walk() - Walk the tree for a write. | |
3530 | * @wr_mas: The maple write state | |
3531 | * | |
3532 | * Uses mas_slot_locked() and does not need to worry about dead nodes. | |
3533 | * | |
3534 | * Return: True if it's contained in a node, false on spanning write. | |
3535 | */ | |
3536 | static bool mas_wr_walk(struct ma_wr_state *wr_mas) | |
3537 | { | |
3538 | struct ma_state *mas = wr_mas->mas; | |
3539 | ||
3540 | while (true) { | |
3541 | mas_wr_walk_descend(wr_mas); | |
3542 | if (unlikely(mas_is_span_wr(wr_mas))) | |
3543 | return false; | |
3544 | ||
3545 | wr_mas->content = mas_slot_locked(mas, wr_mas->slots, | |
3546 | mas->offset); | |
3547 | if (ma_is_leaf(wr_mas->type)) | |
3548 | return true; | |
3549 | ||
ad88fc17 SK |
3550 | if (mas->end < mt_slots[wr_mas->type] - 1) |
3551 | wr_mas->vacant_height = mas->depth + 1; | |
3552 | ||
271152a9 SK |
3553 | if (ma_is_root(mas_mn(mas))) { |
3554 | /* root needs more than 2 entries to be sufficient + 1 */ | |
3555 | if (mas->end > 2) | |
3556 | wr_mas->sufficient_height = 1; | |
3557 | } else if (mas->end > mt_min_slots[wr_mas->type] + 1) | |
3558 | wr_mas->sufficient_height = mas->depth + 1; | |
3559 | ||
54a611b6 LH |
3560 | mas_wr_walk_traverse(wr_mas); |
3561 | } | |
3562 | ||
3563 | return true; | |
3564 | } | |
3565 | ||
bea07fd6 | 3566 | static void mas_wr_walk_index(struct ma_wr_state *wr_mas) |
54a611b6 LH |
3567 | { |
3568 | struct ma_state *mas = wr_mas->mas; | |
3569 | ||
3570 | while (true) { | |
3571 | mas_wr_walk_descend(wr_mas); | |
3572 | wr_mas->content = mas_slot_locked(mas, wr_mas->slots, | |
3573 | mas->offset); | |
3574 | if (ma_is_leaf(wr_mas->type)) | |
bea07fd6 | 3575 | return; |
54a611b6 | 3576 | mas_wr_walk_traverse(wr_mas); |
54a611b6 | 3577 | } |
54a611b6 LH |
3578 | } |
3579 | /* | |
3580 | * mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs. | |
3581 | * @l_wr_mas: The left maple write state | |
3582 | * @r_wr_mas: The right maple write state | |
3583 | */ | |
3584 | static inline void mas_extend_spanning_null(struct ma_wr_state *l_wr_mas, | |
3585 | struct ma_wr_state *r_wr_mas) | |
3586 | { | |
3587 | struct ma_state *r_mas = r_wr_mas->mas; | |
3588 | struct ma_state *l_mas = l_wr_mas->mas; | |
3589 | unsigned char l_slot; | |
3590 | ||
3591 | l_slot = l_mas->offset; | |
3592 | if (!l_wr_mas->content) | |
3593 | l_mas->index = l_wr_mas->r_min; | |
3594 | ||
3595 | if ((l_mas->index == l_wr_mas->r_min) && | |
3596 | (l_slot && | |
3597 | !mas_slot_locked(l_mas, l_wr_mas->slots, l_slot - 1))) { | |
3598 | if (l_slot > 1) | |
3599 | l_mas->index = l_wr_mas->pivots[l_slot - 2] + 1; | |
3600 | else | |
3601 | l_mas->index = l_mas->min; | |
3602 | ||
3603 | l_mas->offset = l_slot - 1; | |
3604 | } | |
3605 | ||
3606 | if (!r_wr_mas->content) { | |
3607 | if (r_mas->last < r_wr_mas->r_max) | |
3608 | r_mas->last = r_wr_mas->r_max; | |
3609 | r_mas->offset++; | |
3610 | } else if ((r_mas->last == r_wr_mas->r_max) && | |
3611 | (r_mas->last < r_mas->max) && | |
3612 | !mas_slot_locked(r_mas, r_wr_mas->slots, r_mas->offset + 1)) { | |
3613 | r_mas->last = mas_safe_pivot(r_mas, r_wr_mas->pivots, | |
3614 | r_wr_mas->type, r_mas->offset + 1); | |
3615 | r_mas->offset++; | |
3616 | } | |
3617 | } | |
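Storing a NULL coalesces with neighbouring NULL ranges, as handled above. A minimal model on one flat array of ranges (illustrative; the real code extends across two separate nodes, one per write state):

#include <stdio.h>

/*
 * Extend a NULL store [index, last] over adjacent NULL slots.
 * slots[i] covers (pivots[i-1], pivots[i]]; NULL is modelled as 0.
 */
static void extend_null(const int *slots, const unsigned long *pivots,
			int nslots, unsigned long *index, unsigned long *last)
{
	for (int i = 0; i < nslots; i++) {
		unsigned long min = i ? pivots[i - 1] + 1 : 0;

		if (!slots[i] && min == *last + 1)
			*last = pivots[i];		/* swallow next NULL */
		if (!slots[i] && pivots[i] + 1 == *index)
			*index = min;			/* swallow prev NULL */
	}
}

int main(void)
{
	/* [0,9]->A [10,19]->NULL [20,29]->B [30,39]->NULL [40,49]->C */
	int slots[] = { 1, 0, 2, 0, 3 };
	unsigned long pivots[] = { 9, 19, 29, 39, 49 };
	unsigned long index = 20, last = 29;		/* erase B */

	extend_null(slots, pivots, 5, &index, &last);
	printf("store NULL over [%lu, %lu]\n", index, last);	/* [10, 39] */
	return 0;
}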
3618 | ||
3619 | static inline void *mas_state_walk(struct ma_state *mas) | |
3620 | { | |
3621 | void *entry; | |
3622 | ||
3623 | entry = mas_start(mas); | |
3624 | if (mas_is_none(mas)) | |
3625 | return NULL; | |
3626 | ||
3627 | if (mas_is_ptr(mas)) | |
3628 | return entry; | |
3629 | ||
3630 | return mtree_range_walk(mas); | |
3631 | } | |
3632 | ||
3633 | /* | |
3634 | * mtree_lookup_walk() - Internal quick lookup that does not keep maple state up | |
3635 | * to date. | |
3636 | * | |
3637 | * @mas: The maple state. | |
3638 | * | |
3639 | * Note: Leaves mas in an undesirable state. |
3640 | * Return: The entry for @mas->index or %NULL on dead node. | |
3641 | */ | |
3642 | static inline void *mtree_lookup_walk(struct ma_state *mas) | |
3643 | { | |
3644 | unsigned long *pivots; | |
3645 | unsigned char offset; | |
3646 | struct maple_node *node; | |
3647 | struct maple_enode *next; | |
3648 | enum maple_type type; | |
3649 | void __rcu **slots; | |
3650 | unsigned char end; | |
54a611b6 LH |
3651 | |
3652 | next = mas->node; | |
54a611b6 | 3653 | do { |
54a611b6 LH |
3654 | node = mte_to_node(next); |
3655 | type = mte_node_type(next); | |
3656 | pivots = ma_pivots(node, type); | |
24662dec LH |
3657 | end = mt_pivots[type]; |
3658 | offset = 0; | |
54a611b6 | 3659 | do { |
24662dec | 3660 | if (pivots[offset] >= mas->index) |
ec07967d | 3661 | break; |
ec07967d | 3662 | } while (++offset < end); |
54a611b6 | 3663 | |
54a611b6 LH |
3664 | slots = ma_slots(node, type); |
3665 | next = mt_slot(mas->tree, slots, offset); | |
3666 | if (unlikely(ma_dead_node(node))) | |
3667 | goto dead_node; | |
3668 | } while (!ma_is_leaf(type)); | |
3669 | ||
831978e3 | 3670 | return (void *)next; |
54a611b6 LH |
3671 | |
3672 | dead_node: | |
3673 | mas_reset(mas); | |
3674 | return NULL; | |
3675 | } | |
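The walk above is a linear pivot scan repeated once per level. A minimal userspace model of a single level (illustrative; the real nodes carry type-dependent pivot counts and RCU-protected slots):

#include <stdio.h>

/* One level of the walk: find the first pivot that covers index. */
static int pivot_scan(const unsigned long *pivots, int max_piv,
		      unsigned long index)
{
	int offset = 0;

	while (offset < max_piv && pivots[offset] < index)
		offset++;
	return offset;		/* slot to descend into */
}

int main(void)
{
	/* Ranges [0,9], [10,19], [20,29] in one node. */
	unsigned long pivots[] = { 9, 19, 29 };

	printf("%d\n", pivot_scan(pivots, 3, 15));	/* 1 */
	printf("%d\n", pivot_scan(pivots, 3, 20));	/* 2 */
	return 0;
}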
3676 | ||
530f745c | 3677 | static void mte_destroy_walk(struct maple_enode *, struct maple_tree *); |
54a611b6 LH |
3678 | /* |
3679 | * mas_new_root() - Create a new root node that only contains the entry passed | |
3680 | * in. | |
3681 | * @mas: The maple state | |
3682 | * @entry: The entry to store. | |
3683 | * | |
3684 | * Only valid when the index == 0 and the last == ULONG_MAX | |
54a611b6 | 3685 | */ |
ed4dfd9a | 3686 | static inline void mas_new_root(struct ma_state *mas, void *entry) |
54a611b6 LH |
3687 | { |
3688 | struct maple_enode *root = mas_root_locked(mas); | |
3689 | enum maple_type type = maple_leaf_64; | |
3690 | struct maple_node *node; | |
3691 | void __rcu **slots; | |
3692 | unsigned long *pivots; | |
3693 | ||
8c836f17 WY |
3694 | WARN_ON_ONCE(mas->index || mas->last != ULONG_MAX); |
3695 | ||
3696 | if (!entry) { | |
f9d3a963 | 3697 | mt_set_height(mas->tree, 0); |
54a611b6 | 3698 | rcu_assign_pointer(mas->tree->ma_root, entry); |
067311d3 | 3699 | mas->status = ma_start; |
54a611b6 LH |
3700 | goto done; |
3701 | } | |
3702 | ||
54a611b6 LH |
3703 | node = mas_pop_node(mas); |
3704 | pivots = ma_pivots(node, type); | |
3705 | slots = ma_slots(node, type); | |
4ffc2ee2 | 3706 | node->parent = ma_parent_ptr(mas_tree_parent(mas)); |
54a611b6 | 3707 | mas->node = mt_mk_node(node, type); |
067311d3 | 3708 | mas->status = ma_active; |
54a611b6 LH |
3709 | rcu_assign_pointer(slots[0], entry); |
3710 | pivots[0] = mas->last; | |
f9d3a963 | 3711 | mt_set_height(mas->tree, 1); |
54a611b6 LH |
3712 | rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node)); |
3713 | ||
3714 | done: | |
3715 | if (xa_is_node(root)) | |
3716 | mte_destroy_walk(root, mas->tree); | |
3717 | ||
ed4dfd9a | 3718 | return; |
54a611b6 LH |
3719 | } |
3720 | /* | |
3721 | * mas_wr_spanning_store() - Create a subtree with the store operation completed | |
3722 | * and new nodes where necessary, then place the sub-tree in the actual tree. | |
3723 | * Note that mas is expected to point to the node which caused the store to | |
3724 | * span. | |
3725 | * @wr_mas: The maple write state | |
54a611b6 | 3726 | */ |
ed4dfd9a | 3727 | static noinline void mas_wr_spanning_store(struct ma_wr_state *wr_mas) |
54a611b6 LH |
3728 | { |
3729 | struct maple_subtree_state mast; | |
3730 | struct maple_big_node b_node; | |
3731 | struct ma_state *mas; | |
3732 | unsigned char height; | |
3733 | ||
3734 | /* Left and Right side of spanning store */ | |
3735 | MA_STATE(l_mas, NULL, 0, 0); | |
3736 | MA_STATE(r_mas, NULL, 0, 0); | |
54a611b6 LH |
3737 | MA_WR_STATE(r_wr_mas, &r_mas, wr_mas->entry); |
3738 | MA_WR_STATE(l_wr_mas, &l_mas, wr_mas->entry); | |
3739 | ||
3740 | /* | |
3741 | * A store operation that spans multiple nodes is called a spanning | |
3742 | * store and is handled early in the store call stack by the function | |
3743 | * mas_is_span_wr(). When a spanning store is identified, the maple | |
3744 | * state is duplicated. The first maple state walks the left tree path | |
3745 | * to ``index``, the duplicate walks the right tree path to ``last``. | |
3746 | * The data in the two nodes are combined into a single node, two nodes, | |
3747 | * or possibly three nodes (see the 3-way split above). A ``NULL`` | |
3748 | * written to the last entry of a node is considered a spanning store as | |
3749 | * a rebalance is required for the operation to complete and an overflow | |
3750 | * of data may happen. | |
3751 | */ | |
3752 | mas = wr_mas->mas; | |
3753 | trace_ma_op(__func__, mas); | |
3754 | ||
3755 | if (unlikely(!mas->index && mas->last == ULONG_MAX)) | |
3756 | return mas_new_root(mas, wr_mas->entry); | |
3757 | /* | |
3758 | * Node rebalancing may occur due to this store, so there may be three new | |
3759 | * entries per level plus a new root. | |
3760 | */ | |
3761 | height = mas_mt_height(mas); | |
54a611b6 LH |
3762 | |
3763 | /* | |
3764 | * Set up right side. Need to get to the next offset after the spanning | |
3765 | * store to ensure it's not NULL and to combine both the next node and | |
3766 | * the node with the start together. | |
3767 | */ | |
3768 | r_mas = *mas; | |
3769 | /* Avoid overflow, walk to next slot in the tree. */ | |
3770 | if (r_mas.last + 1) | |
3771 | r_mas.last++; | |
3772 | ||
3773 | r_mas.index = r_mas.last; | |
3774 | mas_wr_walk_index(&r_wr_mas); | |
3775 | r_mas.last = r_mas.index = mas->last; | |
3776 | ||
3777 | /* Set up left side. */ | |
3778 | l_mas = *mas; | |
3779 | mas_wr_walk_index(&l_wr_mas); | |
3780 | ||
3781 | if (!wr_mas->entry) { | |
3782 | mas_extend_spanning_null(&l_wr_mas, &r_wr_mas); | |
3783 | mas->offset = l_mas.offset; | |
3784 | mas->index = l_mas.index; | |
3785 | mas->last = l_mas.last = r_mas.last; | |
3786 | } | |
3787 | ||
3788 | /* expanding NULLs may make this cover the entire range */ | |
3789 | if (!l_mas.index && r_mas.last == ULONG_MAX) { | |
3790 | mas_set_range(mas, 0, ULONG_MAX); | |
3791 | return mas_new_root(mas, wr_mas->entry); | |
3792 | } | |
3793 | ||
3794 | memset(&b_node, 0, sizeof(struct maple_big_node)); | |
3795 | /* Copy l_mas and store the value in b_node. */ | |
0de56e38 | 3796 | mas_store_b_node(&l_wr_mas, &b_node, l_mas.end); |
bea07fd6 LS |
3797 | /* Copy r_mas into b_node if there is anything to copy. */ |
3798 | if (r_mas.max > r_mas.last) | |
0de56e38 | 3799 | mas_mab_cp(&r_mas, r_mas.offset, r_mas.end, |
54a611b6 LH |
3800 | &b_node, b_node.b_end + 1); |
3801 | else | |
3802 | b_node.b_end++; | |
3803 | ||
3804 | /* Stop spanning searches by searching for just index. */ | |
3805 | l_mas.index = l_mas.last = mas->index; | |
3806 | ||
3807 | mast.bn = &b_node; | |
3808 | mast.orig_l = &l_mas; | |
3809 | mast.orig_r = &r_mas; | |
3810 | /* Combine l_mas and r_mas and split them up evenly again. */ | |
3811 | return mas_spanning_rebalance(mas, &mast, height + 1); | |
3812 | } | |
3813 | ||
3814 | /* | |
3815 | * mas_wr_node_store() - Attempt to store the value in a node | |
3816 | * @wr_mas: The maple write state | |
3817 | * | |
3818 | * Attempts to reuse the node, but may allocate. | |
54a611b6 | 3819 | */ |
ed4dfd9a | 3820 | static inline void mas_wr_node_store(struct ma_wr_state *wr_mas, |
7a03ae39 | 3821 | unsigned char new_end) |
54a611b6 LH |
3822 | { |
3823 | struct ma_state *mas = wr_mas->mas; | |
3824 | void __rcu **dst_slots; | |
3825 | unsigned long *dst_pivots; | |
7a03ae39 | 3826 | unsigned char dst_offset, offset_end = wr_mas->offset_end; |
54a611b6 | 3827 | struct maple_node reuse, *newnode; |
7a03ae39 | 3828 | unsigned char copy_size, node_pivots = mt_pivots[wr_mas->type]; |
54a611b6 | 3829 | bool in_rcu = mt_in_rcu(mas->tree); |
f9d3a963 | 3830 | unsigned char height = mas_mt_height(mas); |
54a611b6 | 3831 | |
7a03ae39 PZ |
3832 | if (mas->last == wr_mas->end_piv) |
3833 | offset_end++; /* don't copy this offset */ | |
3834 | else if (unlikely(wr_mas->r_max == ULONG_MAX)) | |
0de56e38 | 3835 | mas_bulk_rebalance(mas, mas->end, wr_mas->type); |
7a03ae39 | 3836 | |
54a611b6 LH |
3837 | /* set up node. */ |
3838 | if (in_rcu) { | |
54a611b6 LH |
3839 | newnode = mas_pop_node(mas); |
3840 | } else { | |
3841 | memset(&reuse, 0, sizeof(struct maple_node)); | |
3842 | newnode = &reuse; | |
3843 | } | |
3844 | ||
3845 | newnode->parent = mas_mn(mas)->parent; | |
3846 | dst_pivots = ma_pivots(newnode, wr_mas->type); | |
3847 | dst_slots = ma_slots(newnode, wr_mas->type); | |
3848 | /* Copy from start to insert point */ | |
7a03ae39 PZ |
3849 | memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * mas->offset); |
3850 | memcpy(dst_slots, wr_mas->slots, sizeof(void *) * mas->offset); | |
54a611b6 LH |
3851 | |
3852 | /* Handle insert of new range starting after old range */ | |
3853 | if (wr_mas->r_min < mas->index) { | |
7a03ae39 PZ |
3854 | rcu_assign_pointer(dst_slots[mas->offset], wr_mas->content); |
3855 | dst_pivots[mas->offset++] = mas->index - 1; | |
54a611b6 LH |
3856 | } |
3857 | ||
3858 | /* Store the new entry and range end. */ | |
7a03ae39 PZ |
3859 | if (mas->offset < node_pivots) |
3860 | dst_pivots[mas->offset] = mas->last; | |
3861 | rcu_assign_pointer(dst_slots[mas->offset], wr_mas->entry); | |
54a611b6 LH |
3862 | |
3863 | /* | |
3864 | * This range wrote to the end of the node, or it overwrote the rest of |
3865 | * the data. |
3866 | */ | |
0de56e38 | 3867 | if (offset_end > mas->end) |
54a611b6 | 3868 | goto done; |
54a611b6 | 3869 | |
7a03ae39 | 3870 | dst_offset = mas->offset + 1; |
54a611b6 | 3871 | /* Copy to the end of node if necessary. */ |
0de56e38 | 3872 | copy_size = mas->end - offset_end + 1; |
7a03ae39 | 3873 | memcpy(dst_slots + dst_offset, wr_mas->slots + offset_end, |
54a611b6 | 3874 | sizeof(void *) * copy_size); |
7a03ae39 PZ |
3875 | memcpy(dst_pivots + dst_offset, wr_mas->pivots + offset_end, |
3876 | sizeof(unsigned long) * (copy_size - 1)); | |
54a611b6 | 3877 | |
7a03ae39 | 3878 | if (new_end < node_pivots) |
54a611b6 LH |
3879 | dst_pivots[new_end] = mas->max; |
3880 | ||
3881 | done: | |
330018fe | 3882 | mas_leaf_set_meta(newnode, maple_leaf_64, new_end); |
54a611b6 | 3883 | if (in_rcu) { |
72bcf4aa LH |
3884 | struct maple_enode *old_enode = mas->node; |
3885 | ||
54a611b6 | 3886 | mas->node = mt_mk_node(newnode, wr_mas->type); |
f9d3a963 | 3887 | mas_replace_node(mas, old_enode, height); |
54a611b6 LH |
3888 | } else { |
3889 | memcpy(wr_mas->node, newnode, sizeof(struct maple_node)); | |
3890 | } | |
3891 | trace_ma_write(__func__, mas, 0, wr_mas->entry); | |
3892 | mas_update_gap(mas); | |
31c532a8 | 3893 | mas->end = new_end; |
ed4dfd9a | 3894 | return; |
54a611b6 LH |
3895 | } |
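The node store above is a three-part copy: an untouched head, the trimmed old range plus the new entry, then an untouched tail. A compact standalone model of just the copy (illustrative sizes; the real function also handles appended end pivots, RCU node replacement, and metadata):

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned long pivots[16] = { 9, 19, 29, 39, 49 };
	const char *slots[16] = { "A", "B", "C", "D", "E" };
	unsigned long dst_pivots[16];
	const char *dst_slots[16];
	/* Store [15, 34] -> "N": starts in offset 1, ends in offset 3. */
	int offset = 1, offset_end = 3, end = 4, dst;

	memcpy(dst_pivots, pivots, sizeof(pivots[0]) * offset);
	memcpy(dst_slots, slots, sizeof(slots[0]) * offset);
	dst = offset;
	dst_pivots[dst] = 15 - 1;	/* trimmed head of old range B */
	dst_slots[dst++] = slots[offset];
	dst_pivots[dst] = 34;		/* the new entry */
	dst_slots[dst++] = "N";
	memcpy(dst_pivots + dst, pivots + offset_end,
	       sizeof(pivots[0]) * (end - offset_end + 1));
	memcpy(dst_slots + dst, slots + offset_end,
	       sizeof(slots[0]) * (end - offset_end + 1));
	dst += end - offset_end;

	for (int i = 0; i <= dst; i++)
		printf("[..%lu] -> %s\n", dst_pivots[i], dst_slots[i]);
	/* [..9]->A [..14]->B [..34]->N [..39]->D [..49]->E */
	return 0;
}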
3896 | ||
3897 | /* | |
3898 | * mas_wr_slot_store: Attempt to store a value in a slot. | |
3899 | * @wr_mas: the maple write state | |
54a611b6 | 3900 | */ |
ed4dfd9a | 3901 | static inline void mas_wr_slot_store(struct ma_wr_state *wr_mas) |
54a611b6 LH |
3902 | { |
3903 | struct ma_state *mas = wr_mas->mas; | |
54a611b6 | 3904 | unsigned char offset = mas->offset; |
64891ba3 | 3905 | void __rcu **slots = wr_mas->slots; |
e6d1ffd6 | 3906 | bool gap = false; |
54a611b6 | 3907 | |
64891ba3 PZ |
3908 | gap |= !mt_slot_locked(mas->tree, slots, offset); |
3909 | gap |= !mt_slot_locked(mas->tree, slots, offset + 1); | |
54a611b6 | 3910 | |
64891ba3 PZ |
3911 | if (wr_mas->offset_end - offset == 1) { |
3912 | if (mas->index == wr_mas->r_min) { | |
3913 | /* Overwriting the range and a part of the next one */ | |
3914 | rcu_assign_pointer(slots[offset], wr_mas->entry); | |
3915 | wr_mas->pivots[offset] = mas->last; | |
3916 | } else { | |
3917 | /* Overwriting a part of the range and the next one */ | |
3918 | rcu_assign_pointer(slots[offset + 1], wr_mas->entry); | |
3919 | wr_mas->pivots[offset] = mas->index - 1; | |
3920 | mas->offset++; /* Keep mas accurate. */ | |
3921 | } | |
38dc8f49 WY |
3922 | } else { |
3923 | WARN_ON_ONCE(mt_in_rcu(mas->tree)); | |
64891ba3 PZ |
3924 | /* |
3925 | * Expand the range, only partially overwriting the previous and | |
3926 | * next ranges | |
3927 | */ | |
3928 | gap |= !mt_slot_locked(mas->tree, slots, offset + 2); | |
3929 | rcu_assign_pointer(slots[offset + 1], wr_mas->entry); | |
e6d1ffd6 | 3930 | wr_mas->pivots[offset] = mas->index - 1; |
64891ba3 | 3931 | wr_mas->pivots[offset + 1] = mas->last; |
e6d1ffd6 | 3932 | mas->offset++; /* Keep mas accurate. */ |
54a611b6 LH |
3933 | } |
3934 | ||
54a611b6 | 3935 | trace_ma_write(__func__, mas, 0, wr_mas->entry); |
e6d1ffd6 PZ |
3936 | /* |
3937 | * Only update gap when the new entry is empty or there is an empty | |
3938 | * entry in the original two ranges. | |
3939 | */ | |
3940 | if (!wr_mas->entry || gap) | |
3941 | mas_update_gap(mas); | |
3942 | ||
ed4dfd9a | 3943 | return; |
54a611b6 LH |
3944 | } |
3945 | ||
54a611b6 LH |
3946 | static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas) |
3947 | { | |
3948 | struct ma_state *mas = wr_mas->mas; | |
3949 | ||
8c995a63 PZ |
3950 | if (!wr_mas->slots[wr_mas->offset_end]) { |
3951 | /* If this one is null, the next and prev are not */ | |
54a611b6 | 3952 | mas->last = wr_mas->end_piv; |
8c995a63 PZ |
3953 | } else { |
3954 | /* Check next slot(s) if we are overwriting the end */ | |
3955 | if ((mas->last == wr_mas->end_piv) && | |
0de56e38 | 3956 | (mas->end != wr_mas->offset_end) && |
8c995a63 PZ |
3957 | !wr_mas->slots[wr_mas->offset_end + 1]) { |
3958 | wr_mas->offset_end++; | |
0de56e38 | 3959 | if (wr_mas->offset_end == mas->end) |
8c995a63 PZ |
3960 | mas->last = mas->max; |
3961 | else | |
3962 | mas->last = wr_mas->pivots[wr_mas->offset_end]; | |
3963 | wr_mas->end_piv = mas->last; | |
3964 | } | |
54a611b6 LH |
3965 | } |
3966 | ||
3967 | if (!wr_mas->content) { | |
3968 | /* If this one is null, the next and prev are not */ | |
3969 | mas->index = wr_mas->r_min; | |
3970 | } else { | |
3971 | /* Check prev slot if we are overwriting the start */ | |
3972 | if (mas->index == wr_mas->r_min && mas->offset && | |
3973 | !wr_mas->slots[mas->offset - 1]) { | |
3974 | mas->offset--; | |
3975 | wr_mas->r_min = mas->index = | |
3976 | mas_safe_min(mas, wr_mas->pivots, mas->offset); | |
3977 | wr_mas->r_max = wr_mas->pivots[mas->offset]; | |
3978 | } | |
3979 | } | |
3980 | } | |
3981 | ||
a7496ad5 LH |
3982 | static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas) |
3983 | { | |
0de56e38 | 3984 | while ((wr_mas->offset_end < wr_mas->mas->end) && |
a7496ad5 LH |
3985 | (wr_mas->mas->last > wr_mas->pivots[wr_mas->offset_end])) |
3986 | wr_mas->offset_end++; | |
3987 | ||
0de56e38 | 3988 | if (wr_mas->offset_end < wr_mas->mas->end) |
a7496ad5 LH |
3989 | wr_mas->end_piv = wr_mas->pivots[wr_mas->offset_end]; |
3990 | else | |
3991 | wr_mas->end_piv = wr_mas->mas->max; | |
a7496ad5 LH |
3992 | } |
3993 | ||
c6fc9e4a PZ |
3994 | static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas) |
3995 | { | |
3996 | struct ma_state *mas = wr_mas->mas; | |
0de56e38 | 3997 | unsigned char new_end = mas->end + 2; |
c6fc9e4a PZ |
3998 | |
3999 | new_end -= wr_mas->offset_end - mas->offset; | |
4000 | if (wr_mas->r_min == mas->index) | |
4001 | new_end--; | |
4002 | ||
4003 | if (wr_mas->end_piv == mas->last) | |
4004 | new_end--; | |
4005 | ||
4006 | return new_end; | |
4007 | } | |
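The new size computed above is easiest to sanity-check with numbers. A hedged standalone copy of the arithmetic (offsets are illustrative):

#include <stdio.h>

/*
 * Model of mas_wr_new_end(): the store replaces offsets offset..offset_end
 * and can add up to two new boundaries (index - 1 and last).
 */
static int model_new_end(int end, int offset, int offset_end,
			 int start_aligned, int end_aligned)
{
	int n = end + 2 - (offset_end - offset);

	if (start_aligned)	/* r_min == index: no new start pivot */
		n--;
	if (end_aligned)	/* end_piv == last: no new end pivot */
		n--;
	return n;
}

int main(void)
{
	/* Punch a hole in the middle of one range: two new pivots. */
	printf("%d\n", model_new_end(9, 4, 4, 0, 0));	/* 11 */
	/* Exactly overwrite one range: the size is unchanged. */
	printf("%d\n", model_new_end(9, 4, 4, 1, 1));	/* 9 */
	return 0;
}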
4008 | ||
2e1da329 PZ |
4009 | /* |
4010 | * mas_wr_append() - Attempt to append |
4011 | * @wr_mas: the maple write state | |
432af5c9 | 4012 | * @new_end: The end of the node after the modification |
2e1da329 | 4013 | * |
cfeb6ae8 LH |
4014 | * This is currently unsafe in rcu mode since the end of the node may be cached |
4015 | * by readers while the node contents may be updated which could result in | |
4016 | * inaccurate information. | |
2e1da329 | 4017 | */ |
ed4dfd9a | 4018 | static inline void mas_wr_append(struct ma_wr_state *wr_mas, |
432af5c9 | 4019 | unsigned char new_end) |
54a611b6 | 4020 | { |
add60ea5 | 4021 | struct ma_state *mas = wr_mas->mas; |
432af5c9 | 4022 | void __rcu **slots; |
add60ea5 | 4023 | unsigned char end = mas->end; |
432af5c9 LH |
4024 | |
4025 | if (new_end < mt_pivots[wr_mas->type]) { | |
2e1da329 | 4026 | wr_mas->pivots[new_end] = wr_mas->pivots[end]; |
432af5c9 | 4027 | ma_set_meta(wr_mas->node, wr_mas->type, 0, new_end); |
2e1da329 | 4028 | } |
54a611b6 | 4029 | |
432af5c9 LH |
4030 | slots = wr_mas->slots; |
4031 | if (new_end == end + 1) { | |
23e9dde0 PZ |
4032 | if (mas->last == wr_mas->r_max) { |
4033 | /* Append to end of range */ | |
432af5c9 | 4034 | rcu_assign_pointer(slots[new_end], wr_mas->entry); |
23e9dde0 PZ |
4035 | wr_mas->pivots[end] = mas->index - 1; |
4036 | mas->offset = new_end; | |
4037 | } else { | |
4038 | /* Append to start of range */ | |
432af5c9 | 4039 | rcu_assign_pointer(slots[new_end], wr_mas->content); |
23e9dde0 | 4040 | wr_mas->pivots[end] = mas->last; |
432af5c9 | 4041 | rcu_assign_pointer(slots[end], wr_mas->entry); |
23e9dde0 | 4042 | } |
2e1da329 | 4043 | } else { |
23e9dde0 | 4044 | /* Append to the range without touching any boundaries. */ |
432af5c9 | 4045 | rcu_assign_pointer(slots[new_end], wr_mas->content); |
23e9dde0 | 4046 | wr_mas->pivots[end + 1] = mas->last; |
432af5c9 | 4047 | rcu_assign_pointer(slots[end + 1], wr_mas->entry); |
23e9dde0 PZ |
4048 | wr_mas->pivots[end] = mas->index - 1; |
4049 | mas->offset = end + 1; | |
54a611b6 LH |
4050 | } |
4051 | ||
2e1da329 PZ |
4052 | if (!wr_mas->content || !wr_mas->entry) |
4053 | mas_update_gap(mas); | |
4054 | ||
31c532a8 | 4055 | mas->end = new_end; |
432af5c9 | 4056 | trace_ma_write(__func__, mas, new_end, wr_mas->entry); |
ed4dfd9a | 4057 | return; |
54a611b6 LH |
4058 | } |
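Of the three append shapes above, the interior case rewrites the most state. A standalone model of that case on flat pivot/slot arrays (values are illustrative; the first two cases each move only one boundary):

#include <stdio.h>

/*
 * Model of the third mas_wr_append() case: the leaf's last range
 * [20,49] -> "A" absorbs an interior write [30,39] -> "B" in place.
 */
int main(void)
{
	unsigned long pivots[8] = { 19, 49 };	/* [0,19]->X, [20,49]->A */
	const char *slots[8] = { "X", "A" };
	int end = 1, new_end = end + 2;
	unsigned long index = 30, last = 39;

	pivots[new_end] = pivots[end];	/* the old maximum moves to the end */
	slots[new_end] = slots[end];	/* old content keeps the tail piece */
	pivots[end + 1] = last;		/* the new entry ends at 39 */
	slots[end + 1] = "B";
	pivots[end] = index - 1;	/* the old range now stops at 29 */

	for (int i = 0; i <= new_end; i++)
		printf("[..%lu] -> %s\n", pivots[i], slots[i]);
	/* [..19]->X [..29]->A [..39]->B [..49]->A */
	return 0;
}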
4059 | ||
4060 | /* | |
4061 | * mas_wr_bnode() - Slow path for a modification. | |
4062 | * @wr_mas: The write maple state | |
4063 | * | |
4064 | * This is where split, rebalance end up. | |
4065 | */ | |
4066 | static void mas_wr_bnode(struct ma_wr_state *wr_mas) | |
4067 | { | |
4068 | struct maple_big_node b_node; | |
4069 | ||
4070 | trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry); | |
4071 | memset(&b_node, 0, sizeof(struct maple_big_node)); | |
4072 | mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end); | |
62c7b2b9 | 4073 | mas_commit_b_node(wr_mas, &b_node); |
54a611b6 LH |
4074 | } |
4075 | ||
54a611b6 LH |
4076 | /* |
4077 | * mas_wr_store_entry() - Internal call to store a value | |
96ae4c90 | 4078 | * @wr_mas: The maple write state |
54a611b6 | 4079 | */ |
739820a6 | 4080 | static inline void mas_wr_store_entry(struct ma_wr_state *wr_mas) |
54a611b6 | 4081 | { |
54a611b6 | 4082 | struct ma_state *mas = wr_mas->mas; |
580fcbd6 | 4083 | unsigned char new_end = mas_wr_new_end(wr_mas); |
54a611b6 | 4084 | |
580fcbd6 | 4085 | switch (mas->store_type) { |
580fcbd6 | 4086 | case wr_exact_fit: |
54a611b6 LH |
4087 | rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry); |
4088 | if (!!wr_mas->entry ^ !!wr_mas->content) | |
4089 | mas_update_gap(mas); | |
580fcbd6 SK |
4090 | break; |
4091 | case wr_append: | |
4092 | mas_wr_append(wr_mas, new_end); | |
4093 | break; | |
4094 | case wr_slot_store: | |
4095 | mas_wr_slot_store(wr_mas); | |
4096 | break; | |
4097 | case wr_node_store: | |
4098 | mas_wr_node_store(wr_mas, new_end); | |
4099 | break; | |
4100 | case wr_spanning_store: | |
54a611b6 | 4101 | mas_wr_spanning_store(wr_mas); |
580fcbd6 SK |
4102 | break; |
4103 | case wr_split_store: | |
4104 | case wr_rebalance: | |
4105 | mas_wr_bnode(wr_mas); | |
4106 | break; | |
2a6ed1b4 SK |
4107 | case wr_new_root: |
4108 | mas_new_root(mas, wr_mas->entry); | |
4109 | break; | |
4110 | case wr_store_root: | |
4111 | mas_store_root(mas, wr_mas->entry); | |
4112 | break; | |
4113 | case wr_invalid: | |
4114 | MT_BUG_ON(mas->tree, 1); | |
54a611b6 LH |
4115 | } |
4116 | ||
580fcbd6 | 4117 | return; |
54a611b6 LH |
4118 | } |
4119 | ||
580fcbd6 | 4120 | static inline void mas_wr_prealloc_setup(struct ma_wr_state *wr_mas) |
3cc6f42a | 4121 | { |
580fcbd6 SK |
4122 | struct ma_state *mas = wr_mas->mas; |
4123 | ||
4124 | if (!mas_is_active(mas)) { | |
4125 | if (mas_is_start(mas)) | |
4126 | goto set_content; | |
3cc6f42a | 4127 | |
580fcbd6 | 4128 | if (unlikely(mas_is_paused(mas))) |
3cc6f42a SK |
4129 | goto reset; |
4130 | ||
580fcbd6 | 4131 | if (unlikely(mas_is_none(mas))) |
3cc6f42a SK |
4132 | goto reset; |
4133 | ||
580fcbd6 | 4134 | if (unlikely(mas_is_overflow(mas))) |
3cc6f42a SK |
4135 | goto reset; |
4136 | ||
580fcbd6 | 4137 | if (unlikely(mas_is_underflow(mas))) |
3cc6f42a | 4138 | goto reset; |
54a611b6 LH |
4139 | } |
4140 | ||
c6fc9e4a | 4141 | /* |
3cc6f42a SK |
4142 | * A less strict version of mas_is_span_wr() where we allow spanning |
4143 | * writes within this node. This is to stop partial walks in | |
4144 | * mas_prealloc() from being reset. | |
c6fc9e4a | 4145 | */ |
580fcbd6 | 4146 | if (mas->last > mas->max) |
3cc6f42a | 4147 | goto reset; |
54a611b6 | 4148 | |
3cc6f42a | 4149 | if (wr_mas->entry) |
580fcbd6 | 4150 | goto set_content; |
7a03ae39 | 4151 | |
580fcbd6 | 4152 | if (mte_is_leaf(mas->node) && mas->last == mas->max) |
3cc6f42a | 4153 | goto reset; |
54a611b6 | 4154 | |
580fcbd6 | 4155 | goto set_content; |
54a611b6 | 4156 | |
3cc6f42a | 4157 | reset: |
580fcbd6 SK |
4158 | mas_reset(mas); |
4159 | set_content: | |
3cc6f42a | 4160 | wr_mas->content = mas_start(mas); |
54a611b6 LH |
4161 | } |
4162 | ||
5d659bbb SK |
4163 | /** |
4164 | * mas_prealloc_calc() - Calculate the number of nodes needed for a |
4165 | * given store operation |
28092a65 | 4166 | * @wr_mas: The maple write state |
5d659bbb | 4167 | * @entry: The entry to store into the tree |
54a611b6 | 4168 | * |
5d659bbb | 4169 | * Return: Number of nodes required for preallocation. |
54a611b6 | 4170 | */ |
28092a65 | 4171 | static inline int mas_prealloc_calc(struct ma_wr_state *wr_mas, void *entry) |
5d659bbb | 4172 | { |
28092a65 | 4173 | struct ma_state *mas = wr_mas->mas; |
ad88fc17 SK |
4174 | unsigned char height = mas_mt_height(mas); |
4175 | int ret = height * 3 + 1; | |
4176 | unsigned char delta = height - wr_mas->vacant_height; | |
5d659bbb SK |
4177 | |
4178 | switch (mas->store_type) { | |
2a6ed1b4 SK |
4179 | case wr_exact_fit: |
4180 | case wr_append: | |
4181 | case wr_slot_store: | |
4182 | ret = 0; | |
5d659bbb SK |
4183 | break; |
4184 | case wr_spanning_store: | |
271152a9 SK |
4185 | if (wr_mas->sufficient_height < wr_mas->vacant_height) |
4186 | ret = (height - wr_mas->sufficient_height) * 3 + 1; | |
4187 | else | |
4188 | ret = delta * 3 + 1; | |
5d659bbb SK |
4189 | break; |
4190 | case wr_split_store: | |
ad88fc17 | 4191 | ret = delta * 2 + 1; |
5d659bbb SK |
4192 | break; |
4193 | case wr_rebalance: | |
271152a9 SK |
4194 | if (wr_mas->sufficient_height < wr_mas->vacant_height) |
4195 | ret = (height - wr_mas->sufficient_height) * 2 + 1; | |
4196 | else | |
4197 | ret = delta * 2 + 1; | |
5d659bbb SK |
4198 | break; |
4199 | case wr_node_store: | |
4200 | ret = mt_in_rcu(mas->tree) ? 1 : 0; | |
4201 | break; | |
2a6ed1b4 SK |
4202 | case wr_new_root: |
4203 | ret = 1; | |
4204 | break; | |
4205 | case wr_store_root: | |
4206 | if (likely((mas->last != 0) || (mas->index != 0))) | |
4207 | ret = 1; | |
4208 | else if (((unsigned long) (entry) & 3) == 2) | |
4209 | ret = 1; | |
4210 | else | |
4211 | ret = 0; | |
4212 | break; | |
4213 | case wr_invalid: | |
4214 | WARN_ON_ONCE(1); | |
5d659bbb SK |
4215 | } |
4216 | ||
4217 | return ret; | |
4218 | } | |
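For intuition on the counts above, here is the wr_split_store arm on its own, with illustrative heights (a standalone sketch; the other arms differ only in the per-level factor and the sufficient_height fallback):

#include <stdio.h>

/*
 * Model of the split cost: a split can ripple from the leaf up to the
 * lowest level known to have a free slot (vacant_height), producing two
 * new nodes per affected level plus one for a possible new root.
 */
static int split_prealloc(int height, int vacant_height)
{
	return (height - vacant_height) * 2 + 1;
}

int main(void)
{
	printf("%d\n", split_prealloc(4, 0));	/* 9: may split every level */
	printf("%d\n", split_prealloc(4, 3));	/* 3: stops below the root */
	return 0;
}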
4219 | ||
4220 | /* | |
f0c99037 | 4221 | * mas_wr_store_type() - Determine the store type for a given |
5d659bbb SK |
4222 | * store operation. |
4223 | * @wr_mas: The maple write state | |
f0c99037 SK |
4224 | * |
4225 | * Return: the type of store needed for the operation | |
5d659bbb | 4226 | */ |
f0c99037 | 4227 | static inline enum store_type mas_wr_store_type(struct ma_wr_state *wr_mas) |
54a611b6 LH |
4228 | { |
4229 | struct ma_state *mas = wr_mas->mas; | |
5d659bbb | 4230 | unsigned char new_end; |
54a611b6 | 4231 | |
f0c99037 SK |
4232 | if (unlikely(mas_is_none(mas) || mas_is_ptr(mas))) |
4233 | return wr_store_root; | |
54a611b6 | 4234 | |
f0c99037 SK |
4235 | if (unlikely(!mas_wr_walk(wr_mas))) |
4236 | return wr_spanning_store; | |
54a611b6 LH |
4237 | |
4238 | /* At this point, we are at the leaf node that needs to be altered. */ | |
54a611b6 | 4239 | mas_wr_end_piv(wr_mas); |
5d659bbb SK |
4240 | if (!wr_mas->entry) |
4241 | mas_wr_extend_null(wr_mas); | |
4242 | ||
f0c99037 SK |
4243 | if ((wr_mas->r_min == mas->index) && (wr_mas->r_max == mas->last)) |
4244 | return wr_exact_fit; | |
5d659bbb | 4245 | |
f0c99037 SK |
4246 | if (unlikely(!mas->index && mas->last == ULONG_MAX)) |
4247 | return wr_new_root; | |
5d659bbb | 4248 | |
61e9df70 | 4249 | new_end = mas_wr_new_end(wr_mas); |
5d659bbb SK |
4250 | /* Potential spanning rebalance collapsing a node */ |
4251 | if (new_end < mt_min_slots[wr_mas->type]) { | |
f0c99037 SK |
4252 | if (!mte_is_root(mas->node) && !(mas->mas_flags & MA_STATE_BULK)) |
4253 | return wr_rebalance; | |
4254 | return wr_node_store; | |
5d659bbb SK |
4255 | } |
4256 | ||
f0c99037 SK |
4257 | if (new_end >= mt_slots[wr_mas->type]) |
4258 | return wr_split_store; | |
5d659bbb | 4259 | |
f0c99037 SK |
4260 | if (!mt_in_rcu(mas->tree) && (mas->offset == mas->end)) |
4261 | return wr_append; | |
5d659bbb SK |
4262 | |
4263 | if ((new_end == mas->end) && (!mt_in_rcu(mas->tree) || | |
f0c99037 SK |
4264 | (wr_mas->offset_end - mas->offset == 1))) |
4265 | return wr_slot_store; | |
5d659bbb | 4266 | |
f0c99037 | 4267 | return wr_node_store; |
5d659bbb SK |
4268 | } |
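Read top to bottom, the function above is a priority ladder. A condensed standalone restatement for reference (the enum and boolean names here are local stand-ins, not kernel symbols; the real checks consult the walk state):

#include <stdbool.h>
#include <stdio.h>

/* Local mirror of the store types, for this sketch only. */
enum wr_type { STORE_ROOT, SPANNING, EXACT_FIT, NEW_ROOT, REBALANCE,
	       NODE_STORE, SPLIT, APPEND, SLOT_STORE };

static enum wr_type pick(bool no_root, bool spans, bool exact,
			 bool whole_range, bool too_small, bool can_rebalance,
			 bool too_big, bool append_ok, bool slot_ok)
{
	if (no_root)		return STORE_ROOT;
	if (spans)		return SPANNING;
	if (exact)		return EXACT_FIT;
	if (whole_range)	return NEW_ROOT;
	if (too_small)		return can_rebalance ? REBALANCE : NODE_STORE;
	if (too_big)		return SPLIT;
	if (append_ok)		return APPEND;	/* at node end, not in RCU */
	if (slot_ok)		return SLOT_STORE;
	return NODE_STORE;
}

int main(void)
{
	/* A store that overflows the leaf's slot count becomes a split. */
	printf("%d\n", pick(false, false, false, false, false, false,
			    true, false, false) == SPLIT);	/* 1 */
	return 0;
}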
4269 | ||
4270 | /** | |
4271 | * mas_wr_preallocate() - Preallocate enough nodes for a store operation | |
4272 | * @wr_mas: The maple write state | |
4273 | * @entry: The entry that will be stored | |
4274 | * | |
4275 | */ | |
4276 | static inline void mas_wr_preallocate(struct ma_wr_state *wr_mas, void *entry) | |
4277 | { | |
5d659bbb SK |
4278 | int request; |
4279 | ||
4280 | mas_wr_prealloc_setup(wr_mas); | |
28092a65 SK |
4281 | wr_mas->mas->store_type = mas_wr_store_type(wr_mas); |
4282 | request = mas_prealloc_calc(wr_mas, entry); | |
5d659bbb SK |
4283 | if (!request) |
4284 | return; | |
4285 | ||
28092a65 | 4286 | mas_node_count(wr_mas->mas, request); |
54a611b6 LH |
4287 | } |
4288 | ||
4289 | /** | |
4290 | * mas_insert() - Internal call to insert a value | |
4291 | * @mas: The maple state | |
4292 | * @entry: The entry to store | |
4293 | * | |
4294 | * Return: %NULL, or the contents that already exist at the requested index |
4295 | * otherwise. The maple state needs to be checked for error conditions. |
4296 | */ | |
4297 | static inline void *mas_insert(struct ma_state *mas, void *entry) | |
4298 | { | |
4299 | MA_WR_STATE(wr_mas, mas, entry); | |
4300 | ||
4301 | /* | |
4302 | * Inserting a new range inserts either 0, 1, or 2 pivots within the | |
4303 | * tree. If the insert fits exactly into an existing gap with a value | |
4304 | * of NULL, then the slot only needs to be written with the new value. | |
4305 | * If the range being inserted is adjacent to another range, then only a | |
4306 | * single pivot needs to be inserted (as well as writing the entry). If | |
4307 | * the new range is within a gap but does not touch any other ranges, | |
4308 | * then two pivots need to be inserted: the start - 1, and the end. As | |
4309 | * usual, the entry must be written. Most operations require a new node | |
4310 | * to be allocated and replace an existing node to ensure RCU safety, | |
4311 | * when in RCU mode. The exception to requiring a newly allocated node | |
4312 | * is when inserting at the end of a node (appending). When done | |
4313 | * carefully, appending can reuse the node in place. | |
4314 | */ | |
4315 | wr_mas.content = mas_start(mas); | |
4316 | if (wr_mas.content) | |
4317 | goto exists; | |
4318 | ||
1fd7c4f3 SK |
4319 | mas_wr_preallocate(&wr_mas, entry); |
4320 | if (mas_is_err(mas)) | |
54a611b6 | 4321 | return NULL; |
54a611b6 LH |
4322 | |
4323 | /* spanning writes always overwrite something */ | |
1fd7c4f3 | 4324 | if (mas->store_type == wr_spanning_store) |
54a611b6 LH |
4325 | goto exists; |
4326 | ||
4327 | /* At this point, we are at the leaf node that needs to be altered. */ | |
1fd7c4f3 SK |
4328 | if (mas->store_type != wr_new_root && mas->store_type != wr_store_root) { |
4329 | wr_mas.offset_end = mas->offset; | |
4330 | wr_mas.end_piv = wr_mas.r_max; | |
54a611b6 | 4331 | |
1fd7c4f3 SK |
4332 | if (wr_mas.content || (mas->last > wr_mas.r_max)) |
4333 | goto exists; | |
4334 | } | |
54a611b6 | 4335 | |
1fd7c4f3 | 4336 | mas_wr_store_entry(&wr_mas); |
54a611b6 LH |
4337 | return wr_mas.content; |
4338 | ||
4339 | exists: | |
4340 | mas_set_err(mas, -EEXIST); | |
4341 | return wr_mas.content; | |
4342 | ||
4343 | } | |
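The pivot-counting argument in the comment above can be checked on a number line. A short illustration (plain C, no tree code) of how many new boundaries each insert shape creates:

#include <stdio.h>

/*
 * New pivots required to insert [index, last] into a NULL gap
 * [gmin, gmax]: a start pivot (index - 1) unless the insert starts at
 * the gap start, plus an end pivot (last) unless it ends at the gap end.
 */
static int pivots_needed(unsigned long gmin, unsigned long gmax,
			 unsigned long index, unsigned long last)
{
	return (index != gmin) + (last != gmax);
}

int main(void)
{
	printf("%d\n", pivots_needed(10, 19, 10, 19));	/* 0: exact fit */
	printf("%d\n", pivots_needed(10, 19, 10, 14));	/* 1: adjacent */
	printf("%d\n", pivots_needed(10, 19, 12, 14));	/* 2: interior */
	return 0;
}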
4344 | ||
9b6713cc CL |
4345 | /** |
4346 | * mas_alloc_cyclic() - Internal call to find somewhere to store an entry | |
4347 | * @mas: The maple state. | |
4348 | * @startp: Pointer to ID. | |
4349 | * @entry: The entry to store. |
4350 | * @range_lo: Lower bound of the range to search. |
4351 | * @range_hi: Upper bound of the range to search. |
4352 | * @next: Pointer to next ID to allocate. | |
4353 | * @gfp: The GFP_FLAGS to use for allocations. | |
4354 | * | |
4355 | * Return: 0 if the allocation succeeded without wrapping, 1 if the | |
4356 | * allocation succeeded after wrapping, or -EBUSY if there are no | |
4357 | * free entries. | |
4358 | */ | |
4359 | int mas_alloc_cyclic(struct ma_state *mas, unsigned long *startp, | |
4360 | void *entry, unsigned long range_lo, unsigned long range_hi, | |
4361 | unsigned long *next, gfp_t gfp) | |
4362 | { | |
4363 | unsigned long min = range_lo; | |
4364 | int ret = 0; | |
4365 | ||
4366 | range_lo = max(min, *next); | |
4367 | ret = mas_empty_area(mas, range_lo, range_hi, 1); | |
4368 | if ((mas->tree->ma_flags & MT_FLAGS_ALLOC_WRAPPED) && ret == 0) { | |
4369 | mas->tree->ma_flags &= ~MT_FLAGS_ALLOC_WRAPPED; | |
4370 | ret = 1; | |
4371 | } | |
4372 | if (ret < 0 && range_lo > min) { | |
1fd8bc7c | 4373 | mas_reset(mas); |
9b6713cc CL |
4374 | ret = mas_empty_area(mas, min, range_hi, 1); |
4375 | if (ret == 0) | |
4376 | ret = 1; | |
4377 | } | |
4378 | if (ret < 0) | |
4379 | return ret; | |
4380 | ||
4381 | do { | |
4382 | mas_insert(mas, entry); | |
4383 | } while (mas_nomem(mas, gfp)); | |
4384 | if (mas_is_err(mas)) | |
4385 | return xa_err(mas->node); | |
4386 | ||
4387 | *startp = mas->index; | |
4388 | *next = *startp + 1; | |
4389 | if (*next == 0) | |
4390 | mas->tree->ma_flags |= MT_FLAGS_ALLOC_WRAPPED; | |
4391 | ||
3cd9e92e | 4392 | mas_destroy(mas); |
9b6713cc CL |
4393 | return ret; |
4394 | } | |
4395 | EXPORT_SYMBOL(mas_alloc_cyclic); | |
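A hedged usage sketch of the cyclic allocator above. This assumes kernel context; "my_ids", "my_next_id", and my_assign_id() are hypothetical caller-side names, not kernel symbols:

/* Hand out the next free ID in [1, U32_MAX], wrapping when full. */
static int my_assign_id(struct maple_tree *my_ids, unsigned long *my_next_id,
			void *obj, unsigned long *id)
{
	MA_STATE(mas, my_ids, 0, 0);
	int ret;

	mtree_lock(my_ids);
	ret = mas_alloc_cyclic(&mas, id, obj, 1, U32_MAX, my_next_id,
			       GFP_KERNEL);
	mtree_unlock(my_ids);

	return ret < 0 ? ret : 0;	/* ret == 1 (wrapped) still succeeded */
}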
4396 | ||
271f61a8 | 4397 | static __always_inline void mas_rewalk(struct ma_state *mas, unsigned long index) |
de6e386c LH |
4398 | { |
4399 | retry: | |
4400 | mas_set(mas, index); | |
4401 | mas_state_walk(mas); | |
4402 | if (mas_is_start(mas)) | |
4403 | goto retry; | |
4404 | } | |
4405 | ||
271f61a8 | 4406 | static __always_inline bool mas_rewalk_if_dead(struct ma_state *mas, |
de6e386c LH |
4407 | struct maple_node *node, const unsigned long index) |
4408 | { | |
4409 | if (unlikely(ma_dead_node(node))) { | |
4410 | mas_rewalk(mas, index); | |
4411 | return true; | |
4412 | } | |
4413 | return false; | |
4414 | } | |
4415 | ||
54a611b6 LH |
4416 | /* |
4417 | * mas_prev_node() - Find the prev non-null entry at the same level in the | |
067311d3 LH |
4418 | * tree. |
54a611b6 LH |
4420 | * @mas: The maple state |
4421 | * @min: The lower limit to search | |
4422 | * | |
067311d3 LH |
4423 | * The prev node value will be mas->node[mas->offset] or the status will be |
4424 | * ma_none. | |
54a611b6 LH |
4425 | * Return: 1 if the node is dead, 0 otherwise. |
4426 | */ | |
271f61a8 | 4427 | static int mas_prev_node(struct ma_state *mas, unsigned long min) |
54a611b6 LH |
4428 | { |
4429 | enum maple_type mt; | |
4430 | int offset, level; | |
4431 | void __rcu **slots; | |
4432 | struct maple_node *node; | |
54a611b6 | 4433 | unsigned long *pivots; |
dd9a8513 | 4434 | unsigned long max; |
54a611b6 | 4435 | |
dd9a8513 LH |
4436 | node = mas_mn(mas); |
4437 | if (!mas->min) | |
4438 | goto no_entry; | |
4439 | ||
4440 | max = mas->min - 1; | |
4441 | if (max < min) | |
4442 | goto no_entry; | |
54a611b6 LH |
4443 | |
4444 | level = 0; | |
4445 | do { | |
54a611b6 LH |
4446 | if (ma_is_root(node)) |
4447 | goto no_entry; | |
4448 | ||
4449 | /* Walk up. */ | |
4450 | if (unlikely(mas_ascend(mas))) | |
4451 | return 1; | |
4452 | offset = mas->offset; | |
4453 | level++; | |
dd9a8513 | 4454 | node = mas_mn(mas); |
54a611b6 LH |
4455 | } while (!offset); |
4456 | ||
4457 | offset--; | |
4458 | mt = mte_node_type(mas->node); | |
54a611b6 LH |
4459 | while (level > 1) { |
4460 | level--; | |
dd9a8513 LH |
4461 | slots = ma_slots(node, mt); |
4462 | mas->node = mas_slot(mas, slots, offset); | |
54a611b6 LH |
4463 | if (unlikely(ma_dead_node(node))) |
4464 | return 1; | |
4465 | ||
54a611b6 LH |
4466 | mt = mte_node_type(mas->node); |
4467 | node = mas_mn(mas); | |
54a611b6 | 4468 | pivots = ma_pivots(node, mt); |
dd9a8513 | 4469 | offset = ma_data_end(node, mt, pivots, max); |
39d0bd86 LH |
4470 | if (unlikely(ma_dead_node(node))) |
4471 | return 1; | |
54a611b6 LH |
4472 | } |
4473 | ||
dd9a8513 | 4474 | slots = ma_slots(node, mt); |
54a611b6 | 4475 | mas->node = mas_slot(mas, slots, offset); |
dd9a8513 | 4476 | pivots = ma_pivots(node, mt); |
54a611b6 LH |
4477 | if (unlikely(ma_dead_node(node))) |
4478 | return 1; | |
4479 | ||
dd9a8513 LH |
4480 | if (likely(offset)) |
4481 | mas->min = pivots[offset - 1] + 1; | |
4482 | mas->max = max; | |
54a611b6 LH |
4483 | mas->offset = mas_data_end(mas); |
4484 | if (unlikely(mte_dead_node(mas->node))) | |
4485 | return 1; | |
4486 | ||
31c532a8 | 4487 | mas->end = mas->offset; |
54a611b6 LH |
4488 | return 0; |
4489 | ||
54a611b6 LH |
4490 | no_entry: |
4491 | if (unlikely(ma_dead_node(node))) | |
4492 | return 1; | |
4493 | ||
067311d3 | 4494 | mas->status = ma_underflow; |
54a611b6 LH |
4495 | return 0; |
4496 | } | |
4497 | ||
dd9a8513 LH |
4498 | /* |
4499 | * mas_prev_slot() - Get the entry in the previous slot | |
4500 | * | |
4501 | * @mas: The maple state | |
96ae4c90 | 4502 | * @min: The minimum index to search down to | |
a8091f03 | 4503 | * @empty: Return even an empty (%NULL) entry | |
dd9a8513 LH |
4504 | * | |
4505 | * Return: The entry in the previous slot, which may be %NULL | |
4506 | */ | |
067311d3 | 4507 | static void *mas_prev_slot(struct ma_state *mas, unsigned long min, bool empty) |
dd9a8513 LH |
4508 | { |
4509 | void *entry; | |
4510 | void __rcu **slots; | |
4511 | unsigned long pivot; | |
4512 | enum maple_type type; | |
4513 | unsigned long *pivots; | |
4514 | struct maple_node *node; | |
4515 | unsigned long save_point = mas->index; | |
4516 | ||
4517 | retry: | |
4518 | node = mas_mn(mas); | |
4519 | type = mte_node_type(mas->node); | |
4520 | pivots = ma_pivots(node, type); | |
4521 | if (unlikely(mas_rewalk_if_dead(mas, node, save_point))) | |
4522 | goto retry; | |
4523 | ||
dd9a8513 LH |
4524 | if (mas->min <= min) { |
4525 | pivot = mas_safe_min(mas, pivots, mas->offset); | |
4526 | ||
4527 | if (unlikely(mas_rewalk_if_dead(mas, node, save_point))) | |
4528 | goto retry; | |
4529 | ||
4530 | if (pivot <= min) | |
a8091f03 | 4531 | goto underflow; |
dd9a8513 LH |
4532 | } |
4533 | ||
a8091f03 | 4534 | again: |
dd9a8513 LH |
4535 | if (likely(mas->offset)) { |
4536 | mas->offset--; | |
4537 | mas->last = mas->index - 1; | |
4538 | mas->index = mas_safe_min(mas, pivots, mas->offset); | |
4539 | } else { | |
067311d3 LH |
4540 | if (mas->index <= min) |
4541 | goto underflow; | |
4542 | ||
dd9a8513 LH |
4543 | if (mas_prev_node(mas, min)) { |
4544 | mas_rewalk(mas, save_point); | |
4545 | goto retry; | |
4546 | } | |
4547 | ||
067311d3 LH |
4548 | if (WARN_ON_ONCE(mas_is_underflow(mas))) |
4549 | return NULL; | |
dd9a8513 LH |
4550 | |
4551 | mas->last = mas->max; | |
4552 | node = mas_mn(mas); | |
4553 | type = mte_node_type(mas->node); | |
4554 | pivots = ma_pivots(node, type); | |
4555 | mas->index = pivots[mas->offset - 1] + 1; | |
4556 | } | |
4557 | ||
4558 | slots = ma_slots(node, type); | |
4559 | entry = mas_slot(mas, slots, mas->offset); | |
4560 | if (unlikely(mas_rewalk_if_dead(mas, node, save_point))) | |
4561 | goto retry; | |
4562 | ||
067311d3 | 4563 | |
dd9a8513 LH |
4564 | if (likely(entry)) |
4565 | return entry; | |
4566 | ||
a8091f03 | 4567 | if (!empty) { |
067311d3 LH |
4568 | if (mas->index <= min) { |
4569 | mas->status = ma_underflow; | |
4570 | return NULL; | |
4571 | } | |
a8091f03 | 4572 | |
dd9a8513 | 4573 | goto again; |
a8091f03 | 4574 | } |
dd9a8513 LH |
4575 | |
4576 | return entry; | |
a8091f03 LH |
4577 | |
4578 | underflow: | |
067311d3 | 4579 | mas->status = ma_underflow; |
a8091f03 | 4580 | return NULL; |
dd9a8513 LH |
4581 | } |
4582 | ||
54a611b6 LH |
4583 | /* |
4584 | * mas_next_node() - Get the next node at the same level in the tree. | |
4585 | * @mas: The maple state | |
96ae4c90 | 4586 | * @node: The maple node |
54a611b6 LH |
4587 | * @max: The maximum pivot value to check. |
4588 | * | |
067311d3 LH |
4589 | * The next value will be mas->node[mas->offset] or the status will have |
4590 | * overflowed. | |
54a611b6 LH |
4591 | * Return: 1 on dead node, 0 otherwise. |
4592 | */ | |
271f61a8 LH |
4593 | static int mas_next_node(struct ma_state *mas, struct maple_node *node, |
4594 | unsigned long max) | |
54a611b6 | 4595 | { |
fff4a58c | 4596 | unsigned long min; |
54a611b6 LH |
4597 | unsigned long *pivots; |
4598 | struct maple_enode *enode; | |
e9c52d89 | 4599 | struct maple_node *tmp; |
54a611b6 | 4600 | int level = 0; |
39d0bd86 | 4601 | unsigned char node_end; |
54a611b6 LH |
4602 | enum maple_type mt; |
4603 | void __rcu **slots; | |
4604 | ||
4605 | if (mas->max >= max) | |
067311d3 | 4606 | goto overflow; |
54a611b6 | 4607 | |
fff4a58c | 4608 | min = mas->max + 1; |
54a611b6 LH |
4609 | level = 0; |
4610 | do { | |
4611 | if (ma_is_root(node)) | |
067311d3 | 4612 | goto overflow; |
54a611b6 | 4613 | |
fff4a58c | 4614 | /* Walk up. */ |
54a611b6 LH |
4615 | if (unlikely(mas_ascend(mas))) |
4616 | return 1; | |
4617 | ||
54a611b6 LH |
4618 | level++; |
4619 | node = mas_mn(mas); | |
4620 | mt = mte_node_type(mas->node); | |
4621 | pivots = ma_pivots(node, mt); | |
39d0bd86 LH |
4622 | node_end = ma_data_end(node, mt, pivots, mas->max); |
4623 | if (unlikely(ma_dead_node(node))) | |
4624 | return 1; | |
4625 | ||
fff4a58c | 4626 | } while (unlikely(mas->offset == node_end)); |
54a611b6 LH |
4627 | |
4628 | slots = ma_slots(node, mt); | |
fff4a58c LH |
4629 | mas->offset++; |
4630 | enode = mas_slot(mas, slots, mas->offset); | |
4631 | if (unlikely(ma_dead_node(node))) | |
4632 | return 1; | |
54a611b6 | 4633 | |
fff4a58c LH |
4634 | if (level > 1) |
4635 | mas->offset = 0; | |
4636 | ||
4637 | while (unlikely(level > 1)) { | |
54a611b6 | 4638 | level--; |
fff4a58c | 4639 | mas->node = enode; |
54a611b6 LH |
4640 | node = mas_mn(mas); |
4641 | mt = mte_node_type(mas->node); | |
4642 | slots = ma_slots(node, mt); | |
fff4a58c | 4643 | enode = mas_slot(mas, slots, 0); |
39d0bd86 LH |
4644 | if (unlikely(ma_dead_node(node))) |
4645 | return 1; | |
54a611b6 LH |
4646 | } |
4647 | ||
fff4a58c LH |
4648 | if (!mas->offset) |
4649 | pivots = ma_pivots(node, mt); | |
4650 | ||
4651 | mas->max = mas_safe_pivot(mas, pivots, mas->offset, mt); | |
e9c52d89 LH |
4652 | tmp = mte_to_node(enode); |
4653 | mt = mte_node_type(enode); | |
4654 | pivots = ma_pivots(tmp, mt); | |
4655 | mas->end = ma_data_end(tmp, mt, pivots, mas->max); | |
54a611b6 LH |
4656 | if (unlikely(ma_dead_node(node))) |
4657 | return 1; | |
4658 | ||
4659 | mas->node = enode; | |
4660 | mas->min = min; | |
54a611b6 LH |
4661 | return 0; |
4662 | ||
067311d3 | 4663 | overflow: |
54a611b6 LH |
4664 | if (unlikely(ma_dead_node(node))) |
4665 | return 1; | |
4666 | ||
067311d3 | 4667 | mas->status = ma_overflow; |
54a611b6 LH |
4668 | return 0; |
4669 | } | |
4670 | ||
4671 | /* | |
fff4a58c | 4672 | * mas_next_slot() - Get the entry in the next slot |
54a611b6 | 4673 | * |
fff4a58c LH |
4674 | * @mas: The maple state |
4675 | * @max: The maximum index to search up to | |
4676 | * @empty: Return even an empty (%NULL) entry | |
54a611b6 | 4677 | * | |
fff4a58c | 4678 | * Return: The entry in the next slot, which may be %NULL | |
54a611b6 | 4679 | */ |
067311d3 | 4680 | static void *mas_next_slot(struct ma_state *mas, unsigned long max, bool empty) |
54a611b6 | 4681 | { |
54a611b6 | 4682 | void __rcu **slots; |
fff4a58c LH |
4683 | unsigned long *pivots; |
4684 | unsigned long pivot; | |
4685 | enum maple_type type; | |
4686 | struct maple_node *node; | |
fff4a58c | 4687 | unsigned long save_point = mas->last; |
54a611b6 LH |
4688 | void *entry; |
4689 | ||
fff4a58c LH |
4690 | retry: |
4691 | node = mas_mn(mas); | |
4692 | type = mte_node_type(mas->node); | |
39d0bd86 | 4693 | pivots = ma_pivots(node, type); |
fff4a58c LH |
4694 | if (unlikely(mas_rewalk_if_dead(mas, node, save_point))) |
4695 | goto retry; | |
54a611b6 | 4696 | |
fff4a58c | 4697 | if (mas->max >= max) { |
e9c52d89 | 4698 | if (likely(mas->offset < mas->end)) |
fff4a58c LH |
4699 | pivot = pivots[mas->offset]; |
4700 | else | |
067311d3 | 4701 | pivot = mas->max; |
54a611b6 | 4702 | |
fff4a58c LH |
4703 | if (unlikely(mas_rewalk_if_dead(mas, node, save_point))) |
4704 | goto retry; | |
54a611b6 | 4705 | |
067311d3 LH |
4706 | if (pivot >= max) { /* Was at the limit, next will extend beyond */ |
4707 | mas->status = ma_overflow; | |
4708 | return NULL; | |
4709 | } | |
fff4a58c | 4710 | } |
54a611b6 | 4711 | |
e9c52d89 | 4712 | if (likely(mas->offset < mas->end)) { |
fff4a58c | 4713 | mas->index = pivots[mas->offset] + 1; |
a8091f03 | 4714 | again: |
fff4a58c | 4715 | mas->offset++; |
e9c52d89 | 4716 | if (likely(mas->offset < mas->end)) |
fff4a58c LH |
4717 | mas->last = pivots[mas->offset]; |
4718 | else | |
4719 | mas->last = mas->max; | |
4720 | } else { | |
067311d3 LH |
4721 | if (mas->last >= max) { |
4722 | mas->status = ma_overflow; | |
4723 | return NULL; | |
4724 | } | |
4725 | ||
fff4a58c LH |
4726 | if (mas_next_node(mas, node, max)) { |
4727 | mas_rewalk(mas, save_point); | |
4728 | goto retry; | |
4729 | } | |
4730 | ||
067311d3 | 4731 | if (WARN_ON_ONCE(mas_is_overflow(mas))) |
ca80f610 LH |
4732 | return NULL; |
4733 | ||
fff4a58c LH |
4734 | mas->offset = 0; |
4735 | mas->index = mas->min; | |
4736 | node = mas_mn(mas); | |
4737 | type = mte_node_type(mas->node); | |
4738 | pivots = ma_pivots(node, type); | |
4739 | mas->last = pivots[0]; | |
54a611b6 LH |
4740 | } |
4741 | ||
fff4a58c LH |
4742 | slots = ma_slots(node, type); |
4743 | entry = mt_slot(mas->tree, slots, mas->offset); | |
4744 | if (unlikely(mas_rewalk_if_dead(mas, node, save_point))) | |
4745 | goto retry; | |
54a611b6 | 4746 | |
fff4a58c LH |
4747 | if (entry) |
4748 | return entry; | |
54a611b6 | 4749 | |
067311d3 | 4750 | |
fff4a58c | 4751 | if (!empty) { |
067311d3 LH |
4752 | if (mas->last >= max) { |
4753 | mas->status = ma_overflow; | |
4754 | return NULL; | |
4755 | } | |
a8091f03 LH |
4756 | |
4757 | mas->index = mas->last + 1; | |
fff4a58c LH |
4758 | goto again; |
4759 | } | |
4760 | ||
4761 | return entry; | |
54a611b6 LH |
4762 | } |
4763 | ||
54a611b6 LH |
4764 | /* |
4765 | * mas_rev_awalk() - Internal function. Reverse allocation walk. Find the | |
4766 | * highest gap address of a given size in a given node and descend. | |
4767 | * @mas: The maple state | |
4768 | * @size: The needed size. | |
4769 | * @gap_min: Set to the minimum of the found gap, @gap_max to its maximum. | |
4770 | * Return: True if found in a leaf, false otherwise. | |
4771 | * | |
4772 | */ | |
fad8e429 LH |
4773 | static bool mas_rev_awalk(struct ma_state *mas, unsigned long size, |
4774 | unsigned long *gap_min, unsigned long *gap_max) | |
54a611b6 LH |
4775 | { |
4776 | enum maple_type type = mte_node_type(mas->node); | |
4777 | struct maple_node *node = mas_mn(mas); | |
4778 | unsigned long *pivots, *gaps; | |
4779 | void __rcu **slots; | |
4780 | unsigned long gap = 0; | |
7327e811 | 4781 | unsigned long max, min; |
54a611b6 LH |
4782 | unsigned char offset; |
4783 | ||
4784 | if (unlikely(mas_is_err(mas))) | |
4785 | return true; | |
4786 | ||
4787 | if (ma_is_dense(type)) { | |
4788 | /* dense nodes. */ | |
4789 | mas->offset = (unsigned char)(mas->index - mas->min); | |
4790 | return true; | |
4791 | } | |
4792 | ||
4793 | pivots = ma_pivots(node, type); | |
4794 | slots = ma_slots(node, type); | |
4795 | gaps = ma_gaps(node, type); | |
4796 | offset = mas->offset; | |
4797 | min = mas_safe_min(mas, pivots, offset); | |
4798 | /* Skip out of bounds. */ | |
4799 | while (mas->last < min) | |
4800 | min = mas_safe_min(mas, pivots, --offset); | |
4801 | ||
4802 | max = mas_safe_pivot(mas, pivots, offset, type); | |
7327e811 | 4803 | while (mas->index <= max) { |
54a611b6 LH |
4804 | gap = 0; |
4805 | if (gaps) | |
4806 | gap = gaps[offset]; | |
4807 | else if (!mas_slot(mas, slots, offset)) | |
4808 | gap = max - min + 1; | |
4809 | ||
4810 | if (gap) { | |
4811 | if ((size <= gap) && (size <= mas->last - min + 1)) | |
4812 | break; | |
4813 | ||
4814 | if (!gaps) { | |
4815 | /* Skip the next slot, it cannot be a gap. */ | |
4816 | if (offset < 2) | |
4817 | goto ascend; | |
4818 | ||
4819 | offset -= 2; | |
4820 | max = pivots[offset]; | |
4821 | min = mas_safe_min(mas, pivots, offset); | |
4822 | continue; | |
4823 | } | |
4824 | } | |
4825 | ||
4826 | if (!offset) | |
4827 | goto ascend; | |
4828 | ||
4829 | offset--; | |
4830 | max = min - 1; | |
4831 | min = mas_safe_min(mas, pivots, offset); | |
4832 | } | |
4833 | ||
7327e811 LH |
4834 | if (unlikely((mas->index > max) || (size - 1 > max - mas->index))) |
4835 | goto no_space; | |
54a611b6 LH |
4836 | |
4837 | if (unlikely(ma_is_leaf(type))) { | |
4838 | mas->offset = offset; | |
fad8e429 LH |
4839 | *gap_min = min; |
4840 | *gap_max = min + gap - 1; | |
54a611b6 LH |
4841 | return true; |
4842 | } | |
4843 | ||
4844 | /* descend, only happens under lock. */ | |
4845 | mas->node = mas_slot(mas, slots, offset); | |
4846 | mas->min = min; | |
4847 | mas->max = max; | |
4848 | mas->offset = mas_data_end(mas); | |
4849 | return false; | |
4850 | ||
4851 | ascend: | |
7327e811 LH |
4852 | if (!mte_is_root(mas->node)) |
4853 | return false; | |
54a611b6 | 4854 | |
7327e811 LH |
4855 | no_space: |
4856 | mas_set_err(mas, -EBUSY); | |
54a611b6 LH |
4857 | return false; |
4858 | } | |
4859 | ||
4860 | static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size) | |
4861 | { | |
4862 | enum maple_type type = mte_node_type(mas->node); | |
4863 | unsigned long pivot, min, gap = 0; | |
06e8fd99 LH |
4864 | unsigned char offset, data_end; |
4865 | unsigned long *gaps, *pivots; | |
4866 | void __rcu **slots; | |
4867 | struct maple_node *node; | |
54a611b6 LH |
4868 | bool found = false; |
4869 | ||
4870 | if (ma_is_dense(type)) { | |
4871 | mas->offset = (unsigned char)(mas->index - mas->min); | |
4872 | return true; | |
4873 | } | |
4874 | ||
06e8fd99 LH |
4875 | node = mas_mn(mas); |
4876 | pivots = ma_pivots(node, type); | |
4877 | slots = ma_slots(node, type); | |
4878 | gaps = ma_gaps(node, type); | |
54a611b6 | 4879 | offset = mas->offset; |
54a611b6 | 4880 | min = mas_safe_min(mas, pivots, offset); |
06e8fd99 LH |
4881 | data_end = ma_data_end(node, type, pivots, mas->max); |
4882 | for (; offset <= data_end; offset++) { | |
29b2681f | 4883 | pivot = mas_safe_pivot(mas, pivots, offset, type); |
54a611b6 LH |
4884 | |
4885 | /* Not within lower bounds */ | |
4886 | if (mas->index > pivot) | |
4887 | goto next_slot; | |
4888 | ||
4889 | if (gaps) | |
4890 | gap = gaps[offset]; | |
4891 | else if (!mas_slot(mas, slots, offset)) | |
4892 | gap = min(pivot, mas->last) - max(mas->index, min) + 1; | |
4893 | else | |
4894 | goto next_slot; | |
4895 | ||
4896 | if (gap >= size) { | |
4897 | if (ma_is_leaf(type)) { | |
4898 | found = true; | |
f5bd4187 | 4899 | break; |
54a611b6 | 4900 | } |
5f8db8d4 WY |
4901 | |
4902 | mas->node = mas_slot(mas, slots, offset); | |
4903 | mas->min = min; | |
4904 | mas->max = pivot; | |
4905 | offset = 0; | |
4906 | break; | |
54a611b6 LH |
4907 | } |
4908 | next_slot: | |
4909 | min = pivot + 1; | |
4910 | if (mas->last <= pivot) { | |
4911 | mas_set_err(mas, -EBUSY); | |
4912 | return true; | |
4913 | } | |
4914 | } | |
4915 | ||
54a611b6 LH |
4916 | mas->offset = offset; |
4917 | return found; | |
4918 | } | |
4919 | ||
4920 | /** | |
4921 | * mas_walk() - Search for @mas->index in the tree. | |
4922 | * @mas: The maple state. | |
4923 | * | |
4924 | * mas->index and mas->last will be set to the range if there is a value. If | |
067311d3 | 4925 | * mas->status is ma_none, reset to ma_start |
54a611b6 LH |
4926 | * |
4927 | * Return: the entry at the location or %NULL. | |
4928 | */ | |
4929 | void *mas_walk(struct ma_state *mas) | |
4930 | { | |
4931 | void *entry; | |
4932 | ||
a8091f03 | 4933 | if (!mas_is_active(mas) && !mas_is_start(mas)) | |
067311d3 | 4934 | mas->status = ma_start; |
54a611b6 LH |
4935 | retry: |
4936 | entry = mas_state_walk(mas); | |
6b23a290 | 4937 | if (mas_is_start(mas)) { |
54a611b6 | 4938 | goto retry; |
6b23a290 LH |
4939 | } else if (mas_is_none(mas)) { |
4940 | mas->index = 0; | |
4941 | mas->last = ULONG_MAX; | |
4942 | } else if (mas_is_ptr(mas)) { | |
54a611b6 LH |
4943 | if (!mas->index) { |
4944 | mas->last = 0; | |
6b23a290 | 4945 | return entry; |
54a611b6 | 4946 | } |
54a611b6 | 4947 | |
6b23a290 | 4948 | mas->index = 1; |
54a611b6 | 4949 | mas->last = ULONG_MAX; |
067311d3 | 4950 | mas->status = ma_none; |
6b23a290 | 4951 | return NULL; |
54a611b6 LH |
4952 | } |
4953 | ||
4954 | return entry; | |
4955 | } | |
120b1162 | 4956 | EXPORT_SYMBOL_GPL(mas_walk); |
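/*
 * Example usage (illustrative sketch added for documentation;
 * example_lookup() is hypothetical).  Finds the entry whose range
 * contains @index under the RCU read lock.
 */
static void *example_lookup(struct maple_tree *tree, unsigned long index)
{
	void *entry;

	MA_STATE(mas, tree, index, index);

	rcu_read_lock();
	entry = mas_walk(&mas);
	/* On success, [mas.index, mas.last] is the range holding entry */
	rcu_read_unlock();

	return entry;
}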
54a611b6 LH |
4957 | |
4958 | static inline bool mas_rewind_node(struct ma_state *mas) | |
4959 | { | |
4960 | unsigned char slot; | |
4961 | ||
4962 | do { | |
4963 | if (mte_is_root(mas->node)) { | |
4964 | slot = mas->offset; | |
4965 | if (!slot) | |
4966 | return false; | |
4967 | } else { | |
4968 | mas_ascend(mas); | |
4969 | slot = mas->offset; | |
4970 | } | |
4971 | } while (!slot); | |
4972 | ||
4973 | mas->offset = --slot; | |
4974 | return true; | |
4975 | } | |
4976 | ||
4977 | /* | |
4978 | * mas_skip_node() - Internal function. Skip over a node. | |
4979 | * @mas: The maple state. | |
4980 | * | |
4981 | * Return: true if there is another node, false otherwise. | |
4982 | */ | |
4983 | static inline bool mas_skip_node(struct ma_state *mas) | |
4984 | { | |
0fa99fdf LH |
4985 | if (mas_is_err(mas)) |
4986 | return false; | |
54a611b6 | 4987 | |
54a611b6 LH |
4988 | do { |
4989 | if (mte_is_root(mas->node)) { | |
0fa99fdf | 4990 | if (mas->offset >= mas_data_end(mas)) { |
54a611b6 LH |
4991 | mas_set_err(mas, -EBUSY); |
4992 | return false; | |
4993 | } | |
4994 | } else { | |
4995 | mas_ascend(mas); | |
54a611b6 | 4996 | } |
0fa99fdf | 4997 | } while (mas->offset >= mas_data_end(mas)); |
54a611b6 | 4998 | |
0fa99fdf | 4999 | mas->offset++; |
54a611b6 LH |
5000 | return true; |
5001 | } | |
5002 | ||
5003 | /* | |
5004 | * mas_awalk() - Allocation walk. Search from low address to high, for a gap of | |
5005 | * @size | |
5006 | * @mas: The maple state | |
5007 | * @size: The size of the gap required | |
5008 | * | |
5009 | * Search between @mas->index and @mas->last for a gap of @size. | |
5010 | */ | |
5011 | static inline void mas_awalk(struct ma_state *mas, unsigned long size) | |
5012 | { | |
5013 | struct maple_enode *last = NULL; | |
5014 | ||
5015 | /* | |
5016 | * There are 4 options: | |
5017 | * go to child (descend) | |
5018 | * go back to parent (ascend) | |
f2760364 WY |
5019 | * no gap found. (return, error == -EBUSY) |
5020 | * found the gap. (return) | |
54a611b6 LH |
5021 | */ |
5022 | while (!mas_is_err(mas) && !mas_anode_descend(mas, size)) { | |
5023 | if (last == mas->node) | |
5024 | mas_skip_node(mas); | |
5025 | else | |
5026 | last = mas->node; | |
5027 | } | |
5028 | } | |
5029 | ||
54a611b6 LH |
5030 | /* |
5031 | * mas_sparse_area() - Internal function. Return upper or lower limit when | |
5032 | * searching for a gap in an empty tree. | |
5033 | * @mas: The maple state | |
5034 | * @min: the minimum range | |
5035 | * @max: The maximum range | |
5036 | * @size: The size of the gap | |
5037 | * @fwd: Searching forward or back | |
5038 | */ | |
29ad6bb3 | 5039 | static inline int mas_sparse_area(struct ma_state *mas, unsigned long min, |
54a611b6 LH |
5040 | unsigned long max, unsigned long size, bool fwd) |
5041 | { | |
29ad6bb3 PZ |
5042 | if (likely(!mas_is_none(mas)) && min == 0) { | |
5043 | min++; | |
5044 | /* | |
5045 | * min was just increased past index 0, so recheck that a | |
5046 | * gap of @size still fits between min and max. | |
5047 | */ | |
5048 | if (min > max || max - min + 1 < size) | |
5049 | return -EBUSY; | |
5050 | } | |
54a611b6 LH |
5051 | /* mas_is_ptr: index 0 holds the root entry, so it was skipped above */ | |
5052 | ||
54a611b6 | 5053 | if (fwd) { |
29ad6bb3 PZ |
5054 | mas->index = min; |
5055 | mas->last = min + size - 1; | |
5056 | } else { | |
5057 | mas->last = max; | |
5058 | mas->index = max - size + 1; | |
54a611b6 | 5059 | } |
29ad6bb3 | 5060 | return 0; |
54a611b6 LH |
5061 | } |
5062 | ||
5063 | /* | |
5064 | * mas_empty_area() - Get the lowest address within the range that is | |
5065 | * sufficient for the size requested. | |
5066 | * @mas: The maple state | |
5067 | * @min: The lowest value of the range | |
5068 | * @max: The highest value of the range | |
5069 | * @size: The size needed | |
5070 | */ | |
5071 | int mas_empty_area(struct ma_state *mas, unsigned long min, | |
5072 | unsigned long max, unsigned long size) | |
5073 | { | |
5074 | unsigned char offset; | |
5075 | unsigned long *pivots; | |
5076 | enum maple_type mt; | |
0de56e38 | 5077 | struct maple_node *node; |
54a611b6 | 5078 | |
ba997212 LH |
5079 | if (min > max) |
5080 | return -EINVAL; | |
5081 | ||
5082 | if (size == 0 || max - min < size - 1) | |
fad8e429 LH |
5083 | return -EINVAL; |
5084 | ||
54a611b6 LH |
5085 | if (mas_is_start(mas)) |
5086 | mas_start(mas); | |
5087 | else if (mas->offset >= 2) | |
5088 | mas->offset -= 2; | |
5089 | else if (!mas_skip_node(mas)) | |
5090 | return -EBUSY; | |
5091 | ||
5092 | /* Empty set */ | |
29ad6bb3 PZ |
5093 | if (mas_is_none(mas) || mas_is_ptr(mas)) |
5094 | return mas_sparse_area(mas, min, max, size, true); | |
54a611b6 LH |
5095 | |
5096 | /* The start of the window can only be within these values */ | |
5097 | mas->index = min; | |
5098 | mas->last = max; | |
5099 | mas_awalk(mas, size); | |
5100 | ||
5101 | if (unlikely(mas_is_err(mas))) | |
5102 | return xa_err(mas->node); | |
5103 | ||
5104 | offset = mas->offset; | |
0de56e38 | 5105 | node = mas_mn(mas); |
54a611b6 | 5106 | mt = mte_node_type(mas->node); |
0de56e38 | 5107 | pivots = ma_pivots(node, mt); |
0257d990 PZ |
5108 | min = mas_safe_min(mas, pivots, offset); |
5109 | if (mas->index < min) | |
5110 | mas->index = min; | |
54a611b6 | 5111 | mas->last = mas->index + size - 1; |
0de56e38 | 5112 | mas->end = ma_data_end(node, mt, pivots, mas->max); |
54a611b6 LH |
5113 | return 0; |
5114 | } | |
120b1162 | 5115 | EXPORT_SYMBOL_GPL(mas_empty_area); |
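/*
 * Example usage (illustrative sketch added for documentation;
 * example_reserve() is hypothetical).  Finds the lowest gap of 16
 * consecutive free indices in [0, 1000] and claims it.  Gap tracking is
 * most useful on trees initialized with MT_FLAGS_ALLOC_RANGE.
 */
static int example_reserve(struct maple_tree *tree, void *item)
{
	int ret;

	MA_STATE(mas, tree, 0, 0);

	mtree_lock(tree);
	ret = mas_empty_area(&mas, 0, 1000, 16);
	if (!ret)	/* [mas.index, mas.last] now spans the free gap */
		ret = mas_store_gfp(&mas, item, GFP_KERNEL);
	mtree_unlock(tree);

	return ret;
}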
54a611b6 LH |
5116 | |
5117 | /* | |
5118 | * mas_empty_area_rev() - Get the highest address within the range that is | |
5119 | * sufficient for the size requested. | |
5120 | * @mas: The maple state | |
5121 | * @min: The lowest value of the range | |
5122 | * @max: The highest value of the range | |
5123 | * @size: The size needed | |
5124 | */ | |
5125 | int mas_empty_area_rev(struct ma_state *mas, unsigned long min, | |
5126 | unsigned long max, unsigned long size) | |
5127 | { | |
5128 | struct maple_enode *last = mas->node; | |
5129 | ||
ba997212 LH |
5130 | if (min > max) |
5131 | return -EINVAL; | |
5132 | ||
5133 | if (size == 0 || max - min < size - 1) | |
fad8e429 LH |
5134 | return -EINVAL; |
5135 | ||
955a923d | 5136 | if (mas_is_start(mas)) |
54a611b6 | 5137 | mas_start(mas); |
955a923d | 5138 | else if ((mas->offset < 2) && (!mas_rewind_node(mas))) |
54a611b6 | 5139 | return -EBUSY; |
54a611b6 | 5140 | |
955a923d | 5141 | if (unlikely(mas_is_none(mas) || mas_is_ptr(mas))) |
29ad6bb3 | 5142 | return mas_sparse_area(mas, min, max, size, false); |
955a923d LH |
5143 | else if (mas->offset >= 2) |
5144 | mas->offset -= 2; | |
5145 | else | |
5146 | mas->offset = mas_data_end(mas); | |
5147 | ||
54a611b6 LH |
5148 | |
5149 | /* The start of the window can only be within these values. */ | |
5150 | mas->index = min; | |
5151 | mas->last = max; | |
5152 | ||
fad8e429 | 5153 | while (!mas_rev_awalk(mas, size, &min, &max)) { |
54a611b6 LH |
5154 | if (last == mas->node) { |
5155 | if (!mas_rewind_node(mas)) | |
5156 | return -EBUSY; | |
5157 | } else { | |
5158 | last = mas->node; | |
5159 | } | |
5160 | } | |
5161 | ||
5162 | if (mas_is_err(mas)) | |
5163 | return xa_err(mas->node); | |
5164 | ||
5165 | if (unlikely(mas->offset == MAPLE_NODE_SLOTS)) | |
5166 | return -EBUSY; | |
5167 | ||
54a611b6 | 5168 | /* Trim the upper limit to the max. */ |
ba997212 | 5169 | if (max < mas->last) |
fad8e429 | 5170 | mas->last = max; |
54a611b6 LH |
5171 | |
5172 | mas->index = mas->last - size + 1; | |
31c532a8 | 5173 | mas->end = mas_data_end(mas); |
54a611b6 LH |
5174 | return 0; |
5175 | } | |
120b1162 | 5176 | EXPORT_SYMBOL_GPL(mas_empty_area_rev); |
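/*
 * Example usage (illustrative sketch added for documentation;
 * example_reserve_topdown() is hypothetical).  The reverse search is
 * the top-down analogue used for mmap()-style address allocation: it
 * finds the highest 16-slot gap at or below 1000.
 */
static int example_reserve_topdown(struct maple_tree *tree, void *item)
{
	int ret;

	MA_STATE(mas, tree, 0, 0);

	mtree_lock(tree);
	ret = mas_empty_area_rev(&mas, 0, 1000, 16);
	if (!ret)
		ret = mas_store_gfp(&mas, item, GFP_KERNEL);
	mtree_unlock(tree);

	return ret;
}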
54a611b6 | 5177 | |
54a611b6 | 5178 | /* |
790e1fa8 | 5179 | * mte_dead_leaves() - Mark all leaves of a node as dead. |
96ae4c90 WY |
5180 | * @enode: the encoded node |
5181 | * @mt: the maple tree | |
54a611b6 LH |
5182 | * @slots: Pointer to the slot array |
5183 | * | |
5184 | * Must hold the write lock. | |
5185 | * | |
5186 | * Return: The number of leaves marked as dead. | |
5187 | */ | |
5188 | static inline | |
790e1fa8 LH |
5189 | unsigned char mte_dead_leaves(struct maple_enode *enode, struct maple_tree *mt, |
5190 | void __rcu **slots) | |
54a611b6 LH |
5191 | { |
5192 | struct maple_node *node; | |
5193 | enum maple_type type; | |
5194 | void *entry; | |
5195 | int offset; | |
5196 | ||
790e1fa8 LH |
5197 | for (offset = 0; offset < mt_slot_count(enode); offset++) { |
5198 | entry = mt_slot(mt, slots, offset); | |
54a611b6 LH |
5199 | type = mte_node_type(entry); |
5200 | node = mte_to_node(entry); | |
5201 | /* Use both node and type to catch LE & BE metadata */ | |
5202 | if (!node || !type) | |
5203 | break; | |
5204 | ||
5205 | mte_set_node_dead(entry); | |
54a611b6 LH |
5206 | node->type = type; |
5207 | rcu_assign_pointer(slots[offset], node); | |
5208 | } | |
5209 | ||
5210 | return offset; | |
5211 | } | |
5212 | ||
790e1fa8 LH |
5213 | /** |
5214 | * mte_dead_walk() - Walk down a dead tree to just before the leaves | |
5215 | * @enode: The maple encoded node | |
5216 | * @offset: The starting offset | |
5217 | * | |
5218 | * Note: This can only be used from the RCU callback context. | |
5219 | */ | |
5220 | static void __rcu **mte_dead_walk(struct maple_enode **enode, unsigned char offset) | |
54a611b6 LH |
5221 | { |
5222 | struct maple_node *node, *next; | |
5223 | void __rcu **slots = NULL; | |
5224 | ||
790e1fa8 | 5225 | next = mte_to_node(*enode); |
54a611b6 | 5226 | do { |
790e1fa8 LH |
5227 | *enode = ma_enode_ptr(next); |
5228 | node = mte_to_node(*enode); | |
54a611b6 | 5229 | slots = ma_slots(node, node->type); |
790e1fa8 LH |
5230 | next = rcu_dereference_protected(slots[offset], |
5231 | lock_is_held(&rcu_callback_map)); | |
54a611b6 LH |
5232 | offset = 0; |
5233 | } while (!ma_is_leaf(next->type)); | |
5234 | ||
5235 | return slots; | |
5236 | } | |
5237 | ||
790e1fa8 LH |
5238 | /** |
5239 | * mt_free_walk() - Walk & free a tree in the RCU callback context | |
5240 | * @head: The RCU head that's within the node. | |
5241 | * | |
5242 | * Note: This can only be used from the RCU callback context. | |
5243 | */ | |
54a611b6 LH |
5244 | static void mt_free_walk(struct rcu_head *head) |
5245 | { | |
5246 | void __rcu **slots; | |
5247 | struct maple_node *node, *start; | |
790e1fa8 | 5248 | struct maple_enode *enode; |
54a611b6 LH |
5249 | unsigned char offset; |
5250 | enum maple_type type; | |
54a611b6 LH |
5251 | |
5252 | node = container_of(head, struct maple_node, rcu); | |
5253 | ||
5254 | if (ma_is_leaf(node->type)) | |
5255 | goto free_leaf; | |
5256 | ||
54a611b6 | 5257 | start = node; |
790e1fa8 LH |
5258 | enode = mt_mk_node(node, node->type); |
5259 | slots = mte_dead_walk(&enode, 0); | |
5260 | node = mte_to_node(enode); | |
54a611b6 LH |
5261 | do { |
5262 | mt_free_bulk(node->slot_len, slots); | |
5263 | offset = node->parent_slot + 1; | |
790e1fa8 LH |
5264 | enode = node->piv_parent; |
5265 | if (mte_to_node(enode) == node) | |
5266 | goto free_leaf; | |
5267 | ||
5268 | type = mte_node_type(enode); | |
5269 | slots = ma_slots(mte_to_node(enode), type); | |
5270 | if ((offset < mt_slots[type]) && | |
5271 | rcu_dereference_protected(slots[offset], | |
5272 | lock_is_held(&rcu_callback_map))) | |
5273 | slots = mte_dead_walk(&enode, offset); | |
5274 | node = mte_to_node(enode); | |
54a611b6 LH |
5275 | } while ((node != start) || (node->slot_len < offset)); |
5276 | ||
5277 | slots = ma_slots(node, node->type); | |
5278 | mt_free_bulk(node->slot_len, slots); | |
5279 | ||
54a611b6 LH |
5280 | free_leaf: |
5281 | mt_free_rcu(&node->rcu); | |
5282 | } | |
5283 | ||
790e1fa8 LH |
5284 | static inline void __rcu **mte_destroy_descend(struct maple_enode **enode, |
5285 | struct maple_tree *mt, struct maple_enode *prev, unsigned char offset) | |
54a611b6 LH |
5286 | { |
5287 | struct maple_node *node; | |
790e1fa8 | 5288 | struct maple_enode *next = *enode; |
54a611b6 | 5289 | void __rcu **slots = NULL; |
790e1fa8 LH |
5290 | enum maple_type type; |
5291 | unsigned char next_offset = 0; | |
54a611b6 LH |
5292 | |
5293 | do { | |
790e1fa8 LH |
5294 | *enode = next; |
5295 | node = mte_to_node(*enode); | |
5296 | type = mte_node_type(*enode); | |
5297 | slots = ma_slots(node, type); | |
5298 | next = mt_slot_locked(mt, slots, next_offset); | |
54a611b6 | 5299 | if ((mte_dead_node(next))) |
790e1fa8 | 5300 | next = mt_slot_locked(mt, slots, ++next_offset); |
54a611b6 | 5301 | |
790e1fa8 LH |
5302 | mte_set_node_dead(*enode); |
5303 | node->type = type; | |
54a611b6 LH |
5304 | node->piv_parent = prev; |
5305 | node->parent_slot = offset; | |
790e1fa8 LH |
5306 | offset = next_offset; |
5307 | next_offset = 0; | |
5308 | prev = *enode; | |
54a611b6 LH |
5309 | } while (!mte_is_leaf(next)); |
5310 | ||
5311 | return slots; | |
5312 | } | |
5313 | ||
790e1fa8 | 5314 | static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt, |
54a611b6 LH |
5315 | bool free) |
5316 | { | |
5317 | void __rcu **slots; | |
5318 | struct maple_node *node = mte_to_node(enode); | |
5319 | struct maple_enode *start; | |
54a611b6 | 5320 | |
2e5b4921 | 5321 | if (mte_is_leaf(enode)) { |
ea9b77f9 | 5322 | mte_set_node_dead(enode); |
2e5b4921 | 5323 | node->type = mte_node_type(enode); |
54a611b6 | 5324 | goto free_leaf; |
2e5b4921 | 5325 | } |
54a611b6 | 5326 | |
2e5b4921 | 5327 | start = enode; |
790e1fa8 LH |
5328 | slots = mte_destroy_descend(&enode, mt, start, 0); |
5329 | node = mte_to_node(enode); // Updated in the above call. | |
54a611b6 LH |
5330 | do { |
5331 | enum maple_type type; | |
5332 | unsigned char offset; | |
5333 | struct maple_enode *parent, *tmp; | |
5334 | ||
790e1fa8 | 5335 | node->slot_len = mte_dead_leaves(enode, mt, slots); |
54a611b6 LH |
5336 | if (free) |
5337 | mt_free_bulk(node->slot_len, slots); | |
5338 | offset = node->parent_slot + 1; | |
790e1fa8 LH |
5339 | enode = node->piv_parent; |
5340 | if (mte_to_node(enode) == node) | |
5341 | goto free_leaf; | |
54a611b6 | 5342 | |
790e1fa8 LH |
5343 | type = mte_node_type(enode); |
5344 | slots = ma_slots(mte_to_node(enode), type); | |
54a611b6 LH |
5345 | if (offset >= mt_slots[type]) |
5346 | goto next; | |
5347 | ||
790e1fa8 | 5348 | tmp = mt_slot_locked(mt, slots, offset); |
54a611b6 | 5349 | if (mte_node_type(tmp) && mte_to_node(tmp)) { |
790e1fa8 LH |
5350 | parent = enode; |
5351 | enode = tmp; | |
5352 | slots = mte_destroy_descend(&enode, mt, parent, offset); | |
54a611b6 LH |
5353 | } |
5354 | next: | |
790e1fa8 LH |
5355 | node = mte_to_node(enode); |
5356 | } while (start != enode); | |
54a611b6 | 5357 | |
790e1fa8 LH |
5358 | node = mte_to_node(enode); |
5359 | node->slot_len = mte_dead_leaves(enode, mt, slots); | |
54a611b6 LH |
5360 | if (free) |
5361 | mt_free_bulk(node->slot_len, slots); | |
5362 | ||
54a611b6 LH |
5363 | free_leaf: |
5364 | if (free) | |
5365 | mt_free_rcu(&node->rcu); | |
2e5b4921 | 5366 | else |
790e1fa8 | 5367 | mt_clear_meta(mt, node, node->type); |
54a611b6 LH |
5368 | } |
5369 | ||
5370 | /* | |
5371 | * mte_destroy_walk() - Free a tree or sub-tree. | |
f942b0f0 VY |
5372 | * @enode: the encoded maple node (maple_enode) to start |
5373 | * @mt: the tree to free - needed for node types. | |
54a611b6 LH |
5374 | * |
5375 | * Must hold the write lock. | |
5376 | */ | |
5377 | static inline void mte_destroy_walk(struct maple_enode *enode, | |
5378 | struct maple_tree *mt) | |
5379 | { | |
5380 | struct maple_node *node = mte_to_node(enode); | |
5381 | ||
5382 | if (mt_in_rcu(mt)) { | |
790e1fa8 | 5383 | mt_destroy_walk(enode, mt, false); |
54a611b6 LH |
5384 | call_rcu(&node->rcu, mt_free_walk); |
5385 | } else { | |
790e1fa8 | 5386 | mt_destroy_walk(enode, mt, true); |
54a611b6 LH |
5387 | } |
5388 | } | |
54a611b6 LH |
5389 | /* Interface */ |
5390 | ||
5391 | /** | |
5392 | * mas_store() - Store an @entry. | |
5393 | * @mas: The maple state. | |
5394 | * @entry: The entry to store. | |
5395 | * | |
5396 | * The @mas->index and @mas->last are used to set the range for the @entry. | |
54a611b6 LH |
5397 | * |
5398 | * Return: the first entry between mas->index and mas->last or %NULL. | |
5399 | */ | |
5400 | void *mas_store(struct ma_state *mas, void *entry) | |
5401 | { | |
4037d44f | 5402 | int request; |
54a611b6 LH |
5403 | MA_WR_STATE(wr_mas, mas, entry); |
5404 | ||
5405 | trace_ma_write(__func__, mas, 0, entry); | |
5406 | #ifdef CONFIG_DEBUG_MAPLE_TREE | |
e6d6792a | 5407 | if (MAS_WARN_ON(mas, mas->index > mas->last)) |
b314e215 LS |
5408 | pr_err("Error %lX > %lX " PTR_FMT "\n", mas->index, mas->last, |
5409 | entry); | |
e6d6792a | 5410 | |
54a611b6 LH |
5411 | if (mas->index > mas->last) { |
5412 | mas_set_err(mas, -EINVAL); | |
5413 | return NULL; | |
5414 | } | |
5415 | ||
5416 | #endif | |
5417 | ||
5418 | /* | |
5419 | * Storing is the same operation as insert with the added caveat that it | |
5420 | * can overwrite entries. Although this seems simple enough, one may | |
5421 | * want to examine what happens if a single store operation was to | |
5422 | * overwrite multiple entries within a self-balancing B-Tree. | |
5423 | */ | |
580fcbd6 | 5424 | mas_wr_prealloc_setup(&wr_mas); |
f0c99037 | 5425 | mas->store_type = mas_wr_store_type(&wr_mas); |
4037d44f SK |
5426 | if (mas->mas_flags & MA_STATE_PREALLOC) { |
5427 | mas_wr_store_entry(&wr_mas); | |
5428 | MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas)); | |
5429 | return wr_mas.content; | |
5430 | } | |
5431 | ||
28092a65 | 5432 | request = mas_prealloc_calc(&wr_mas, entry); |
4037d44f SK |
5433 | if (!request) |
5434 | goto store; | |
5435 | ||
5436 | mas_node_count(mas, request); | |
5437 | if (mas_is_err(mas)) | |
5438 | return NULL; | |
5439 | ||
5440 | store: | |
54a611b6 | 5441 | mas_wr_store_entry(&wr_mas); |
4037d44f | 5442 | mas_destroy(mas); |
54a611b6 LH |
5443 | return wr_mas.content; |
5444 | } | |
120b1162 | 5445 | EXPORT_SYMBOL_GPL(mas_store); |
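/*
 * Example usage (illustrative sketch added for documentation;
 * example_store_range() is hypothetical).  Stores one entry over the
 * whole range [10, 19]; the caller holds the tree lock.  mas_store()
 * allocates nodes with GFP_NOWAIT, so failure is reported through the
 * maple state rather than a return code.
 */
static void example_store_range(struct ma_state *mas, void *item)
{
	mas_set_range(mas, 10, 19);	/* mas->index = 10, mas->last = 19 */
	mas_store(mas, item);
	if (mas_is_err(mas))
		pr_err("store failed: %d\n", xa_err(mas->node));
}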
54a611b6 LH |
5446 | |
5447 | /** | |
5448 | * mas_store_gfp() - Store a value into the tree. | |
5449 | * @mas: The maple state | |
5450 | * @entry: The entry to store | |
5451 | * @gfp: The GFP_FLAGS to use for allocations if necessary. | |
5452 | * | |
5453 | * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not | |
5454 | * be allocated. | |
5455 | */ | |
5456 | int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp) | |
5457 | { | |
e1b8b883 SK |
5458 | unsigned long index = mas->index; |
5459 | unsigned long last = mas->last; | |
54a611b6 | 5460 | MA_WR_STATE(wr_mas, mas, entry); |
3cd9e92e | 5461 | int ret = 0; |
54a611b6 | 5462 | |
54a611b6 | 5463 | retry: |
3cd9e92e | 5464 | mas_wr_preallocate(&wr_mas, entry); |
e1b8b883 SK |
5465 | if (unlikely(mas_nomem(mas, gfp))) { |
5466 | if (!entry) | |
5467 | __mas_set_range(mas, index, last); | |
54a611b6 | 5468 | goto retry; |
e1b8b883 | 5469 | } |
54a611b6 | 5470 | |
3cd9e92e SK |
5471 | if (mas_is_err(mas)) { |
5472 | ret = xa_err(mas->node); | |
5473 | goto out; | |
5474 | } | |
54a611b6 | 5475 | |
3cd9e92e SK |
5476 | mas_wr_store_entry(&wr_mas); |
5477 | out: | |
5478 | mas_destroy(mas); | |
5479 | return ret; | |
54a611b6 | 5480 | } |
120b1162 | 5481 | EXPORT_SYMBOL_GPL(mas_store_gfp); |
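/*
 * Example usage (illustrative sketch added for documentation;
 * example_erase_range() is hypothetical).  Erasing is just storing
 * NULL over the range to be cleared.
 */
static int example_erase_range(struct maple_tree *tree, unsigned long first,
			       unsigned long last)
{
	int ret;

	MA_STATE(mas, tree, first, last);

	mtree_lock(tree);
	ret = mas_store_gfp(&mas, NULL, GFP_KERNEL);
	mtree_unlock(tree);

	return ret;
}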
54a611b6 LH |
5482 | |
5483 | /** | |
5484 | * mas_store_prealloc() - Store a value into the tree using memory | |
5485 | * preallocated in the maple state. | |
5486 | * @mas: The maple state | |
5487 | * @entry: The entry to store. | |
5488 | */ | |
5489 | void mas_store_prealloc(struct ma_state *mas, void *entry) | |
5490 | { | |
5491 | MA_WR_STATE(wr_mas, mas, entry); | |
5492 | ||
c27e6183 SK |
5493 | if (mas->store_type == wr_store_root) { |
5494 | mas_wr_prealloc_setup(&wr_mas); | |
5495 | goto store; | |
5496 | } | |
5497 | ||
5498 | mas_wr_walk_descend(&wr_mas); | |
5499 | if (mas->store_type != wr_spanning_store) { | |
5500 | /* set wr_mas->content to current slot */ | |
5501 | wr_mas.content = mas_slot_locked(mas, wr_mas.slots, mas->offset); | |
5502 | mas_wr_end_piv(&wr_mas); | |
5503 | } | |
5504 | ||
5505 | store: | |
54a611b6 LH |
5506 | trace_ma_write(__func__, mas, 0, entry); |
5507 | mas_wr_store_entry(&wr_mas); | |
1c414c6a | 5508 | MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas)); |
54a611b6 LH |
5509 | mas_destroy(mas); |
5510 | } | |
120b1162 | 5511 | EXPORT_SYMBOL_GPL(mas_store_prealloc); |
54a611b6 LH |
5512 | |
5513 | /** | |
5514 | * mas_preallocate() - Preallocate enough nodes for a store operation | |
5515 | * @mas: The maple state | |
da089254 | 5516 | * @entry: The entry that will be stored |
54a611b6 LH |
5517 | * @gfp: The GFP_FLAGS to use for allocations. |
5518 | * | |
5519 | * Return: 0 on success, -ENOMEM if memory could not be allocated. | |
5520 | */ | |
da089254 | 5521 | int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp) |
54a611b6 | 5522 | { |
17983dc6 | 5523 | MA_WR_STATE(wr_mas, mas, entry); |
5d659bbb SK |
5524 | int ret = 0; |
5525 | int request; | |
4249f13c | 5526 | |
19138a2c | 5527 | mas_wr_prealloc_setup(&wr_mas); |
f0c99037 | 5528 | mas->store_type = mas_wr_store_type(&wr_mas); |
28092a65 | 5529 | request = mas_prealloc_calc(&wr_mas, entry); |
5d659bbb | 5530 | if (!request) |
fba46a5d | 5531 | goto set_flag; |
4249f13c | 5532 | |
fba46a5d | 5533 | mas->mas_flags &= ~MA_STATE_PREALLOC; |
5d659bbb SK |
5534 | mas_node_count_gfp(mas, request, gfp); |
5535 | if (mas_is_err(mas)) { | |
5536 | mas_set_alloc_req(mas, 0); | |
5537 | ret = xa_err(mas->node); | |
5538 | mas_destroy(mas); | |
5539 | mas_reset(mas); | |
5540 | return ret; | |
17983dc6 LH |
5541 | } |
5542 | ||
fba46a5d | 5543 | set_flag: |
54a611b6 | 5544 | mas->mas_flags |= MA_STATE_PREALLOC; |
54a611b6 LH |
5545 | return ret; |
5546 | } | |
5c63a7c3 | 5547 | EXPORT_SYMBOL_GPL(mas_preallocate); |
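/*
 * Example usage (illustrative sketch added for documentation;
 * example_prealloc_store() is hypothetical).  This is the pattern for
 * callers that must not sleep or fail once committed: allocate up
 * front, then perform a store that cannot fail.  Assumes the caller
 * holds the lock protecting the tree and has set mas->index/mas->last.
 */
static int example_prealloc_store(struct ma_state *mas, void *item)
{
	int ret;

	ret = mas_preallocate(mas, item, GFP_KERNEL);
	if (ret)
		return ret;	/* -ENOMEM; nothing was stored */

	mas_store_prealloc(mas, item);	/* cannot fail, frees spare nodes */
	return 0;
}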
54a611b6 LH |
5548 | |
5549 | /* | |
5550 | * mas_destroy() - destroy a maple state. | |
5551 | * @mas: The maple state | |
5552 | * | |
5553 | * Upon completion, check the left-most node and rebalance against the node to | |
5554 | * the right if necessary. Frees any allocated nodes associated with this maple | |
5555 | * state. | |
5556 | */ | |
5557 | void mas_destroy(struct ma_state *mas) | |
5558 | { | |
5559 | struct maple_alloc *node; | |
541e06b7 | 5560 | unsigned long total; |
54a611b6 LH |
5561 | |
5562 | /* | |
5563 | * When using mas_for_each() to insert an expected number of elements, | |
5564 | * it is possible that the number inserted is less than the expected | |
5565 | * number. To fix an invalid final node, a check is performed here to | |
5566 | * rebalance the previous node with the final node. | |
5567 | */ | |
5568 | if (mas->mas_flags & MA_STATE_REBALANCE) { | |
5569 | unsigned char end; | |
5d659bbb SK |
5570 | if (mas_is_err(mas)) |
5571 | mas_reset(mas); | |
23e734ec | 5572 | mas_start(mas); |
54a611b6 | 5573 | mtree_range_walk(mas); |
1f41ef12 | 5574 | end = mas->end + 1; |
54a611b6 LH |
5575 | if (end < mt_min_slot_count(mas->node) - 1) |
5576 | mas_destroy_rebalance(mas, end); | |
5577 | ||
5578 | mas->mas_flags &= ~MA_STATE_REBALANCE; | |
5579 | } | |
5580 | mas->mas_flags &= ~(MA_STATE_BULK|MA_STATE_PREALLOC); | |
5581 | ||
541e06b7 LH |
5582 | total = mas_allocated(mas); |
5583 | while (total) { | |
54a611b6 LH |
5584 | node = mas->alloc; |
5585 | mas->alloc = node->slot[0]; | |
541e06b7 LH |
5586 | if (node->node_count > 1) { |
5587 | size_t count = node->node_count - 1; | |
5588 | ||
5589 | mt_free_bulk(count, (void __rcu **)&node->slot[1]); | |
5590 | total -= count; | |
5591 | } | |
4f2267b5 | 5592 | mt_free_one(ma_mnode_ptr(node)); |
541e06b7 | 5593 | total--; |
54a611b6 | 5594 | } |
541e06b7 | 5595 | |
54a611b6 LH |
5596 | mas->alloc = NULL; |
5597 | } | |
120b1162 | 5598 | EXPORT_SYMBOL_GPL(mas_destroy); |
54a611b6 LH |
5599 | |
5600 | /* | |
5601 | * mas_expected_entries() - Set the expected number of entries that will be inserted. | |
5602 | * @mas: The maple state | |
5603 | * @nr_entries: The number of expected entries. | |
5604 | * | |
5605 | * This will attempt to pre-allocate enough nodes to store the expected number | |
5606 | * of entries. The allocations will occur using the bulk allocator interface | |
5607 | * for speed. Please call mas_destroy() on the @mas after inserting the entries | |
5608 | * to ensure any unused nodes are freed. | |
5609 | * | |
5610 | * Return: 0 on success, -ENOMEM if memory could not be allocated. | |
5611 | */ | |
5612 | int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries) | |
5613 | { | |
5614 | int nonleaf_cap = MAPLE_ARANGE64_SLOTS - 2; | |
5615 | struct maple_enode *enode = mas->node; | |
5616 | int nr_nodes; | |
5617 | int ret; | |
5618 | ||
5619 | /* | |
5620 | * Sometimes it is necessary to duplicate a tree to a new tree, such as | |
5621 | * forking a process and duplicating the VMAs from one tree to a new | |
5622 | * tree. When such a situation arises, it is known that the new tree is | |
5623 | * not going to be used until the entire tree is populated. For | |
5624 | * performance reasons, it is best to use a bulk load with RCU disabled. | |
5625 | * This allows for optimistic splitting that favours the left and reuse | |
5626 | * of nodes during the operation. | |
5627 | */ | |
5628 | ||
5629 | /* Optimize splitting for bulk insert in-order */ | |
5630 | mas->mas_flags |= MA_STATE_BULK; | |
5631 | ||
5632 | /* | |
5633 | * Avoid overflow, assume a gap between each entry and a trailing null. | |
5634 | * If this is wrong, it just means allocation can happen during | |
5635 | * insertion of entries. | |
5636 | */ | |
5637 | nr_nodes = max(nr_entries, nr_entries * 2 + 1); | |
5638 | if (!mt_is_alloc(mas->tree)) | |
5639 | nonleaf_cap = MAPLE_RANGE64_SLOTS - 2; | |
5640 | ||
5641 | /* Leaves; reduce slots to keep space for expansion */ | |
5642 | nr_nodes = DIV_ROUND_UP(nr_nodes, MAPLE_RANGE64_SLOTS - 2); | |
5643 | /* Internal nodes */ | |
5644 | nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap); | |
5645 | /* Add working room for split (2 nodes) + new parents */ | |
099d7439 | 5646 | mas_node_count_gfp(mas, nr_nodes + 3, GFP_KERNEL); |
54a611b6 LH |
5647 | |
5648 | /* Detect if allocations run out */ | |
5649 | mas->mas_flags |= MA_STATE_PREALLOC; | |
5650 | ||
5651 | if (!mas_is_err(mas)) | |
5652 | return 0; | |
5653 | ||
5654 | ret = xa_err(mas->node); | |
5655 | mas->node = enode; | |
5656 | mas_destroy(mas); | |
5657 | return ret; | |
5658 | ||
5659 | } | |
120b1162 | 5660 | EXPORT_SYMBOL_GPL(mas_expected_entries); |
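/*
 * Example usage (illustrative sketch added for documentation;
 * example_bulk_fill() is hypothetical).  Bulk-loads @count items into
 * an empty tree in ascending order, ten indices per entry, the way a
 * tree is duplicated on fork.  mas_destroy() both frees the unused
 * preallocations and fixes up the final node.
 */
static int example_bulk_fill(struct maple_tree *tree, void **items,
			     unsigned long count)
{
	unsigned long i;
	int ret;

	MA_STATE(mas, tree, 0, 0);

	mtree_lock(tree);
	ret = mas_expected_entries(&mas, count);
	if (ret)
		goto unlock;

	for (i = 0; i < count; i++) {
		mas_set_range(&mas, i * 10, i * 10 + 9);
		mas_store(&mas, items[i]);
	}

unlock:
	mas_destroy(&mas);
	mtree_unlock(tree);
	return ret;
}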
54a611b6 | 5661 | |
271f61a8 | 5662 | static bool mas_next_setup(struct ma_state *mas, unsigned long max, |
6169b553 | 5663 | void **entry) |
54a611b6 | 5664 | { |
ca80f610 LH |
5665 | bool was_none = mas_is_none(mas); |
5666 | ||
a8091f03 | 5667 | if (unlikely(mas->last >= max)) { |
067311d3 | 5668 | mas->status = ma_overflow; |
a8091f03 LH |
5669 | return true; |
5670 | } | |
5671 | ||
067311d3 LH |
5672 | switch (mas->status) { |
5673 | case ma_active: | |
a8091f03 | 5674 | return false; |
067311d3 LH |
5675 | case ma_none: |
5676 | fallthrough; | |
5677 | case ma_pause: | |
5678 | mas->status = ma_start; | |
5679 | fallthrough; | |
5680 | case ma_start: | |
5681 | mas_walk(mas); /* Retries on dead nodes handled by mas_walk */ | |
5682 | break; | |
5683 | case ma_overflow: | |
a8091f03 | 5684 | /* Overflowed before, but the max changed */ |
067311d3 LH |
5685 | mas->status = ma_active; |
5686 | break; | |
5687 | case ma_underflow: | |
5688 | /* The user expects the mas to be one before where it is */ | |
5689 | mas->status = ma_active; | |
a8091f03 LH |
5690 | *entry = mas_walk(mas); |
5691 | if (*entry) | |
5692 | return true; | |
067311d3 LH |
5693 | break; |
5694 | case ma_root: | |
5695 | break; | |
5696 | case ma_error: | |
5697 | return true; | |
a8091f03 | 5698 | } |
54a611b6 | 5699 | |
067311d3 LH |
5700 | if (likely(mas_is_active(mas))) /* Fast path */ |
5701 | return false; | |
54a611b6 LH |
5702 | |
5703 | if (mas_is_ptr(mas)) { | |
6169b553 | 5704 | *entry = NULL; |
ca80f610 LH |
5705 | if (was_none && mas->index == 0) { |
5706 | mas->index = mas->last = 0; | |
6169b553 | 5707 | return true; |
54a611b6 | 5708 | } |
ca80f610 LH |
5709 | mas->index = 1; |
5710 | mas->last = ULONG_MAX; | |
067311d3 | 5711 | mas->status = ma_none; |
6169b553 | 5712 | return true; |
54a611b6 LH |
5713 | } |
5714 | ||
6169b553 LH |
5715 | if (mas_is_none(mas)) |
5716 | return true; | |
a8091f03 | 5717 | |
6169b553 LH |
5718 | return false; |
5719 | } | |
5720 | ||
5721 | /** | |
5722 | * mas_next() - Get the next entry. | |
5723 | * @mas: The maple state | |
5724 | * @max: The maximum index to check. | |
5725 | * | |
5726 | * Returns the next entry after @mas->index. | |
5727 | * Must hold rcu_read_lock or the write lock. | |
5728 | * Can return the zero entry. | |
5729 | * | |
5730 | * Return: The next entry or %NULL | |
5731 | */ | |
5732 | void *mas_next(struct ma_state *mas, unsigned long max) | |
5733 | { | |
5734 | void *entry = NULL; | |
5735 | ||
5736 | if (mas_next_setup(mas, max, &entry)) | |
5737 | return entry; | |
5738 | ||
5739 | /* Retries on dead nodes handled by mas_next_slot */ | |
067311d3 | 5740 | return mas_next_slot(mas, max, false); |
54a611b6 LH |
5741 | } |
5742 | EXPORT_SYMBOL_GPL(mas_next); | |
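/*
 * Example usage (illustrative sketch added for documentation;
 * example_count() is hypothetical).  Walks every entry in the tree
 * under the RCU read lock.
 */
static unsigned long example_count(struct maple_tree *tree)
{
	unsigned long n = 0;
	void *entry;

	MA_STATE(mas, tree, 0, 0);

	rcu_read_lock();
	for (entry = mas_find(&mas, ULONG_MAX); entry;
	     entry = mas_next(&mas, ULONG_MAX))
		n++;	/* [mas.index, mas.last] is the range of entry */
	rcu_read_unlock();

	return n;
}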
5743 | ||
6169b553 LH |
5744 | /** |
5745 | * mas_next_range() - Advance the maple state to the next range | |
5746 | * @mas: The maple state | |
5747 | * @max: The maximum index to check. | |
5748 | * | |
5749 | * Sets @mas->index and @mas->last to the range. | |
5750 | * Must hold rcu_read_lock or the write lock. | |
5751 | * Can return the zero entry. | |
5752 | * | |
5753 | * Return: The next entry or %NULL | |
5754 | */ | |
5755 | void *mas_next_range(struct ma_state *mas, unsigned long max) | |
5756 | { | |
5757 | void *entry = NULL; | |
5758 | ||
5759 | if (mas_next_setup(mas, max, &entry)) | |
5760 | return entry; | |
5761 | ||
5762 | /* Retries on dead nodes handled by mas_next_slot */ | |
067311d3 | 5763 | return mas_next_slot(mas, max, true); |
6169b553 LH |
5764 | } |
5765 | EXPORT_SYMBOL_GPL(mas_next_range); | |
5766 | ||
54a611b6 LH |
5767 | /** |
5768 | * mt_next() - get the next value in the maple tree | |
5769 | * @mt: The maple tree | |
5770 | * @index: The start index | |
5771 | * @max: The maximum index to check | |
5772 | * | |
fad9c80e TG |
5773 | * Takes RCU read lock internally to protect the search, which does not |
5774 | * protect the returned pointer after dropping RCU read lock. | |
5775 | * See also: Documentation/core-api/maple_tree.rst | |
5776 | * | |
5777 | * Return: The entry higher than @index or %NULL if nothing is found. | |
54a611b6 LH |
5778 | */ |
5779 | void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max) | |
5780 | { | |
5781 | void *entry = NULL; | |
5782 | MA_STATE(mas, mt, index, index); | |
5783 | ||
5784 | rcu_read_lock(); | |
5785 | entry = mas_next(&mas, max); | |
5786 | rcu_read_unlock(); | |
5787 | return entry; | |
5788 | } | |
5789 | EXPORT_SYMBOL_GPL(mt_next); | |
5790 | ||
271f61a8 | 5791 | static bool mas_prev_setup(struct ma_state *mas, unsigned long min, void **entry) |
54a611b6 | 5792 | { |
a8091f03 | 5793 | if (unlikely(mas->index <= min)) { |
067311d3 | 5794 | mas->status = ma_underflow; |
a8091f03 LH |
5795 | return true; |
5796 | } | |
54a611b6 | 5797 | |
067311d3 LH |
5798 | switch (mas->status) { |
5799 | case ma_active: | |
a8091f03 | 5800 | return false; |
067311d3 LH |
5801 | case ma_start: |
5802 | break; | |
5803 | case ma_none: | |
5804 | fallthrough; | |
5805 | case ma_pause: | |
5806 | mas->status = ma_start; | |
5807 | break; | |
5808 | case ma_underflow: | |
5809 | /* underflowed before but the min changed */ | |
5810 | mas->status = ma_active; | |
5811 | break; | |
5812 | case ma_overflow: | |
5813 | /* User expects mas to be one after where it is */ | |
5814 | mas->status = ma_active; | |
a8091f03 LH |
5815 | *entry = mas_walk(mas); |
5816 | if (*entry) | |
5817 | return true; | |
067311d3 LH |
5818 | break; |
5819 | case ma_root: | |
5820 | break; | |
5821 | case ma_error: | |
5822 | return true; | |
54a611b6 LH |
5823 | } |
5824 | ||
a8091f03 LH |
5825 | if (mas_is_start(mas)) |
5826 | mas_walk(mas); | |
5827 | ||
39193685 | 5828 | if (unlikely(mas_is_ptr(mas))) { |
067311d3 LH |
5829 | if (!mas->index) { |
5830 | mas->status = ma_none; | |
5831 | return true; | |
5832 | } | |
54a611b6 | 5833 | mas->index = mas->last = 0; |
6b9e93e0 LH |
5834 | *entry = mas_root(mas); |
5835 | return true; | |
39193685 LH |
5836 | } |
5837 | ||
5838 | if (mas_is_none(mas)) { | |
5839 | if (mas->index) { | |
5840 | /* Walked to out-of-range pointer? */ | |
5841 | mas->index = mas->last = 0; | |
067311d3 | 5842 | mas->status = ma_root; |
6b9e93e0 LH |
5843 | *entry = mas_root(mas); |
5844 | return true; | |
39193685 | 5845 | } |
6b9e93e0 | 5846 | return true; |
54a611b6 | 5847 | } |
6b9e93e0 LH |
5848 | |
5849 | return false; | |
6b9e93e0 LH |
5850 | } |
5851 | ||
5852 | /** | |
5853 | * mas_prev() - Get the previous entry | |
5854 | * @mas: The maple state | |
5855 | * @min: The minimum value to check. | |
5856 | * | |
5857 | * Must hold rcu_read_lock or the write lock. | |
067311d3 | 5858 | * Will reset mas to ma_start if the status is ma_none. Will stop on | |
6b9e93e0 LH |
5859 | * nodes that are not searchable. | |
5860 | * | |
5861 | * Return: the previous value or %NULL. | |
5862 | */ | |
5863 | void *mas_prev(struct ma_state *mas, unsigned long min) | |
5864 | { | |
5865 | void *entry = NULL; | |
5866 | ||
5867 | if (mas_prev_setup(mas, min, &entry)) | |
5868 | return entry; | |
5869 | ||
067311d3 | 5870 | return mas_prev_slot(mas, min, false); |
54a611b6 LH |
5871 | } |
5872 | EXPORT_SYMBOL_GPL(mas_prev); | |
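/*
 * Example usage (illustrative sketch added for documentation;
 * example_prev_entry() is hypothetical).  Returns the nearest entry
 * before @index, or NULL if nothing exists below it.
 */
static void *example_prev_entry(struct maple_tree *tree, unsigned long index)
{
	void *entry;

	MA_STATE(mas, tree, index, index);

	rcu_read_lock();
	entry = mas_prev(&mas, 0);
	rcu_read_unlock();

	return entry;
}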
5873 | ||
6b9e93e0 LH |
5874 | /** |
5875 | * mas_prev_range() - Advance to the previous range | |
5876 | * @mas: The maple state | |
5877 | * @min: The minimum value to check. | |
5878 | * | |
5879 | * Sets @mas->index and @mas->last to the range. | |
5880 | * Must hold rcu_read_lock or the write lock. | |
067311d3 | 5881 | * Will reset mas to ma_start if the status is ma_none. Will stop on | |
6b9e93e0 LH |
5882 | * nodes that are not searchable. | |
5883 | * | |
5884 | * Return: the previous value or %NULL. | |
5885 | */ | |
5886 | void *mas_prev_range(struct ma_state *mas, unsigned long min) | |
5887 | { | |
5888 | void *entry = NULL; | |
5889 | ||
5890 | if (mas_prev_setup(mas, min, &entry)) | |
5891 | return entry; | |
5892 | ||
067311d3 | 5893 | return mas_prev_slot(mas, min, true); |
6b9e93e0 LH |
5894 | } |
5895 | EXPORT_SYMBOL_GPL(mas_prev_range); | |
5896 | ||
54a611b6 LH |
5897 | /** |
5898 | * mt_prev() - get the previous value in the maple tree | |
5899 | * @mt: The maple tree | |
5900 | * @index: The start index | |
5901 | * @min: The minimum index to check | |
5902 | * | |
fad9c80e TG |
5903 | * Takes RCU read lock internally to protect the search, which does not |
5904 | * protect the returned pointer after dropping RCU read lock. | |
5905 | * See also: Documentation/core-api/maple_tree.rst | |
5906 | * | |
5907 | * Return: The entry before @index or %NULL if nothing is found. | |
54a611b6 LH |
5908 | */ |
5909 | void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min) | |
5910 | { | |
5911 | void *entry = NULL; | |
5912 | MA_STATE(mas, mt, index, index); | |
5913 | ||
5914 | rcu_read_lock(); | |
5915 | entry = mas_prev(&mas, min); | |
5916 | rcu_read_unlock(); | |
5917 | return entry; | |
5918 | } | |
5919 | EXPORT_SYMBOL_GPL(mt_prev); | |
5920 | ||
5921 | /** | |
5922 | * mas_pause() - Pause a mas_find/mas_for_each to drop the lock. | |
5923 | * @mas: The maple state to pause | |
5924 | * | |
5925 | * Some users need to pause a walk and drop the lock they're holding in | |
5926 | * order to yield to a higher priority thread or carry out an operation | |
5927 | * on an entry. Those users should call this function before they drop | |
5928 | * the lock. It resets the @mas to be suitable for the next iteration | |
5929 | * of the loop after the user has reacquired the lock. If most entries | |
5930 | * found during a walk require you to call mas_pause(), the mt_for_each() | |
5931 | * iterator may be more appropriate. | |
5932 | * | |
5933 | */ | |
5934 | void mas_pause(struct ma_state *mas) | |
5935 | { | |
067311d3 LH |
5936 | mas->status = ma_pause; |
5937 | mas->node = NULL; | |
54a611b6 LH |
5938 | } |
5939 | EXPORT_SYMBOL_GPL(mas_pause); | |
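/*
 * Example usage (illustrative sketch added for documentation;
 * example_scan() is hypothetical and assumes <linux/sched.h> for
 * cond_resched()).  Demonstrates dropping the lock mid-iteration:
 * mas_pause() makes the state restartable, so the walk resumes
 * correctly after the lock is reacquired.
 */
static void example_scan(struct maple_tree *tree)
{
	void *entry;

	MA_STATE(mas, tree, 0, 0);

	rcu_read_lock();
	mas_for_each(&mas, entry, ULONG_MAX) {
		if (need_resched()) {
			mas_pause(&mas);
			rcu_read_unlock();
			cond_resched();
			rcu_read_lock();
		}
	}
	rcu_read_unlock();
}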
5940 | ||
5941 | /** | |
6169b553 | 5942 | * mas_find_setup() - Internal function to set up mas_find*(). |
54a611b6 | 5943 | * @mas: The maple state |
6169b553 LH |
5944 | * @max: The maximum index |
5945 | * @entry: Pointer to the entry | |
54a611b6 | 5946 | * |
6169b553 | 5947 | * Return: True if @entry is the answer, false otherwise. | |
54a611b6 | 5948 | */ |
271f61a8 | 5949 | static __always_inline bool mas_find_setup(struct ma_state *mas, unsigned long max, void **entry) |
54a611b6 | 5950 | { |
067311d3 LH |
5951 | switch (mas->status) { |
5952 | case ma_active: | |
a8091f03 LH |
5953 | if (mas->last < max) |
5954 | return false; | |
a8091f03 | 5955 | return true; |
067311d3 LH |
5956 | case ma_start: |
5957 | break; | |
5958 | case ma_pause: | |
ca80f610 | 5959 | if (unlikely(mas->last >= max)) |
6169b553 | 5960 | return true; |
ca80f610 | 5961 | |
a8091f03 | 5962 | mas->index = ++mas->last; |
067311d3 LH |
5963 | mas->status = ma_start; |
5964 | break; | |
5965 | case ma_none: | |
ca80f610 | 5966 | if (unlikely(mas->last >= max)) |
6169b553 | 5967 | return true; |
ca80f610 | 5968 | |
a8091f03 | 5969 | mas->index = mas->last; |
067311d3 LH |
5970 | mas->status = ma_start; |
5971 | break; | |
5972 | case ma_underflow: | |
5973 | /* mas is pointing at entry before unable to go lower */ | |
5974 | if (unlikely(mas->index >= max)) { | |
5975 | mas->status = ma_overflow; | |
a8091f03 LH |
5976 | return true; |
5977 | } | |
17dc622c | 5978 | |
067311d3 LH |
5979 | mas->status = ma_active; |
5980 | *entry = mas_walk(mas); | |
5981 | if (*entry) | |
5982 | return true; | |
5983 | break; | |
5984 | case ma_overflow: | |
5985 | if (unlikely(mas->last >= max)) | |
5986 | return true; | |
5987 | ||
5988 | mas->status = ma_active; | |
5989 | *entry = mas_walk(mas); | |
5990 | if (*entry) | |
5991 | return true; | |
5992 | break; | |
5993 | case ma_root: | |
5994 | break; | |
5995 | case ma_error: | |
5996 | return true; | |
a8091f03 LH |
5997 | } |
5998 | ||
5999 | if (mas_is_start(mas)) { | |
54a611b6 | 6000 | /* First run or continue */ |
54a611b6 | 6001 | if (mas->index > max) |
6169b553 | 6002 | return true; |
54a611b6 | 6003 | |
6169b553 LH |
6004 | *entry = mas_walk(mas); |
6005 | if (*entry) | |
6006 | return true; | |
ca80f610 | 6007 | |
54a611b6 LH |
6008 | } |
6009 | ||
9a40d45c LH |
6010 | if (unlikely(mas_is_ptr(mas))) |
6011 | goto ptr_out_of_range; | |
ca80f610 | 6012 | |
9a40d45c | 6013 | if (unlikely(mas_is_none(mas))) |
6169b553 | 6014 | return true; |
ca80f610 LH |
6015 | |
6016 | if (mas->index == max) | |
6169b553 | 6017 | return true; |
54a611b6 | 6018 | |
6169b553 | 6019 | return false; |
ca80f610 LH |
6020 | |
6021 | ptr_out_of_range: | |
067311d3 | 6022 | mas->status = ma_none; |
ca80f610 LH |
6023 | mas->index = 1; |
6024 | mas->last = ULONG_MAX; | |
6169b553 LH |
6025 | return true; |
6026 | } | |
6027 | ||
6028 | /** | |
6029 | * mas_find() - On the first call, find the entry at or after mas->index up to | |
6030 | * %max. Otherwise, find the entry after mas->index. | |
6031 | * @mas: The maple state | |
6032 | * @max: The maximum value to check. | |
6033 | * | |
6034 | * Must hold rcu_read_lock or the write lock. | |
6035 | * If an entry exists, last and index are updated accordingly. | |
067311d3 | 6036 | * May set @mas->status to ma_overflow. |
6169b553 LH |
6037 | * |
6038 | * Return: The entry or %NULL. | |
6039 | */ | |
6040 | void *mas_find(struct ma_state *mas, unsigned long max) | |
6041 | { | |
6042 | void *entry = NULL; | |
6043 | ||
6044 | if (mas_find_setup(mas, max, &entry)) | |
6045 | return entry; | |
6046 | ||
6047 | /* Retries on dead nodes handled by mas_next_slot */ | |
067311d3 LH |
6048 | entry = mas_next_slot(mas, max, false); |
6049 | /* Ignore overflow */ | |
6050 | mas->status = ma_active; | |
6051 | return entry; | |
54a611b6 | 6052 | } |
120b1162 | 6053 | EXPORT_SYMBOL_GPL(mas_find); |
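/*
 * Editor's usage sketch (tree is illustrative): visit every entry in
 * [0, 70]. The first mas_find() call walks from mas->index; subsequent
 * calls continue from just after mas->last.
 *
 *	MA_STATE(mas, &tree, 0, 0);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	while ((entry = mas_find(&mas, 70)) != NULL)
 *		pr_info("%lu-%lu: %p\n", mas.index, mas.last, entry);
 *	rcu_read_unlock();
 */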
54a611b6 | 6054 | |
6169b553 LH |
6055 | /** |
6056 | * mas_find_range() - On the first call, find the entry at or after | |
6057 | * mas->index up to %max. Otherwise, advance to the slot after mas->index. | |
6058 | * @mas: The maple state | |
6059 | * @max: The maximum value to check. | |
6060 | * | |
6061 | * Must hold rcu_read_lock or the write lock. | |
6062 | * If an entry exists, last and index are updated accordingly. | |
067311d3 | 6063 | * May set @mas->status to ma_overflow. |
6169b553 LH |
6064 | * |
6065 | * Return: The entry or %NULL. | |
6066 | */ | |
6067 | void *mas_find_range(struct ma_state *mas, unsigned long max) | |
6068 | { | |
a8091f03 | 6069 | void *entry = NULL; |
6169b553 LH |
6070 | |
6071 | if (mas_find_setup(mas, max, &entry)) | |
6072 | return entry; | |
6073 | ||
6074 | /* Retries on dead nodes handled by mas_next_slot */ | |
067311d3 | 6075 | return mas_next_slot(mas, max, true); |
6169b553 LH |
6076 | } |
6077 | EXPORT_SYMBOL_GPL(mas_find_range); | |
6078 | ||
54a611b6 | 6079 | /** |
6b9e93e0 | 6080 | * mas_find_rev_setup() - Internal function to set up mas_find_*_rev(). |
54a611b6 | 6081 | * @mas: The maple state |
6b9e93e0 LH |
6082 | * @min: The minimum index |
6083 | * @entry: Pointer to the entry | |
54a611b6 | 6084 | * |
6b9e93e0 | 6085 | * Return: True if entry is the answer, false otherwise. |
54a611b6 | 6086 | */ |
271f61a8 | 6087 | static bool mas_find_rev_setup(struct ma_state *mas, unsigned long min, |
6b9e93e0 | 6088 | void **entry) |
54a611b6 | 6089 | { |
39193685 | 6090 | |
067311d3 LH |
6091 | switch (mas->status) { |
6092 | case ma_active: | |
6093 | goto active; | |
6094 | case ma_start: | |
6095 | break; | |
6096 | case ma_pause: | |
39193685 | 6097 | if (unlikely(mas->index <= min)) { |
067311d3 | 6098 | mas->status = ma_underflow; |
6b9e93e0 | 6099 | return true; |
54a611b6 | 6100 | } |
54a611b6 | 6101 | mas->last = --mas->index; |
067311d3 LH |
6102 | mas->status = ma_start; |
6103 | break; | |
6104 | case ma_none: | |
a8091f03 LH |
6105 | if (mas->index <= min) |
6106 | goto none; | |
6107 | ||
6108 | mas->last = mas->index; | |
067311d3 LH |
6109 | mas->status = ma_start; |
6110 | break; | |
6111 | case ma_overflow: /* user expects the mas to be one after where it is */ | |
6112 | if (unlikely(mas->index <= min)) { | |
6113 | mas->status = ma_underflow; | |
a8091f03 LH |
6114 | return true; |
6115 | } | |
6116 | ||
067311d3 LH |
6117 | mas->status = ma_active; |
6118 | break; | |
6119 | case ma_underflow: /* user expects the mas to be one before where it is */ | |
6120 | if (unlikely(mas->index <= min)) | |
6121 | return true; | |
6122 | ||
6123 | mas->status = ma_active; | |
6124 | break; | |
6125 | case ma_root: | |
6126 | break; | |
6127 | case ma_error: | |
6128 | return true; | |
54a611b6 LH |
6129 | } |
6130 | ||
a8091f03 | 6131 | if (mas_is_start(mas)) { |
54a611b6 | 6132 | /* First run or continue */ |
54a611b6 | 6133 | if (mas->index < min) |
6b9e93e0 | 6134 | return true; |
54a611b6 | 6135 | |
6b9e93e0 LH |
6136 | *entry = mas_walk(mas); |
6137 | if (*entry) | |
6138 | return true; | |
54a611b6 LH |
6139 | } |
6140 | ||
9a40d45c LH |
6141 | if (unlikely(mas_is_ptr(mas))) |
6142 | goto none; | |
39193685 | 6143 | |
9a40d45c LH |
6144 | if (unlikely(mas_is_none(mas))) { |
6145 | /* | |
6146 | * Walked to the location, and there was nothing so the previous | |
6147 | * location is 0. | |
6148 | */ | |
6149 | mas->last = mas->index = 0; | |
6150 | mas->status = ma_root; | |
6151 | *entry = mas_root(mas); | |
6152 | return true; | |
39193685 | 6153 | } |
54a611b6 | 6154 | |
067311d3 | 6155 | active: |
54a611b6 | 6156 | if (mas->index < min) |
6b9e93e0 | 6157 | return true; |
54a611b6 | 6158 | |
6b9e93e0 | 6159 | return false; |
39193685 LH |
6160 | |
6161 | none: | |
067311d3 | 6162 | mas->status = ma_none; |
6b9e93e0 LH |
6163 | return true; |
6164 | } | |
6165 | ||
6166 | /** | |
6167 | * mas_find_rev() - On the first call, find the first non-null entry at or below | |
6168 | * mas->index down to %min. Otherwise find the first non-null entry below | |
6169 | * mas->index down to %min. | |
6170 | * @mas: The maple state | |
6171 | * @min: The minimum value to check. | |
6172 | * | |
6173 | * Must hold rcu_read_lock or the write lock. | |
6174 | * If an entry exists, last and index are updated accordingly. | |
067311d3 | 6175 | * May set @mas->status to ma_underflow. |
6b9e93e0 LH |
6176 | * |
6177 | * Return: The entry or %NULL. | |
6178 | */ | |
6179 | void *mas_find_rev(struct ma_state *mas, unsigned long min) | |
6180 | { | |
a8091f03 | 6181 | void *entry = NULL; |
6b9e93e0 LH |
6182 | |
6183 | if (mas_find_rev_setup(mas, min, &entry)) | |
6184 | return entry; | |
6185 | ||
6186 | /* Retries on dead nodes handled by mas_prev_slot */ | |
067311d3 | 6187 | return mas_prev_slot(mas, min, false); |
6b9e93e0 | 6188 | |
54a611b6 | 6189 | } |
120b1162 | 6190 | EXPORT_SYMBOL_GPL(mas_find_rev); |
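/*
 * Editor's usage sketch (tree is illustrative): walk backwards from
 * ULONG_MAX down to 0, visiting each non-NULL entry once.
 *
 *	MA_STATE(mas, &tree, ULONG_MAX, ULONG_MAX);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	while ((entry = mas_find_rev(&mas, 0)) != NULL)
 *		pr_info("%lu-%lu: %p\n", mas.index, mas.last, entry);
 *	rcu_read_unlock();
 */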
54a611b6 | 6191 | |
6b9e93e0 LH |
6192 | /** |
6193 | * mas_find_range_rev() - On the first call, find the first non-null entry at | |
6194 | * or below mas->index down to %min. Otherwise advance to the previous slot, | |
6195 | * before mas->index, down to %min. | |
6196 | * @mas: The maple state | |
6197 | * @min: The minimum value to check. | |
6198 | * | |
6199 | * Must hold rcu_read_lock or the write lock. | |
6200 | * If an entry exists, last and index are updated accordingly. | |
067311d3 | 6201 | * May set @mas->status to ma_underflow. |
6b9e93e0 LH |
6202 | * |
6203 | * Return: The entry or %NULL. | |
6204 | */ | |
6205 | void *mas_find_range_rev(struct ma_state *mas, unsigned long min) | |
6206 | { | |
a8091f03 | 6207 | void *entry = NULL; |
6b9e93e0 LH |
6208 | |
6209 | if (mas_find_rev_setup(mas, min, &entry)) | |
6210 | return entry; | |
6211 | ||
6212 | /* Retries on dead nodes handled by mas_prev_slot */ | |
067311d3 | 6213 | return mas_prev_slot(mas, min, true); |
6b9e93e0 LH |
6214 | } |
6215 | EXPORT_SYMBOL_GPL(mas_find_range_rev); | |
6216 | ||
54a611b6 LH |
6217 | /** |
6218 | * mas_erase() - Find the range in which index resides and erase the entire | |
6219 | * range. | |
6220 | * @mas: The maple state | |
6221 | * | |
6222 | * Must hold the write lock. | |
6223 | * Searches for @mas->index, sets @mas->index and @mas->last to the range and | |
6224 | * erases that range. | |
6225 | * | |
6226 | * Return: the entry that was erased or %NULL, @mas->index and @mas->last are updated. | |
6227 | */ | |
6228 | void *mas_erase(struct ma_state *mas) | |
6229 | { | |
6230 | void *entry; | |
e1b8b883 | 6231 | unsigned long index = mas->index; |
54a611b6 LH |
6232 | MA_WR_STATE(wr_mas, mas, NULL); |
6233 | ||
f7a59018 | 6234 | if (!mas_is_active(mas) && !mas_is_start(mas)) | |
067311d3 | 6235 | mas->status = ma_start; |
54a611b6 | 6236 | |
e1b8b883 | 6237 | write_retry: |
54a611b6 LH |
6238 | entry = mas_state_walk(mas); |
6239 | if (!entry) | |
6240 | return NULL; | |
6241 | ||
54a611b6 LH |
6242 | /* Must reset to ensure spanning writes of last slot are detected */ |
6243 | mas_reset(mas); | |
7e093834 | 6244 | mas_wr_preallocate(&wr_mas, NULL); |
e1b8b883 SK |
6245 | if (mas_nomem(mas, GFP_KERNEL)) { |
6246 | /* in case the range of entry changed when unlocked */ | |
6247 | mas->index = mas->last = index; | |
54a611b6 | 6248 | goto write_retry; |
e1b8b883 | 6249 | } |
54a611b6 | 6250 | |
7e093834 SK |
6251 | if (mas_is_err(mas)) |
6252 | goto out; | |
6253 | ||
6254 | mas_wr_store_entry(&wr_mas); | |
6255 | out: | |
3cd9e92e | 6256 | mas_destroy(mas); |
54a611b6 LH |
6257 | return entry; |
6258 | } | |
6259 | EXPORT_SYMBOL_GPL(mas_erase); | |
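/*
 * Editor's usage sketch (tree and index are illustrative): erase whatever
 * range covers the index while holding the tree lock, which mas_erase()
 * requires. The return value is the erased entry, or NULL if the index was
 * already empty.
 *
 *	MA_STATE(mas, &tree, index, index);
 *	void *old;
 *
 *	mtree_lock(&tree);
 *	old = mas_erase(&mas);
 *	mtree_unlock(&tree);
 */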
6260 | ||
6261 | /** | |
6262 | * mas_nomem() - Check if there was an error allocating and do the allocation | |
6263 | * if necessary. May drop and reacquire the tree lock while allocating. | |
6264 | * @mas: The maple state | |
6265 | * @gfp: The GFP_FLAGS to use for allocations | |
6266 | * Return: true on allocation, false otherwise. | |
6267 | */ | |
6268 | bool mas_nomem(struct ma_state *mas, gfp_t gfp) | |
14c4b5ab | 6269 | __must_hold(mas->tree->ma_lock) |
54a611b6 | 6270 | { |
3cd9e92e | 6271 | if (likely(mas->node != MA_ERROR(-ENOMEM))) |
54a611b6 | 6272 | return false; |
54a611b6 LH |
6273 | |
6274 | if (gfpflags_allow_blocking(gfp) && !mt_external_lock(mas->tree)) { | |
6275 | mtree_unlock(mas->tree); | |
6276 | mas_alloc_nodes(mas, gfp); | |
6277 | mtree_lock(mas->tree); | |
6278 | } else { | |
6279 | mas_alloc_nodes(mas, gfp); | |
6280 | } | |
6281 | ||
6282 | if (!mas_allocated(mas)) | |
6283 | return false; | |
6284 | ||
067311d3 | 6285 | mas->status = ma_start; |
54a611b6 LH |
6286 | return true; |
6287 | } | |
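/*
 * Editor's sketch of the retry idiom built around mas_nomem(), mirroring
 * mtree_insert_range() further below (tree, index, last and entry are
 * illustrative): re-attempt the write once the missing nodes have been
 * allocated, possibly after the lock was dropped and reacquired.
 *
 *	MA_STATE(mas, &tree, index, last);
 *	int ret = 0;
 *
 *	mtree_lock(&tree);
 * retry:
 *	mas_insert(&mas, entry);
 *	if (mas_nomem(&mas, GFP_KERNEL))
 *		goto retry;
 *	mtree_unlock(&tree);
 *	if (mas_is_err(&mas))
 *		ret = xa_err(mas.node);
 *	mas_destroy(&mas);
 */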
6288 | ||
6289 | void __init maple_tree_init(void) | |
6290 | { | |
6291 | maple_node_cache = kmem_cache_create("maple_node", | |
6292 | sizeof(struct maple_node), sizeof(struct maple_node), | |
6293 | SLAB_PANIC, NULL); | |
6294 | } | |
6295 | ||
6296 | /** | |
6297 | * mtree_load() - Load a value stored in a maple tree | |
6298 | * @mt: The maple tree | |
6299 | * @index: The index to load | |
6300 | * | |
6301 | * Return: the entry or %NULL | |
6302 | */ | |
6303 | void *mtree_load(struct maple_tree *mt, unsigned long index) | |
6304 | { | |
6305 | MA_STATE(mas, mt, index, index); | |
6306 | void *entry; | |
6307 | ||
6308 | trace_ma_read(__func__, &mas); | |
6309 | rcu_read_lock(); | |
6310 | retry: | |
6311 | entry = mas_start(&mas); | |
6312 | if (unlikely(mas_is_none(&mas))) | |
6313 | goto unlock; | |
6314 | ||
6315 | if (unlikely(mas_is_ptr(&mas))) { | |
6316 | if (index) | |
6317 | entry = NULL; | |
6318 | ||
6319 | goto unlock; | |
6320 | } | |
6321 | ||
6322 | entry = mtree_lookup_walk(&mas); | |
6323 | if (!entry && unlikely(mas_is_start(&mas))) | |
6324 | goto retry; | |
6325 | unlock: | |
6326 | rcu_read_unlock(); | |
6327 | if (xa_is_zero(entry)) | |
6328 | return NULL; | |
6329 | ||
6330 | return entry; | |
6331 | } | |
6332 | EXPORT_SYMBOL(mtree_load); | |
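/*
 * Editor's usage sketch (tree is illustrative): mtree_load() takes the RCU
 * read lock internally, so a bare lookup is a single call. The returned
 * pointer is only as stable as the caller's own lifetime guarantees for the
 * stored object.
 *
 *	void *entry = mtree_load(&tree, 42);
 */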
6333 | ||
6334 | /** | |
6335 | * mtree_store_range() - Store an entry at a given range. | |
6336 | * @mt: The maple tree | |
6337 | * @index: The start of the range | |
6338 | * @last: The end of the range | |
6339 | * @entry: The entry to store | |
6340 | * @gfp: The GFP_FLAGS to use for allocations | |
6341 | * | |
6342 | * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not | |
6343 | * be allocated. | |
6344 | */ | |
6345 | int mtree_store_range(struct maple_tree *mt, unsigned long index, | |
6346 | unsigned long last, void *entry, gfp_t gfp) | |
6347 | { | |
6348 | MA_STATE(mas, mt, index, last); | |
3cd9e92e | 6349 | int ret = 0; |
54a611b6 LH |
6350 | |
6351 | trace_ma_write(__func__, &mas, 0, entry); | |
6352 | if (WARN_ON_ONCE(xa_is_advanced(entry))) | |
6353 | return -EINVAL; | |
6354 | ||
6355 | if (index > last) | |
6356 | return -EINVAL; | |
6357 | ||
6358 | mtree_lock(mt); | |
85db8f24 | 6359 | ret = mas_store_gfp(&mas, entry, gfp); |
54a611b6 | 6360 | mtree_unlock(mt); |
54a611b6 | 6361 | |
3cd9e92e | 6362 | return ret; |
54a611b6 LH |
6363 | } |
6364 | EXPORT_SYMBOL(mtree_store_range); | |
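/*
 * Editor's usage sketch (tree and ptr are illustrative): store one pointer
 * over the whole range [100, 199]; a later mtree_load() of any index in that
 * range returns the same pointer.
 *
 *	int ret = mtree_store_range(&tree, 100, 199, ptr, GFP_KERNEL);
 */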
6365 | ||
6366 | /** | |
6367 | * mtree_store() - Store an entry at a given index. | |
6368 | * @mt: The maple tree | |
6369 | * @index: The index to store the value | |
6370 | * @entry: The entry to store | |
6371 | * @gfp: The GFP_FLAGS to use for allocations | |
6372 | * | |
6373 | * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not | |
6374 | * be allocated. | |
6375 | */ | |
6376 | int mtree_store(struct maple_tree *mt, unsigned long index, void *entry, | |
6377 | gfp_t gfp) | |
6378 | { | |
6379 | return mtree_store_range(mt, index, index, entry, gfp); | |
6380 | } | |
6381 | EXPORT_SYMBOL(mtree_store); | |
6382 | ||
6383 | /** | |
4445e582 | 6384 | * mtree_insert_range() - Insert an entry at a given range if there is no value. |
54a611b6 LH |
6385 | * @mt: The maple tree |
6386 | * @first: The start of the range | |
6387 | * @last: The end of the range | |
6388 | * @entry: The entry to store | |
6389 | * @gfp: The GFP_FLAGS to use for allocations. | |
6390 | * | |
6391 | * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid | |
6392 | * request, -ENOMEM if memory could not be allocated. | |
6393 | */ | |
6394 | int mtree_insert_range(struct maple_tree *mt, unsigned long first, | |
6395 | unsigned long last, void *entry, gfp_t gfp) | |
6396 | { | |
6397 | MA_STATE(ms, mt, first, last); | |
3cd9e92e | 6398 | int ret = 0; |
54a611b6 LH |
6399 | |
6400 | if (WARN_ON_ONCE(xa_is_advanced(entry))) | |
6401 | return -EINVAL; | |
6402 | ||
6403 | if (first > last) | |
6404 | return -EINVAL; | |
6405 | ||
6406 | mtree_lock(mt); | |
6407 | retry: | |
6408 | mas_insert(&ms, entry); | |
6409 | if (mas_nomem(&ms, gfp)) | |
6410 | goto retry; | |
6411 | ||
6412 | mtree_unlock(mt); | |
6413 | if (mas_is_err(&ms)) | |
3cd9e92e | 6414 | ret = xa_err(ms.node); |
54a611b6 | 6415 | |
3cd9e92e SK |
6416 | mas_destroy(&ms); |
6417 | return ret; | |
54a611b6 LH |
6418 | } |
6419 | EXPORT_SYMBOL(mtree_insert_range); | |
6420 | ||
6421 | /** | |
4445e582 | 6422 | * mtree_insert() - Insert an entry at a given index if there is no value. |
54a611b6 LH |
6423 | * @mt: The maple tree |
6424 | * @index: The index to store the value | |
6425 | * @entry: The entry to store | |
4ae6944d | 6426 | * @gfp: The GFP_FLAGS to use for allocations. |
54a611b6 LH |
6427 | * |
6428 | * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid | |
6429 | * request, -ENOMEM if memory could not be allocated. | |
6430 | */ | |
6431 | int mtree_insert(struct maple_tree *mt, unsigned long index, void *entry, | |
6432 | gfp_t gfp) | |
6433 | { | |
6434 | return mtree_insert_range(mt, index, index, entry, gfp); | |
6435 | } | |
6436 | EXPORT_SYMBOL(mtree_insert); | |
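/*
 * Editor's usage sketch (tree and ptr are illustrative): unlike
 * mtree_store(), mtree_insert() refuses to overwrite, so -EEXIST
 * distinguishes an occupied index from other failures.
 *
 *	int ret = mtree_insert(&tree, 5, ptr, GFP_KERNEL);
 *
 *	if (ret == -EEXIST)
 *		pr_debug("index 5 already has an entry\n");
 */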
6437 | ||
6438 | int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp, | |
6439 | void *entry, unsigned long size, unsigned long min, | |
6440 | unsigned long max, gfp_t gfp) | |
6441 | { | |
6442 | int ret = 0; | |
6443 | ||
52371677 | 6444 | MA_STATE(mas, mt, 0, 0); |
54a611b6 LH |
6445 | if (!mt_is_alloc(mt)) |
6446 | return -EINVAL; | |
6447 | ||
6448 | if (WARN_ON_ONCE(mt_is_reserved(entry))) | |
6449 | return -EINVAL; | |
6450 | ||
54a611b6 LH |
6451 | mtree_lock(mt); |
6452 | retry: | |
52371677 PZ |
6453 | ret = mas_empty_area(&mas, min, max, size); |
6454 | if (ret) | |
6455 | goto unlock; | |
6456 | ||
6457 | mas_insert(&mas, entry); | |
6458 | /* | |
6459 | * mas_nomem() may release the lock, causing the allocated area | |
6460 | * to be unavailable, so try to allocate a free area again. | |
6461 | */ | |
54a611b6 LH |
6462 | if (mas_nomem(&mas, gfp)) |
6463 | goto retry; | |
6464 | ||
52371677 PZ |
6465 | if (mas_is_err(&mas)) |
6466 | ret = xa_err(mas.node); | |
6467 | else | |
6468 | *startp = mas.index; | |
6469 | ||
6470 | unlock: | |
54a611b6 | 6471 | mtree_unlock(mt); |
3cd9e92e | 6472 | mas_destroy(&mas); |
54a611b6 LH |
6473 | return ret; |
6474 | } | |
6475 | EXPORT_SYMBOL(mtree_alloc_range); | |
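/*
 * Editor's usage sketch (tree and ptr are illustrative; the tree must have
 * been created with MT_FLAGS_ALLOC_RANGE): reserve a 16-index gap somewhere
 * in [0, 1023] and store ptr there; on success, start holds the first index
 * of the reserved range.
 *
 *	unsigned long start;
 *	int ret;
 *
 *	ret = mtree_alloc_range(&tree, &start, ptr, 16, 0, 1023, GFP_KERNEL);
 *	if (!ret)
 *		pr_info("reserved [%lu, %lu]\n", start, start + 15);
 */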
6476 | ||
9b6713cc CL |
6477 | /** |
6478 | * mtree_alloc_cyclic() - Find somewhere to store this entry in the tree. | |
6479 | * @mt: The maple tree. | |
6480 | * @startp: Pointer to ID. | |
6481 | * @range_lo: Lower bound of range to search. | |
6482 | * @range_hi: Upper bound of range to search. | |
6483 | * @entry: The entry to store. | |
6484 | * @next: Pointer to next ID to allocate. | |
6485 | * @gfp: The GFP_FLAGS to use for allocations. | |
6486 | * | |
6487 | * Finds an empty entry in @mt after @next, stores the new index into | |
6488 | * the @startp pointer, stores the entry at that index, then updates @next. | |
6489 | * | |
6490 | * @mt must be initialized with the MT_FLAGS_ALLOC_RANGE flag. | |
6491 | * | |
6492 | * Context: Any context. Takes and releases the mt.lock. May sleep if | |
6493 | * the @gfp flags permit. | |
6494 | * | |
6495 | * Return: 0 if the allocation succeeded without wrapping, 1 if the | |
6496 | * allocation succeeded after wrapping, -ENOMEM if memory could not be | |
6497 | * allocated, -EINVAL if @mt cannot be used, or -EBUSY if there are no | |
6498 | * free entries. | |
6499 | */ | |
6500 | int mtree_alloc_cyclic(struct maple_tree *mt, unsigned long *startp, | |
6501 | void *entry, unsigned long range_lo, unsigned long range_hi, | |
6502 | unsigned long *next, gfp_t gfp) | |
6503 | { | |
6504 | int ret; | |
6505 | ||
6506 | MA_STATE(mas, mt, 0, 0); | |
6507 | ||
6508 | if (!mt_is_alloc(mt)) | |
6509 | return -EINVAL; | |
6510 | if (WARN_ON_ONCE(mt_is_reserved(entry))) | |
6511 | return -EINVAL; | |
6512 | mtree_lock(mt); | |
6513 | ret = mas_alloc_cyclic(&mas, startp, entry, range_lo, range_hi, | |
6514 | next, gfp); | |
6515 | mtree_unlock(mt); | |
6516 | return ret; | |
6517 | } | |
6518 | EXPORT_SYMBOL(mtree_alloc_cyclic); | |
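/*
 * Editor's usage sketch (tree and ptr are illustrative): cyclic ID
 * allocation in [1, 1023]. next persists across calls so IDs keep
 * increasing until the range wraps, at which point the call returns 1
 * rather than 0.
 *
 *	static unsigned long next = 1;
 *	unsigned long id;
 *	int ret;
 *
 *	ret = mtree_alloc_cyclic(&tree, &id, ptr, 1, 1023, &next, GFP_KERNEL);
 *	if (ret >= 0)
 *		pr_info("allocated id %lu\n", id);
 */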
6519 | ||
54a611b6 LH |
6520 | int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp, |
6521 | void *entry, unsigned long size, unsigned long min, | |
6522 | unsigned long max, gfp_t gfp) | |
6523 | { | |
6524 | int ret = 0; | |
6525 | ||
52371677 | 6526 | MA_STATE(mas, mt, 0, 0); |
54a611b6 LH |
6527 | if (!mt_is_alloc(mt)) |
6528 | return -EINVAL; | |
6529 | ||
6530 | if (WARN_ON_ONCE(mt_is_reserved(entry))) | |
6531 | return -EINVAL; | |
6532 | ||
54a611b6 LH |
6533 | mtree_lock(mt); |
6534 | retry: | |
52371677 PZ |
6535 | ret = mas_empty_area_rev(&mas, min, max, size); |
6536 | if (ret) | |
6537 | goto unlock; | |
6538 | ||
6539 | mas_insert(&mas, entry); | |
6540 | /* | |
6541 | * mas_nomem() may release the lock, causing the allocated area | |
6542 | * to be unavailable, so try to allocate a free area again. | |
6543 | */ | |
54a611b6 LH |
6544 | if (mas_nomem(&mas, gfp)) |
6545 | goto retry; | |
6546 | ||
52371677 PZ |
6547 | if (mas_is_err(&mas)) |
6548 | ret = xa_err(mas.node); | |
6549 | else | |
6550 | *startp = mas.index; | |
6551 | ||
6552 | unlock: | |
54a611b6 | 6553 | mtree_unlock(mt); |
3cd9e92e | 6554 | mas_destroy(&mas); |
54a611b6 LH |
6555 | return ret; |
6556 | } | |
6557 | EXPORT_SYMBOL(mtree_alloc_rrange); | |
6558 | ||
6559 | /** | |
6560 | * mtree_erase() - Find an index and erase the entire range. | |
6561 | * @mt: The maple tree | |
6562 | * @index: The index to erase | |
6563 | * | |
6564 | * Erasing is the same as a walk to an entry then a store of a NULL to that | |
6565 | * ENTIRE range. In fact, it is implemented as such using the advanced API. | |
6566 | * | |
6567 | * Return: The entry stored at the @index or %NULL | |
6568 | */ | |
6569 | void *mtree_erase(struct maple_tree *mt, unsigned long index) | |
6570 | { | |
6571 | void *entry = NULL; | |
6572 | ||
6573 | MA_STATE(mas, mt, index, index); | |
6574 | trace_ma_op(__func__, &mas); | |
6575 | ||
6576 | mtree_lock(mt); | |
6577 | entry = mas_erase(&mas); | |
6578 | mtree_unlock(mt); | |
6579 | ||
6580 | return entry; | |
6581 | } | |
6582 | EXPORT_SYMBOL(mtree_erase); | |
6583 | ||
fd32e4e9 PZ |
6584 | /* |
6585 | * mas_dup_free() - Free an incomplete duplication of a tree. | |
6586 | * @mas: The maple state of an incomplete tree. | |
6587 | * | |
6588 | * @mas->node indicates the node on which the allocation failed. This | |
6589 | * function frees all nodes starting from @mas->node in the | |
6590 | * reverse order of mas_dup_build(). There is no need to hold the source tree | |
6591 | * lock at this time. | |
6592 | */ | |
6593 | static void mas_dup_free(struct ma_state *mas) | |
6594 | { | |
6595 | struct maple_node *node; | |
6596 | enum maple_type type; | |
6597 | void __rcu **slots; | |
6598 | unsigned char count, i; | |
6599 | ||
6600 | /* Maybe the first node allocation failed. */ | |
6601 | if (mas_is_none(mas)) | |
6602 | return; | |
6603 | ||
6604 | while (!mte_is_root(mas->node)) { | |
6605 | mas_ascend(mas); | |
6606 | if (mas->offset) { | |
6607 | mas->offset--; | |
6608 | do { | |
6609 | mas_descend(mas); | |
6610 | mas->offset = mas_data_end(mas); | |
6611 | } while (!mte_is_leaf(mas->node)); | |
6612 | ||
6613 | mas_ascend(mas); | |
6614 | } | |
6615 | ||
6616 | node = mte_to_node(mas->node); | |
6617 | type = mte_node_type(mas->node); | |
6618 | slots = ma_slots(node, type); | |
6619 | count = mas_data_end(mas) + 1; | |
6620 | for (i = 0; i < count; i++) | |
6621 | ((unsigned long *)slots)[i] &= ~MAPLE_NODE_MASK; | |
6622 | mt_free_bulk(count, slots); | |
6623 | } | |
6624 | ||
6625 | node = mte_to_node(mas->node); | |
6626 | mt_free_one(node); | |
6627 | } | |
6628 | ||
6629 | /* | |
6630 | * mas_copy_node() - Copy a maple node and replace the parent. | |
6631 | * @mas: The maple state of source tree. | |
6632 | * @new_mas: The maple state of new tree. | |
6633 | * @parent: The parent of the new node. | |
6634 | * | |
6635 | * Copy @mas->node to @new_mas->node, set @parent to be the parent of | |
6636 | * @new_mas->node. This copy itself does not allocate memory. | |
6637 | */ | |
6638 | static inline void mas_copy_node(struct ma_state *mas, struct ma_state *new_mas, | |
6639 | struct maple_pnode *parent) | |
6640 | { | |
6641 | struct maple_node *node = mte_to_node(mas->node); | |
6642 | struct maple_node *new_node = mte_to_node(new_mas->node); | |
6643 | unsigned long val; | |
6644 | ||
6645 | /* Copy the node completely. */ | |
6646 | memcpy(new_node, node, sizeof(struct maple_node)); | |
6647 | /* Update the parent node pointer. */ | |
6648 | val = (unsigned long)node->parent & MAPLE_NODE_MASK; | |
6649 | new_node->parent = ma_parent_ptr(val | (unsigned long)parent); | |
6650 | } | |
6651 | ||
6652 | /* | |
6653 | * mas_dup_alloc() - Allocate child nodes for a maple node. | |
6654 | * @mas: The maple state of source tree. | |
6655 | * @new_mas: The maple state of new tree. | |
6656 | * @gfp: The GFP_FLAGS to use for allocations. | |
6657 | * | |
6658 | * This function allocates child nodes for @new_mas->node during the duplication | |
6659 | * process. If memory allocation fails, @mas is set to -ENOMEM. | |
6660 | */ | |
6661 | static inline void mas_dup_alloc(struct ma_state *mas, struct ma_state *new_mas, | |
6662 | gfp_t gfp) | |
6663 | { | |
6664 | struct maple_node *node = mte_to_node(mas->node); | |
6665 | struct maple_node *new_node = mte_to_node(new_mas->node); | |
6666 | enum maple_type type; | |
6667 | unsigned char request, count, i; | |
6668 | void __rcu **slots; | |
6669 | void __rcu **new_slots; | |
6670 | unsigned long val; | |
6671 | ||
6672 | /* Allocate memory for child nodes. */ | |
6673 | type = mte_node_type(mas->node); | |
6674 | new_slots = ma_slots(new_node, type); | |
6675 | request = mas_data_end(mas) + 1; | |
6676 | count = mt_alloc_bulk(gfp, request, (void **)new_slots); | |
6677 | if (unlikely(count < request)) { | |
6678 | memset(new_slots, 0, request * sizeof(void *)); | |
6679 | mas_set_err(mas, -ENOMEM); | |
6680 | return; | |
6681 | } | |
6682 | ||
6683 | /* Restore node type information in slots. */ | |
6684 | slots = ma_slots(node, type); | |
6685 | for (i = 0; i < count; i++) { | |
6686 | val = (unsigned long)mt_slot_locked(mas->tree, slots, i); | |
6687 | val &= MAPLE_NODE_MASK; | |
6688 | ((unsigned long *)new_slots)[i] |= val; | |
6689 | } | |
6690 | } | |
6691 | ||
6692 | /* | |
6693 | * mas_dup_build() - Build a new maple tree from a source tree | |
6694 | * @mas: The maple state of the source tree, which must be in the ma_start state. | |
6695 | * @new_mas: The maple state of the new tree, which must be in the ma_start state. | |
6696 | * @gfp: The GFP_FLAGS to use for allocations. | |
6697 | * | |
6698 | * This function builds a new tree in DFS preorder. If the memory allocation | |
6699 | * fails, the error code -ENOMEM will be set in @mas, and @new_mas points to the | |
6700 | * last node. mas_dup_free() will free the incomplete duplication of a tree. | |
6701 | * | |
6702 | * Note that the attributes of the two trees need to be exactly the same, and the | |
6703 | * new tree needs to be empty, otherwise -EINVAL will be set in @mas. | |
6704 | */ | |
6705 | static inline void mas_dup_build(struct ma_state *mas, struct ma_state *new_mas, | |
6706 | gfp_t gfp) | |
6707 | { | |
6708 | struct maple_node *node; | |
6709 | struct maple_pnode *parent = NULL; | |
6710 | struct maple_enode *root; | |
6711 | enum maple_type type; | |
6712 | ||
6713 | if (unlikely(mt_attr(mas->tree) != mt_attr(new_mas->tree)) || | |
6714 | unlikely(!mtree_empty(new_mas->tree))) { | |
6715 | mas_set_err(mas, -EINVAL); | |
6716 | return; | |
6717 | } | |
6718 | ||
6719 | root = mas_start(mas); | |
6720 | if (mas_is_ptr(mas) || mas_is_none(mas)) | |
6721 | goto set_new_tree; | |
6722 | ||
6723 | node = mt_alloc_one(gfp); | |
6724 | if (!node) { | |
067311d3 | 6725 | new_mas->status = ma_none; |
fd32e4e9 PZ |
6726 | mas_set_err(mas, -ENOMEM); |
6727 | return; | |
6728 | } | |
6729 | ||
6730 | type = mte_node_type(mas->node); | |
6731 | root = mt_mk_node(node, type); | |
6732 | new_mas->node = root; | |
6733 | new_mas->min = 0; | |
6734 | new_mas->max = ULONG_MAX; | |
6735 | root = mte_mk_root(root); | |
6736 | while (1) { | |
6737 | mas_copy_node(mas, new_mas, parent); | |
6738 | if (!mte_is_leaf(mas->node)) { | |
6739 | /* Only allocate child nodes for non-leaf nodes. */ | |
6740 | mas_dup_alloc(mas, new_mas, gfp); | |
6741 | if (unlikely(mas_is_err(mas))) | |
6742 | return; | |
6743 | } else { | |
6744 | /* | |
6745 | * This is the last leaf node and duplication is | |
6746 | * completed. | |
6747 | */ | |
6748 | if (mas->max == ULONG_MAX) | |
6749 | goto done; | |
6750 | ||
6751 | /* This is not the last leaf node and needs to go up. */ | |
6752 | do { | |
6753 | mas_ascend(mas); | |
6754 | mas_ascend(new_mas); | |
6755 | } while (mas->offset == mas_data_end(mas)); | |
6756 | ||
6757 | /* Move to the next subtree. */ | |
6758 | mas->offset++; | |
6759 | new_mas->offset++; | |
6760 | } | |
6761 | ||
6762 | mas_descend(mas); | |
6763 | parent = ma_parent_ptr(mte_to_node(new_mas->node)); | |
6764 | mas_descend(new_mas); | |
6765 | mas->offset = 0; | |
6766 | new_mas->offset = 0; | |
6767 | } | |
6768 | done: | |
6769 | /* Specially handle the parent of the root node. */ | |
6770 | mte_to_node(root)->parent = ma_parent_ptr(mas_tree_parent(new_mas)); | |
6771 | set_new_tree: | |
6772 | /* Copy the flags, which encode the height, so the trees match */ | |
6773 | new_mas->tree->ma_flags = mas->tree->ma_flags; | |
6774 | rcu_assign_pointer(new_mas->tree->ma_root, root); | |
6775 | } | |
6776 | ||
6777 | /** | |
6778 | * __mt_dup(): Duplicate an entire maple tree | |
6779 | * @mt: The source maple tree | |
6780 | * @new: The new maple tree | |
6781 | * @gfp: The GFP_FLAGS to use for allocations | |
6782 | * | |
6783 | * This function duplicates a maple tree in Depth-First Search (DFS) pre-order | |
6784 | * traversal. It uses memcpy() to copy nodes in the source tree and allocates | |
6785 | * new child nodes for non-leaf nodes. Each new node is identical to its | |
6786 | * source node except for the addresses stored in it. This is faster than | |
6787 | * traversing all elements in the source tree and inserting them one by one into | |
6788 | * the new tree. | |
6789 | * The user needs to ensure that the attributes of the source tree and the new | |
6790 | * tree are the same, and the new tree needs to be an empty tree, otherwise | |
6791 | * -EINVAL will be returned. | |
6792 | * Note that the user needs to manually lock the source tree and the new tree. | |
6793 | * | |
6794 | * Return: 0 on success, -ENOMEM if memory could not be allocated, -EINVAL if | |
6795 | * the attributes of the two trees are different or the new tree is not an empty | |
6796 | * tree. | |
6797 | */ | |
6798 | int __mt_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp) | |
6799 | { | |
6800 | int ret = 0; | |
6801 | MA_STATE(mas, mt, 0, 0); | |
6802 | MA_STATE(new_mas, new, 0, 0); | |
6803 | ||
6804 | mas_dup_build(&mas, &new_mas, gfp); | |
6805 | if (unlikely(mas_is_err(&mas))) { | |
6806 | ret = xa_err(mas.node); | |
6807 | if (ret == -ENOMEM) | |
6808 | mas_dup_free(&new_mas); | |
6809 | } | |
6810 | ||
6811 | return ret; | |
6812 | } | |
6813 | EXPORT_SYMBOL(__mt_dup); | |
6814 | ||
6815 | /** | |
6816 | * mtree_dup(): Duplicate an entire maple tree | |
6817 | * @mt: The source maple tree | |
6818 | * @new: The new maple tree | |
6819 | * @gfp: The GFP_FLAGS to use for allocations | |
6820 | * | |
6821 | * This function duplicates a maple tree in Depth-First Search (DFS) pre-order | |
6822 | * traversal. It uses memcpy() to copy nodes in the source tree and allocates | |
6823 | * new child nodes for non-leaf nodes. Each new node is identical to its | |
6824 | * source node except for the addresses stored in it. This is faster than | |
6825 | * traversing all elements in the source tree and inserting them one by one into | |
6826 | * the new tree. | |
6827 | * The user needs to ensure that the attributes of the source tree and the new | |
6828 | * tree are the same, and the new tree needs to be an empty tree, otherwise | |
6829 | * -EINVAL will be returned. | |
6830 | * | |
6831 | * Return: 0 on success, -ENOMEM if memory could not be allocated, -EINVAL if | |
6832 | * the attributes of the two trees are different or the new tree is not an empty | |
6833 | * tree. | |
6834 | */ | |
6835 | int mtree_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp) | |
6836 | { | |
6837 | int ret = 0; | |
6838 | MA_STATE(mas, mt, 0, 0); | |
6839 | MA_STATE(new_mas, new, 0, 0); | |
6840 | ||
6841 | mas_lock(&new_mas); | |
6842 | mas_lock_nested(&mas, SINGLE_DEPTH_NESTING); | |
6843 | mas_dup_build(&mas, &new_mas, gfp); | |
6844 | mas_unlock(&mas); | |
6845 | if (unlikely(mas_is_err(&mas))) { | |
6846 | ret = xa_err(mas.node); | |
6847 | if (ret == -ENOMEM) | |
6848 | mas_dup_free(&new_mas); | |
6849 | } | |
6850 | ||
6851 | mas_unlock(&new_mas); | |
6852 | return ret; | |
6853 | } | |
6854 | EXPORT_SYMBOL(mtree_dup); | |
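/*
 * Editor's usage sketch: duplicate a tree into a freshly defined (and thus
 * empty) destination. As noted above, the two trees' attributes must match;
 * here both are assumed to be plain trees with no flags.
 *
 *	DEFINE_MTREE(new_tree);
 *	int ret;
 *
 *	ret = mtree_dup(&tree, &new_tree, GFP_KERNEL);
 *	if (!ret)
 *		pr_info("new_tree now mirrors tree\n");
 */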
6855 | ||
54a611b6 LH |
6856 | /** |
6857 | * __mt_destroy() - Walk and free all nodes of a locked maple tree. | |
6858 | * @mt: The maple tree | |
6859 | * | |
6860 | * Note: Does not handle locking. | |
6861 | */ | |
6862 | void __mt_destroy(struct maple_tree *mt) | |
6863 | { | |
6864 | void *root = mt_root_locked(mt); | |
6865 | ||
6866 | rcu_assign_pointer(mt->ma_root, NULL); | |
6867 | if (xa_is_node(root)) | |
6868 | mte_destroy_walk(root, mt); | |
6869 | ||
8e50d32c | 6870 | mt->ma_flags = mt_attr(mt); |
54a611b6 LH |
6871 | } |
6872 | EXPORT_SYMBOL_GPL(__mt_destroy); | |
6873 | ||
6874 | /** | |
6875 | * mtree_destroy() - Destroy a maple tree | |
6876 | * @mt: The maple tree | |
6877 | * | |
6878 | * Frees all resources used by the tree. Handles locking. | |
6879 | */ | |
6880 | void mtree_destroy(struct maple_tree *mt) | |
6881 | { | |
6882 | mtree_lock(mt); | |
6883 | __mt_destroy(mt); | |
6884 | mtree_unlock(mt); | |
6885 | } | |
6886 | EXPORT_SYMBOL(mtree_destroy); | |
6887 | ||
6888 | /** | |
6889 | * mt_find() - Search from the start up until an entry is found. | |
6890 | * @mt: The maple tree | |
6891 | * @index: Pointer which contains the start location of the search | |
fad9c80e TG |
6892 | * @max: The maximum value of the search range |
6893 | * | |
6894 | * Takes RCU read lock internally to protect the search, which does not | |
6895 | * protect the returned pointer after dropping RCU read lock. | |
6896 | * See also: Documentation/core-api/maple_tree.rst | |
54a611b6 | 6897 | * |
fad9c80e TG |
6898 | * If an entry is found, @index is updated to point to the next | |
6899 | * possible entry, independent of whether the found entry occupies a | |
6900 | * single index or a range of indices. | |
54a611b6 LH |
6901 | * |
6902 | * Return: The entry at or after the @index or %NULL | |
6903 | */ | |
6904 | void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max) | |
6905 | { | |
6906 | MA_STATE(mas, mt, *index, *index); | |
6907 | void *entry; | |
6908 | #ifdef CONFIG_DEBUG_MAPLE_TREE | |
6909 | unsigned long copy = *index; | |
6910 | #endif | |
6911 | ||
6912 | trace_ma_read(__func__, &mas); | |
6913 | ||
6914 | if ((*index) > max) | |
6915 | return NULL; | |
6916 | ||
6917 | rcu_read_lock(); | |
6918 | retry: | |
6919 | entry = mas_state_walk(&mas); | |
6920 | if (mas_is_start(&mas)) | |
6921 | goto retry; | |
6922 | ||
6923 | if (unlikely(xa_is_zero(entry))) | |
6924 | entry = NULL; | |
6925 | ||
6926 | if (entry) | |
6927 | goto unlock; | |
6928 | ||
9a40d45c | 6929 | while (mas_is_active(&mas) && (mas.last < max)) { |
002ebb92 | 6930 | entry = mas_next_slot(&mas, max, false); |
54a611b6 LH |
6931 | if (likely(entry && !xa_is_zero(entry))) |
6932 | break; | |
6933 | } | |
6934 | ||
6935 | if (unlikely(xa_is_zero(entry))) | |
6936 | entry = NULL; | |
6937 | unlock: | |
6938 | rcu_read_unlock(); | |
6939 | if (likely(entry)) { | |
6940 | *index = mas.last + 1; | |
6941 | #ifdef CONFIG_DEBUG_MAPLE_TREE | |
e6d6792a | 6942 | if (MT_WARN_ON(mt, (*index) && ((*index) <= copy))) |
54a611b6 LH |
6943 | pr_err("index not increased! %lx <= %lx\n", |
6944 | *index, copy); | |
54a611b6 LH |
6945 | #endif |
6946 | } | |
6947 | ||
6948 | return entry; | |
6949 | } | |
6950 | EXPORT_SYMBOL(mt_find); | |
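/*
 * Editor's usage sketch (tree is illustrative): because mt_find() advances
 * *index past the found range, the loop below visits each entry once; this
 * is essentially what mt_for_each() does, using mt_find_after() for the
 * subsequent steps.
 *
 *	unsigned long index = 0;
 *	void *entry;
 *
 *	while ((entry = mt_find(&tree, &index, ULONG_MAX)) != NULL)
 *		pr_info("entry %p ends at %lu\n", entry, index - 1);
 */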
6951 | ||
6952 | /** | |
6953 | * mt_find_after() - Search from the start up until an entry is found. | |
6954 | * @mt: The maple tree | |
6955 | * @index: Pointer which contains the start location of the search | |
6956 | * @max: The maximum value to check | |
6957 | * | |
fad9c80e TG |
6958 | * Same as mt_find() except that it checks @index for 0 before |
6959 | * searching. If @index == 0, the search is aborted. This covers a wrap | |
6960 | * around of @index to 0 in an iterator loop. | |
54a611b6 LH |
6961 | * |
6962 | * Return: The entry at or after the @index or %NULL | |
6963 | */ | |
6964 | void *mt_find_after(struct maple_tree *mt, unsigned long *index, | |
6965 | unsigned long max) | |
6966 | { | |
6967 | if (!(*index)) | |
6968 | return NULL; | |
6969 | ||
6970 | return mt_find(mt, index, max); | |
6971 | } | |
6972 | EXPORT_SYMBOL(mt_find_after); | |
6973 | ||
6974 | #ifdef CONFIG_DEBUG_MAPLE_TREE | |
6975 | atomic_t maple_tree_tests_run; | |
6976 | EXPORT_SYMBOL_GPL(maple_tree_tests_run); | |
6977 | atomic_t maple_tree_tests_passed; | |
6978 | EXPORT_SYMBOL_GPL(maple_tree_tests_passed); | |
6979 | ||
6980 | #ifndef __KERNEL__ | |
6981 | extern void kmem_cache_set_non_kernel(struct kmem_cache *, unsigned int); | |
6982 | void mt_set_non_kernel(unsigned int val) | |
6983 | { | |
6984 | kmem_cache_set_non_kernel(maple_node_cache, val); | |
6985 | } | |
6986 | ||
617f8e4d SK |
6987 | extern void kmem_cache_set_callback(struct kmem_cache *cachep, |
6988 | void (*callback)(void *)); | |
6989 | void mt_set_callback(void (*callback)(void *)) | |
6990 | { | |
6991 | kmem_cache_set_callback(maple_node_cache, callback); | |
6992 | } | |
6993 | ||
6994 | extern void kmem_cache_set_private(struct kmem_cache *cachep, void *private); | |
6995 | void mt_set_private(void *private) | |
6996 | { | |
6997 | kmem_cache_set_private(maple_node_cache, private); | |
6998 | } | |
6999 | ||
54a611b6 LH |
7000 | extern unsigned long kmem_cache_get_alloc(struct kmem_cache *); |
7001 | unsigned long mt_get_alloc_size(void) | |
7002 | { | |
7003 | return kmem_cache_get_alloc(maple_node_cache); | |
7004 | } | |
7005 | ||
7006 | extern void kmem_cache_zero_nr_tallocated(struct kmem_cache *); | |
7007 | void mt_zero_nr_tallocated(void) | |
7008 | { | |
7009 | kmem_cache_zero_nr_tallocated(maple_node_cache); | |
7010 | } | |
7011 | ||
7012 | extern unsigned int kmem_cache_nr_tallocated(struct kmem_cache *); | |
7013 | unsigned int mt_nr_tallocated(void) | |
7014 | { | |
7015 | return kmem_cache_nr_tallocated(maple_node_cache); | |
7016 | } | |
7017 | ||
7018 | extern unsigned int kmem_cache_nr_allocated(struct kmem_cache *); | |
7019 | unsigned int mt_nr_allocated(void) | |
7020 | { | |
7021 | return kmem_cache_nr_allocated(maple_node_cache); | |
7022 | } | |
7023 | ||
120b1162 LH |
7024 | void mt_cache_shrink(void) |
7025 | { | |
7026 | } | |
7027 | #else | |
7028 | /* | |
7029 | * mt_cache_shrink() - For testing, don't use this. | |
7030 | * | |
7031 | * Certain testcases can trigger an OOM when combined with other memory | |
7032 | * debugging configuration options. This function is used to reduce the | |
7033 | * possibility of an out-of-memory event due to kmem_cache objects remaining | |
7034 | * around for longer than usual. | |
7035 | */ | |
7036 | void mt_cache_shrink(void) | |
7037 | { | |
7038 | kmem_cache_shrink(maple_node_cache); | |
7039 | ||
7040 | } | |
7041 | EXPORT_SYMBOL_GPL(mt_cache_shrink); | |
7042 | ||
7043 | #endif /* not defined __KERNEL__ */ | |
54a611b6 LH |
7044 | /* |
7045 | * mas_get_slot() - Get the entry in the maple state node stored at @offset. | |
7046 | * @mas: The maple state | |
7047 | * @offset: The offset into the slot array to fetch. | |
7048 | * | |
7049 | * Return: The entry stored at @offset. | |
7050 | */ | |
7051 | static inline struct maple_enode *mas_get_slot(struct ma_state *mas, | |
7052 | unsigned char offset) | |
7053 | { | |
7054 | return mas_slot(mas, ma_slots(mas_mn(mas), mte_node_type(mas->node)), | |
7055 | offset); | |
7056 | } | |
7057 | ||
54a611b6 LH |
7058 | /* Depth first search, post-order */ |
7059 | static void mas_dfs_postorder(struct ma_state *mas, unsigned long max) | |
7060 | { | |
7061 | ||
067311d3 | 7062 | struct maple_enode *p, *mn = mas->node; |
54a611b6 LH |
7063 | unsigned long p_min, p_max; |
7064 | ||
7065 | mas_next_node(mas, mas_mn(mas), max); | |
067311d3 | 7066 | if (!mas_is_overflow(mas)) |
54a611b6 LH |
7067 | return; |
7068 | ||
7069 | if (mte_is_root(mn)) | |
7070 | return; | |
7071 | ||
7072 | mas->node = mn; | |
7073 | mas_ascend(mas); | |
c3eb787e | 7074 | do { |
54a611b6 LH |
7075 | p = mas->node; |
7076 | p_min = mas->min; | |
7077 | p_max = mas->max; | |
7078 | mas_prev_node(mas, 0); | |
067311d3 | 7079 | } while (!mas_is_underflow(mas)); |
54a611b6 LH |
7080 | |
7081 | mas->node = p; | |
7082 | mas->max = p_max; | |
7083 | mas->min = p_min; | |
7084 | } | |
7085 | ||
7086 | /* Tree validations */ | |
7087 | static void mt_dump_node(const struct maple_tree *mt, void *entry, | |
89f499f3 LH |
7088 | unsigned long min, unsigned long max, unsigned int depth, |
7089 | enum mt_dump_format format); | |
54a611b6 | 7090 | static void mt_dump_range(unsigned long min, unsigned long max, |
89f499f3 | 7091 | unsigned int depth, enum mt_dump_format format) |
54a611b6 LH |
7092 | { |
7093 | static const char spaces[] = " "; | |
7094 | ||
89f499f3 LH |
7095 | switch (format) { | |
7096 | case mt_dump_hex: | |
7097 | if (min == max) | |
7098 | pr_info("%.*s%lx: ", depth * 2, spaces, min); | |
7099 | else | |
7100 | pr_info("%.*s%lx-%lx: ", depth * 2, spaces, min, max); | |
7101 | break; | |
89f499f3 LH |
7102 | case mt_dump_dec: |
7103 | if (min == max) | |
7104 | pr_info("%.*s%lu: ", depth * 2, spaces, min); | |
7105 | else | |
7106 | pr_info("%.*s%lu-%lu: ", depth * 2, spaces, min, max); | |
7107 | } | |
54a611b6 LH |
7108 | } |
7109 | ||
7110 | static void mt_dump_entry(void *entry, unsigned long min, unsigned long max, | |
89f499f3 | 7111 | unsigned int depth, enum mt_dump_format format) |
54a611b6 | 7112 | { |
89f499f3 | 7113 | mt_dump_range(min, max, depth, format); |
54a611b6 LH |
7114 | |
7115 | if (xa_is_value(entry)) | |
b314e215 LS |
7116 | pr_cont("value %ld (0x%lx) [" PTR_FMT "]\n", xa_to_value(entry), |
7117 | xa_to_value(entry), entry); | |
54a611b6 LH |
7118 | else if (xa_is_zero(entry)) |
7119 | pr_cont("zero (%ld)\n", xa_to_internal(entry)); | |
7120 | else if (mt_is_reserved(entry)) | |
b314e215 | 7121 | pr_cont("UNKNOWN ENTRY (" PTR_FMT ")\n", entry); |
54a611b6 | 7122 | else |
b314e215 | 7123 | pr_cont(PTR_FMT "\n", entry); |
54a611b6 LH |
7124 | } |
7125 | ||
7126 | static void mt_dump_range64(const struct maple_tree *mt, void *entry, | |
89f499f3 LH |
7127 | unsigned long min, unsigned long max, unsigned int depth, |
7128 | enum mt_dump_format format) | |
54a611b6 LH |
7129 | { |
7130 | struct maple_range_64 *node = &mte_to_node(entry)->mr64; | |
7131 | bool leaf = mte_is_leaf(entry); | |
7132 | unsigned long first = min; | |
7133 | int i; | |
7134 | ||
7135 | pr_cont(" contents: "); | |
89f499f3 LH |
7136 | for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++) { |
7137 | switch (format) { | |
7138 | case mt_dump_hex: | |
b314e215 | 7139 | pr_cont(PTR_FMT " %lX ", node->slot[i], node->pivot[i]); |
89f499f3 | 7140 | break; |
89f499f3 | 7141 | case mt_dump_dec: |
b314e215 | 7142 | pr_cont(PTR_FMT " %lu ", node->slot[i], node->pivot[i]); |
89f499f3 LH |
7143 | } |
7144 | } | |
b314e215 | 7145 | pr_cont(PTR_FMT "\n", node->slot[i]); |
54a611b6 LH |
7146 | for (i = 0; i < MAPLE_RANGE64_SLOTS; i++) { |
7147 | unsigned long last = max; | |
7148 | ||
7149 | if (i < (MAPLE_RANGE64_SLOTS - 1)) | |
7150 | last = node->pivot[i]; | |
bd592703 | 7151 | else if (!node->slot[i] && max != mt_node_max(entry)) |
54a611b6 LH |
7152 | break; |
7153 | if (last == 0 && i > 0) | |
7154 | break; | |
7155 | if (leaf) | |
7156 | mt_dump_entry(mt_slot(mt, node->slot, i), | |
89f499f3 | 7157 | first, last, depth + 1, format); |
54a611b6 LH |
7158 | else if (node->slot[i]) |
7159 | mt_dump_node(mt, mt_slot(mt, node->slot, i), | |
89f499f3 | 7160 | first, last, depth + 1, format); |
54a611b6 LH |
7161 | |
7162 | if (last == max) | |
7163 | break; | |
7164 | if (last > max) { | |
89f499f3 LH |
7165 | switch (format) { | |
7166 | case mt_dump_hex: | |
b314e215 | 7167 | pr_err("node " PTR_FMT " last (%lx) > max (%lx) at pivot %d!\n", |
54a611b6 | 7168 | node, last, max, i); |
89f499f3 | 7169 | break; |
89f499f3 | 7170 | case mt_dump_dec: |
b314e215 | 7171 | pr_err("node " PTR_FMT " last (%lu) > max (%lu) at pivot %d!\n", |
89f499f3 LH |
7172 | node, last, max, i); |
7173 | } | |
54a611b6 LH |
7174 | } |
7175 | first = last + 1; | |
7176 | } | |
7177 | } | |
7178 | ||
7179 | static void mt_dump_arange64(const struct maple_tree *mt, void *entry, | |
89f499f3 LH |
7180 | unsigned long min, unsigned long max, unsigned int depth, |
7181 | enum mt_dump_format format) | |
54a611b6 LH |
7182 | { |
7183 | struct maple_arange_64 *node = &mte_to_node(entry)->ma64; | |
54a611b6 LH |
7184 | unsigned long first = min; |
7185 | int i; | |
7186 | ||
7187 | pr_cont(" contents: "); | |
83d97f62 LH |
7188 | for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) { |
7189 | switch (format) { | |
7190 | case mt_dump_hex: | |
7191 | pr_cont("%lx ", node->gap[i]); | |
7192 | break; | |
83d97f62 LH |
7193 | case mt_dump_dec: |
7194 | pr_cont("%lu ", node->gap[i]); | |
7195 | } | |
7196 | } | |
54a611b6 | 7197 | pr_cont("| %02X %02X| ", node->meta.end, node->meta.gap); |
83d97f62 LH |
7198 | for (i = 0; i < MAPLE_ARANGE64_SLOTS - 1; i++) { |
7199 | switch (format) { | |
7200 | case mt_dump_hex: | |
b314e215 | 7201 | pr_cont(PTR_FMT " %lX ", node->slot[i], node->pivot[i]); |
83d97f62 | 7202 | break; |
83d97f62 | 7203 | case mt_dump_dec: |
b314e215 | 7204 | pr_cont(PTR_FMT " %lu ", node->slot[i], node->pivot[i]); |
83d97f62 LH |
7205 | } |
7206 | } | |
b314e215 | 7207 | pr_cont(PTR_FMT "\n", node->slot[i]); |
54a611b6 LH |
7208 | for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) { |
7209 | unsigned long last = max; | |
7210 | ||
7211 | if (i < (MAPLE_ARANGE64_SLOTS - 1)) | |
7212 | last = node->pivot[i]; | |
7213 | else if (!node->slot[i]) | |
7214 | break; | |
7215 | if (last == 0 && i > 0) | |
7216 | break; | |
81528310 | 7217 | if (node->slot[i]) |
54a611b6 | 7218 | mt_dump_node(mt, mt_slot(mt, node->slot, i), |
89f499f3 | 7219 | first, last, depth + 1, format); |
54a611b6 LH |
7220 | |
7221 | if (last == max) | |
7222 | break; | |
7223 | if (last > max) { | |
21a449be WY |
7224 | switch (format) { | |
7225 | case mt_dump_hex: | |
b314e215 | 7226 | pr_err("node " PTR_FMT " last (%lx) > max (%lx) at pivot %d!\n", |
54a611b6 | 7227 | node, last, max, i); |
21a449be WY |
7228 | break; |
7229 | case mt_dump_dec: | |
b314e215 | 7230 | pr_err("node " PTR_FMT " last (%lu) > max (%lu) at pivot %d!\n", |
21a449be WY |
7231 | node, last, max, i); |
7232 | } | |
54a611b6 LH |
7233 | } |
7234 | first = last + 1; | |
7235 | } | |
7236 | } | |
7237 | ||
7238 | static void mt_dump_node(const struct maple_tree *mt, void *entry, | |
89f499f3 LH |
7239 | unsigned long min, unsigned long max, unsigned int depth, |
7240 | enum mt_dump_format format) | |
54a611b6 LH |
7241 | { |
7242 | struct maple_node *node = mte_to_node(entry); | |
7243 | unsigned int type = mte_node_type(entry); | |
7244 | unsigned int i; | |
7245 | ||
89f499f3 | 7246 | mt_dump_range(min, max, depth, format); |
54a611b6 | 7247 | |
b314e215 LS |
7248 | pr_cont("node " PTR_FMT " depth %d type %d parent " PTR_FMT, node, |
7249 | depth, type, node ? node->parent : NULL); | |
54a611b6 LH |
7250 | switch (type) { |
7251 | case maple_dense: | |
7252 | pr_cont("\n"); | |
7253 | for (i = 0; i < MAPLE_NODE_SLOTS; i++) { | |
7254 | if (min + i > max) | |
7255 | pr_cont("OUT OF RANGE: "); | |
7256 | mt_dump_entry(mt_slot(mt, node->slot, i), | |
89f499f3 | 7257 | min + i, min + i, depth, format); |
54a611b6 LH |
7258 | } |
7259 | break; | |
7260 | case maple_leaf_64: | |
7261 | case maple_range_64: | |
89f499f3 | 7262 | mt_dump_range64(mt, entry, min, max, depth, format); |
54a611b6 LH |
7263 | break; |
7264 | case maple_arange_64: | |
89f499f3 | 7265 | mt_dump_arange64(mt, entry, min, max, depth, format); |
54a611b6 LH |
7266 | break; |
7267 | ||
7268 | default: | |
7269 | pr_cont(" UNKNOWN TYPE\n"); | |
7270 | } | |
7271 | } | |
7272 | ||
89f499f3 | 7273 | void mt_dump(const struct maple_tree *mt, enum mt_dump_format format) |
54a611b6 LH |
7274 | { |
7275 | void *entry = rcu_dereference_check(mt->ma_root, mt_locked(mt)); | |
7276 | ||
b314e215 | 7277 | pr_info("maple_tree(" PTR_FMT ") flags %X, height %u root " PTR_FMT "\n", |
54a611b6 | 7278 | mt, mt->ma_flags, mt_height(mt), entry); |
04dafdd2 | 7279 | if (xa_is_node(entry)) |
89f499f3 | 7280 | mt_dump_node(mt, entry, 0, mt_node_max(entry), 0, format); |
04dafdd2 WY |
7281 | else if (entry) |
7282 | mt_dump_entry(entry, 0, 0, 0, format); | |
7283 | else | |
7284 | pr_info("(empty)\n"); | |
54a611b6 | 7285 | } |
120b1162 | 7286 | EXPORT_SYMBOL_GPL(mt_dump); |
54a611b6 LH |
7287 | |
7288 | /* | |
7289 | * Calculate the maximum gap in a node and check if that's what is reported in | |
7290 | * the parent (unless root). | |
7291 | */ | |
7292 | static void mas_validate_gaps(struct ma_state *mas) | |
7293 | { | |
7294 | struct maple_enode *mte = mas->node; | |
f8e5eac8 PZ |
7295 | struct maple_node *p_mn, *node = mte_to_node(mte); |
7296 | enum maple_type mt = mte_node_type(mas->node); | |
54a611b6 LH |
7297 | unsigned long gap = 0, max_gap = 0; |
7298 | unsigned long p_end, p_start = mas->min; | |
f8e5eac8 | 7299 | unsigned char p_slot, offset; |
54a611b6 | 7300 | unsigned long *gaps = NULL; |
f8e5eac8 PZ |
7301 | unsigned long *pivots = ma_pivots(node, mt); |
7302 | unsigned int i; | |
54a611b6 | 7303 | |
f8e5eac8 | 7304 | if (ma_is_dense(mt)) { |
54a611b6 LH |
7305 | for (i = 0; i < mt_slot_count(mte); i++) { |
7306 | if (mas_get_slot(mas, i)) { | |
7307 | if (gap > max_gap) | |
7308 | max_gap = gap; | |
7309 | gap = 0; | |
7310 | continue; | |
7311 | } | |
7312 | gap++; | |
7313 | } | |
7314 | goto counted; | |
7315 | } | |
7316 | ||
f8e5eac8 | 7317 | gaps = ma_gaps(node, mt); |
54a611b6 | 7318 | for (i = 0; i < mt_slot_count(mte); i++) { |
29b2681f | 7319 | p_end = mas_safe_pivot(mas, pivots, i, mt); |
54a611b6 LH |
7320 | |
7321 | if (!gaps) { | |
f8e5eac8 PZ |
7322 | if (!mas_get_slot(mas, i)) |
7323 | gap = p_end - p_start + 1; | |
54a611b6 LH |
7324 | } else { |
7325 | void *entry = mas_get_slot(mas, i); | |
7326 | ||
7327 | gap = gaps[i]; | |
f8e5eac8 PZ |
7328 | MT_BUG_ON(mas->tree, !entry); |
7329 | ||
7330 | if (gap > p_end - p_start + 1) { | |
b314e215 | 7331 | pr_err(PTR_FMT "[%u] %lu >= %lu - %lu + 1 (%lu)\n", |
f8e5eac8 PZ |
7332 | mas_mn(mas), i, gap, p_end, p_start, |
7333 | p_end - p_start + 1); | |
7334 | MT_BUG_ON(mas->tree, gap > p_end - p_start + 1); | |
54a611b6 LH |
7335 | } |
7336 | } | |
7337 | ||
7338 | if (gap > max_gap) | |
7339 | max_gap = gap; | |
f8e5eac8 | 7340 | |
54a611b6 LH |
7341 | p_start = p_end + 1; |
7342 | if (p_end >= mas->max) | |
7343 | break; | |
7344 | } | |
7345 | ||
7346 | counted: | |
f8e5eac8 | 7347 | if (mt == maple_arange_64) { |
2e783f0c | 7348 | MT_BUG_ON(mas->tree, !gaps); |
c5e94121 | 7349 | offset = ma_meta_gap(node); |
f8e5eac8 | 7350 | if (offset > i) { |
b314e215 | 7351 | pr_err("gap offset " PTR_FMT "[%u] is invalid\n", node, offset); |
f8e5eac8 PZ |
7352 | MT_BUG_ON(mas->tree, 1); |
7353 | } | |
7354 | ||
7355 | if (gaps[offset] != max_gap) { | |
b314e215 | 7356 | pr_err("gap " PTR_FMT "[%u] is not the largest gap %lu\n", |
f8e5eac8 PZ |
7357 | node, offset, max_gap); |
7358 | MT_BUG_ON(mas->tree, 1); | |
7359 | } | |
7360 | ||
f8e5eac8 PZ |
7361 | for (i++ ; i < mt_slot_count(mte); i++) { |
7362 | if (gaps[i] != 0) { | |
b314e215 | 7363 | pr_err("gap " PTR_FMT "[%u] beyond node limit != 0\n", |
f8e5eac8 PZ |
7364 | node, i); |
7365 | MT_BUG_ON(mas->tree, 1); | |
7366 | } | |
7367 | } | |
7368 | } | |
7369 | ||
54a611b6 LH |
7370 | if (mte_is_root(mte)) |
7371 | return; | |
7372 | ||
7373 | p_slot = mte_parent_slot(mas->node); | |
7374 | p_mn = mte_parent(mte); | |
7375 | MT_BUG_ON(mas->tree, max_gap > mas->max); | |
afc754c6 | 7376 | if (ma_gaps(p_mn, mas_parent_type(mas, mte))[p_slot] != max_gap) { |
b314e215 | 7377 | pr_err("gap " PTR_FMT "[%u] != %lu\n", p_mn, p_slot, max_gap); |
89f499f3 | 7378 | mt_dump(mas->tree, mt_dump_hex); |
f8e5eac8 | 7379 | MT_BUG_ON(mas->tree, 1); |
54a611b6 | 7380 | } |
54a611b6 LH |
7381 | } |
7382 | ||
7383 | static void mas_validate_parent_slot(struct ma_state *mas) | |
7384 | { | |
7385 | struct maple_node *parent; | |
7386 | struct maple_enode *node; | |
afc754c6 LH |
7387 | enum maple_type p_type; |
7388 | unsigned char p_slot; | |
54a611b6 LH |
7389 | void __rcu **slots; |
7390 | int i; | |
7391 | ||
7392 | if (mte_is_root(mas->node)) | |
7393 | return; | |
7394 | ||
afc754c6 LH |
7395 | p_slot = mte_parent_slot(mas->node); |
7396 | p_type = mas_parent_type(mas, mas->node); | |
54a611b6 LH |
7397 | parent = mte_parent(mas->node); |
7398 | slots = ma_slots(parent, p_type); | |
7399 | MT_BUG_ON(mas->tree, mas_mn(mas) == parent); | |
7400 | ||
7401 | /* Check prev/next parent slot for duplicate node entry */ | |
7402 | ||
7403 | for (i = 0; i < mt_slots[p_type]; i++) { | |
7404 | node = mas_slot(mas, slots, i); | |
7405 | if (i == p_slot) { | |
7406 | if (node != mas->node) | |
b314e215 | 7407 | pr_err("parent " PTR_FMT "[%u] does not have " PTR_FMT "\n", |
54a611b6 LH |
7408 | parent, i, mas_mn(mas)); |
7409 | MT_BUG_ON(mas->tree, node != mas->node); | |
7410 | } else if (node == mas->node) { | |
b314e215 | 7411 | pr_err("Invalid child " PTR_FMT " at parent " PTR_FMT "[%u] p_slot %u\n", |
54a611b6 LH |
7412 | mas_mn(mas), parent, i, p_slot); |
7413 | MT_BUG_ON(mas->tree, node == mas->node); | |
7414 | } | |
7415 | } | |
7416 | } | |
7417 | ||
7418 | static void mas_validate_child_slot(struct ma_state *mas) | |
7419 | { | |
7420 | enum maple_type type = mte_node_type(mas->node); | |
7421 | void __rcu **slots = ma_slots(mte_to_node(mas->node), type); | |
7422 | unsigned long *pivots = ma_pivots(mte_to_node(mas->node), type); | |
7423 | struct maple_enode *child; | |
7424 | unsigned char i; | |
7425 | ||
7426 | if (mte_is_leaf(mas->node)) | |
7427 | return; | |
7428 | ||
7429 | for (i = 0; i < mt_slots[type]; i++) { | |
7430 | child = mas_slot(mas, slots, i); | |
54a611b6 | 7431 | |
e93fda5a | 7432 | if (!child) { |
b314e215 | 7433 | pr_err("Non-leaf node lacks child at " PTR_FMT "[%u]\n", |
e93fda5a PZ |
7434 | mas_mn(mas), i); |
7435 | MT_BUG_ON(mas->tree, 1); | |
7436 | } | |
54a611b6 LH |
7437 | |
7438 | if (mte_parent_slot(child) != i) { | |
b314e215 | 7439 | pr_err("Slot error at " PTR_FMT "[%u]: child " PTR_FMT " has pslot %u\n", |
54a611b6 LH |
7440 | mas_mn(mas), i, mte_to_node(child), |
7441 | mte_parent_slot(child)); | |
7442 | MT_BUG_ON(mas->tree, 1); | |
7443 | } | |
7444 | ||
7445 | if (mte_parent(child) != mte_to_node(mas->node)) { | |
b314e215 | 7446 | pr_err("child " PTR_FMT " has parent " PTR_FMT " not " PTR_FMT "\n", |
54a611b6 LH |
7447 | mte_to_node(child), mte_parent(child), |
7448 | mte_to_node(mas->node)); | |
7449 | MT_BUG_ON(mas->tree, 1); | |
7450 | } | |
e93fda5a PZ |
7451 | |
7452 | if (i < mt_pivots[type] && pivots[i] == mas->max) | |
7453 | break; | |
54a611b6 LH |
7454 | } |
7455 | } | |
7456 | ||
7457 | /* | |
33af39d0 PZ |
7458 | * Validate all pivots are within mas->min and mas->max, check metadata ends |
7459 | * where the maximum ends and ensure there are no slots or pivots set outside of | |
7460 | * the end of the data. | |
54a611b6 LH |
7461 | */ |
7462 | static void mas_validate_limits(struct ma_state *mas) | |
7463 | { | |
7464 | int i; | |
7465 | unsigned long prev_piv = 0; | |
7466 | enum maple_type type = mte_node_type(mas->node); | |
7467 | void __rcu **slots = ma_slots(mte_to_node(mas->node), type); | |
7468 | unsigned long *pivots = ma_pivots(mas_mn(mas), type); | |
7469 | ||
54a611b6 LH |
7470 | for (i = 0; i < mt_slots[type]; i++) { |
7471 | unsigned long piv; | |
7472 | ||
7473 | piv = mas_safe_pivot(mas, pivots, i, type); | |
7474 | ||
33af39d0 | 7475 | if (!piv && (i != 0)) { |
b314e215 | 7476 | pr_err("Missing node limit pivot at " PTR_FMT "[%u]", |
33af39d0 PZ |
7477 | mas_mn(mas), i); |
7478 | MAS_WARN_ON(mas, 1); | |
54a611b6 LH |
7479 | } |
7480 | ||
7481 | if (prev_piv > piv) { | |
b314e215 | 7482 | pr_err(PTR_FMT "[%u] piv %lu < prev_piv %lu\n", |
54a611b6 | 7483 | mas_mn(mas), i, piv, prev_piv); |
e6d6792a | 7484 | MAS_WARN_ON(mas, piv < prev_piv); |
54a611b6 LH |
7485 | } |
7486 | ||
7487 | if (piv < mas->min) { | |
b314e215 | 7488 | pr_err(PTR_FMT "[%u] %lu < %lu\n", mas_mn(mas), i, |
54a611b6 | 7489 | piv, mas->min); |
e6d6792a | 7490 | MAS_WARN_ON(mas, piv < mas->min); |
54a611b6 LH |
7491 | } |
7492 | if (piv > mas->max) { | |
b314e215 | 7493 | pr_err(PTR_FMT "[%u] %lu > %lu\n", mas_mn(mas), i, |
54a611b6 | 7494 | piv, mas->max); |
e6d6792a | 7495 | MAS_WARN_ON(mas, piv > mas->max); |
54a611b6 LH |
7496 | } |
7497 | prev_piv = piv; | |
7498 | if (piv == mas->max) | |
7499 | break; | |
7500 | } | |
33af39d0 PZ |
7501 | |
7502 | if (mas_data_end(mas) != i) { | |
b314e215 | 7503 | pr_err("node" PTR_FMT ": data_end %u != the last slot offset %u\n", |
33af39d0 PZ |
7504 | mas_mn(mas), mas_data_end(mas), i); |
7505 | MT_BUG_ON(mas->tree, 1); | |
7506 | } | |
7507 | ||
54a611b6 LH |
7508 | for (i += 1; i < mt_slots[type]; i++) { |
7509 | void *entry = mas_slot(mas, slots, i); | |
7510 | ||
7511 | if (entry && (i != mt_slots[type] - 1)) { | |
b314e215 LS |
7512 | pr_err(PTR_FMT "[%u] should not have entry " PTR_FMT "\n", |
7513 | mas_mn(mas), i, entry); | |
54a611b6 LH |
7514 | MT_BUG_ON(mas->tree, entry != NULL); |
7515 | } | |
7516 | ||
7517 | if (i < mt_pivots[type]) { | |
7518 | unsigned long piv = pivots[i]; | |
7519 | ||
7520 | if (!piv) | |
7521 | continue; | |
7522 | ||
b314e215 | 7523 | pr_err(PTR_FMT "[%u] should not have piv %lu\n", |
54a611b6 | 7524 | mas_mn(mas), i, piv); |
e6d6792a | 7525 | MAS_WARN_ON(mas, i < mt_pivots[type] - 1); |
54a611b6 LH |
7526 | } |
7527 | } | |
7528 | } | |
7529 | ||
7530 | static void mt_validate_nulls(struct maple_tree *mt) | |
7531 | { | |
7532 | void *entry, *last = (void *)1; | |
7533 | unsigned char offset = 0; | |
7534 | void __rcu **slots; | |
7535 | MA_STATE(mas, mt, 0, 0); | |
7536 | ||
7537 | mas_start(&mas); | |
067311d3 | 7538 | if (mas_is_none(&mas) || (mas_is_ptr(&mas))) |
54a611b6 LH |
7539 | return; |
7540 | ||
7541 | while (!mte_is_leaf(mas.node)) | |
7542 | mas_descend(&mas); | |
7543 | ||
7544 | slots = ma_slots(mte_to_node(mas.node), mte_node_type(mas.node)); | |
7545 | do { | |
7546 | entry = mas_slot(&mas, slots, offset); | |
7547 | if (!last && !entry) { | |
b314e215 | 7548 | pr_err("Sequential nulls end at " PTR_FMT "[%u]\n", |
54a611b6 LH |
7549 | mas_mn(&mas), offset); |
7550 | } | |
7551 | MT_BUG_ON(mt, !last && !entry); | |
7552 | last = entry; | |
7553 | if (offset == mas_data_end(&mas)) { | |
7554 | mas_next_node(&mas, mas_mn(&mas), ULONG_MAX); | |
067311d3 | 7555 | if (mas_is_overflow(&mas)) |
54a611b6 LH |
7556 | return; |
7557 | offset = 0; | |
7558 | slots = ma_slots(mte_to_node(mas.node), | |
7559 | mte_node_type(mas.node)); | |
7560 | } else { | |
7561 | offset++; | |
7562 | } | |
7563 | ||
067311d3 | 7564 | } while (!mas_is_overflow(&mas)); |
54a611b6 LH |
7565 | } |
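/*
 * Illustrative sketch (not kernel code): mt_validate_nulls() walks the
 * leaf slots in tree order and fails on two NULL entries in adjacent
 * slots, which would mean two NULL ranges were stored without being
 * merged. Over a flat entry array the rule reads as follows; the name
 * has_adjacent_nulls() is made up for illustration:
 *
 *	static bool has_adjacent_nulls(void **entries, unsigned int n)
 *	{
 *		unsigned int i;
 *
 *		for (i = 1; i < n; i++) {
 *			// two NULLs in a row violate the invariant
 *			if (!entries[i - 1] && !entries[i])
 *				return true;
 *		}
 *		return false;
 *	}
 */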
7566 | ||
7567 | /* | |
7568 | * Validate a maple tree by checking: |
7569 | * 1. The limits (pivots fall within mas->min and mas->max) |
7570 | * 2. The gaps are correctly set in the parents (allocation trees only) |
7571 | */ | |
7572 | void mt_validate(struct maple_tree *mt) | |
f806de88 | 7573 | __must_hold(mt->ma_lock) |
54a611b6 LH |
7574 | { |
7575 | unsigned char end; | |
7576 | ||
7577 | MA_STATE(mas, mt, 0, 0); | |
54a611b6 | 7578 | mas_start(&mas); |
9a40d45c | 7579 | if (!mas_is_active(&mas)) |
f806de88 | 7580 | return; |
54a611b6 | 7581 | |
a489539e PZ |
7582 | while (!mte_is_leaf(mas.node)) |
7583 | mas_descend(&mas); | |
7584 | ||
067311d3 | 7585 | while (!mas_is_overflow(&mas)) { |
e6d6792a | 7586 | MAS_WARN_ON(&mas, mte_dead_node(mas.node)); |
a489539e PZ |
7587 | end = mas_data_end(&mas); |
7588 | if (MAS_WARN_ON(&mas, (end < mt_min_slot_count(mas.node)) && | |
7318f95b | 7589 | (!mte_is_root(mas.node)))) { |
b314e215 LS |
7590 | pr_err("Invalid size %u of " PTR_FMT "\n", |
7591 | end, mas_mn(&mas)); | |
54a611b6 | 7592 | } |
a489539e | 7593 | |
54a611b6 | 7594 | mas_validate_parent_slot(&mas); |
54a611b6 | 7595 | mas_validate_limits(&mas); |
a489539e | 7596 | mas_validate_child_slot(&mas); |
54a611b6 LH |
7597 | if (mt_is_alloc(mt)) |
7598 | mas_validate_gaps(&mas); | |
7599 | mas_dfs_postorder(&mas, ULONG_MAX); | |
7600 | } | |
7601 | mt_validate_nulls(mt); | |
54a611b6 | 7602 | } |
120b1162 | 7603 | EXPORT_SYMBOL_GPL(mt_validate); |
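/*
 * Usage sketch (assumed caller pattern, following the __must_hold
 * annotation above): mt_validate() walks the whole tree and must be
 * called with the tree lock held, e.g. from a test or debug path:
 *
 *	DEFINE_MTREE(tree);
 *
 *	mtree_lock(&tree);
 *	mt_validate(&tree);
 *	mtree_unlock(&tree);
 */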
54a611b6 | 7604 | |
f0a1f866 LH |
7605 | void mas_dump(const struct ma_state *mas) |
7606 | { | |
b314e215 LS |
7607 | pr_err("MAS: tree=" PTR_FMT " enode=" PTR_FMT " ", |
7608 | mas->tree, mas->node); | |
067311d3 LH |
7609 | switch (mas->status) { |
7610 | case ma_active: | |
7611 | pr_err("(ma_active)"); | |
7612 | break; | |
7613 | case ma_none: | |
7614 | pr_err("(ma_none)"); | |
7615 | break; | |
7616 | case ma_root: | |
7617 | pr_err("(ma_root)"); | |
7618 | break; | |
7619 | case ma_start: | |
7620 | pr_err("(ma_start) "); | |
7621 | break; | |
7622 | case ma_pause: | |
7623 | pr_err("(ma_pause) "); | |
7624 | break; | |
7625 | case ma_overflow: | |
7626 | pr_err("(ma_overflow) "); | |
7627 | break; | |
7628 | case ma_underflow: | |
7629 | pr_err("(ma_underflow) "); | |
7630 | break; | |
7631 | case ma_error: | |
7632 | pr_err("(ma_error) "); | |
7633 | break; | |
7634 | } | |
7635 | ||
23e217a8 SK |
7636 | pr_err("Store Type: "); |
7637 | switch (mas->store_type) { | |
7638 | case wr_invalid: | |
7639 | pr_err("invalid store type\n"); | |
7640 | break; | |
7641 | case wr_new_root: | |
7642 | pr_err("new_root\n"); | |
7643 | break; | |
7644 | case wr_store_root: | |
7645 | pr_err("store_root\n"); | |
7646 | break; | |
7647 | case wr_exact_fit: | |
7648 | pr_err("exact_fit\n"); | |
7649 | break; | |
7650 | case wr_split_store: | |
7651 | pr_err("split_store\n"); | |
7652 | break; | |
7653 | case wr_slot_store: | |
7654 | pr_err("slot_store\n"); | |
7655 | break; | |
7656 | case wr_append: | |
7657 | pr_err("append\n"); | |
7658 | break; | |
7659 | case wr_node_store: | |
7660 | pr_err("node_store\n"); | |
7661 | break; | |
7662 | case wr_spanning_store: | |
7663 | pr_err("spanning_store\n"); | |
7664 | break; | |
7665 | case wr_rebalance: | |
7666 | pr_err("rebalance\n"); | |
7667 | break; | |
7668 | } | |
7669 | ||
067311d3 LH |
7670 | pr_err("[%u/%u] index=%lx last=%lx\n", mas->offset, mas->end, |
7671 | mas->index, mas->last); | |
b314e215 | 7672 | pr_err(" min=%lx max=%lx alloc=" PTR_FMT ", depth=%u, flags=%x\n", |
f0a1f866 LH |
7673 | mas->min, mas->max, mas->alloc, mas->depth, mas->mas_flags); |
7674 | if (mas->index > mas->last) | |
7675 | pr_err("Check index & last\n"); | |
7676 | } | |
7677 | EXPORT_SYMBOL_GPL(mas_dump); | |
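/*
 * Usage sketch (hypothetical debug call site): mas_dump() only reads
 * the state, so it can be dropped into a suspect path while debugging,
 * for example after an unexpected walk result:
 *
 *	MA_STATE(mas, &tree, index, index);
 *
 *	if (mas_walk(&mas) != expected)
 *		mas_dump(&mas);
 */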
7678 | ||
7679 | void mas_wr_dump(const struct ma_wr_state *wr_mas) | |
7680 | { | |
b314e215 | 7681 | pr_err("WR_MAS: node=" PTR_FMT " r_min=%lx r_max=%lx\n", |
f0a1f866 LH |
7682 | wr_mas->node, wr_mas->r_min, wr_mas->r_max); |
7683 | pr_err(" type=%u off_end=%u, node_end=%u, end_piv=%lx\n", | |
0de56e38 | 7684 | wr_mas->type, wr_mas->offset_end, wr_mas->mas->end, |
f0a1f866 LH |
7685 | wr_mas->end_piv); |
7686 | } | |
7687 | EXPORT_SYMBOL_GPL(mas_wr_dump); | |
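/*
 * Usage sketch (hypothetical call site): a write state embeds a pointer
 * to its maple state, so the two dumps pair naturally when a store goes
 * wrong:
 *
 *	MA_STATE(mas, &tree, first, last);
 *	MA_WR_STATE(wr_mas, &mas, entry);
 *
 *	mas_wr_dump(&wr_mas);
 *	mas_dump(wr_mas.mas);
 */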
7688 | ||
54a611b6 | 7689 | #endif /* CONFIG_DEBUG_MAPLE_TREE */ |