// SPDX-License-Identifier: GPL-2.0+
/*
 * Maple Tree implementation
 * Copyright (c) 2018-2022 Oracle Corporation
 * Authors: Liam R. Howlett <Liam.Howlett@oracle.com>
 *	    Matthew Wilcox <willy@infradead.org>
 * Copyright (c) 2023 ByteDance
 * Author: Peng Zhang <zhangpeng.00@bytedance.com>
 */

/*
 * DOC: Interesting implementation details of the Maple Tree
 *
 * Each node type has a number of slots for entries and a number of slots for
 * pivots.  In the case of dense nodes, the pivots are implied by the position
 * and are simply the slot index + the minimum of the node.
 *
 * In regular B-Tree terms, pivots are called keys.  The term pivot is used to
 * indicate that the tree is specifying ranges.  Pivots may appear in the
 * subtree with an entry attached to the value whereas keys are unique to a
 * specific position of a B-tree.  Pivot values are inclusive of the slot with
 * the same index.
 *
 * The following illustrates the layout of a range64 node's slots and pivots.
 *
 *  Slots -> | 0 | 1 | 2 | ... | 12 | 13 | 14 | 15 |
 *           ┬   ┬   ┬   ┬     ┬    ┬    ┬    ┬    ┬
 *           │   │   │   │     │    │    │    │    └─ Implied maximum
 *           │   │   │   │     │    │    │    └─ Pivot 14
 *           │   │   │   │     │    │    └─ Pivot 13
 *           │   │   │   │     │    └─ Pivot 12
 *           │   │   │   │     └─ Pivot 11
 *           │   │   │   └─ Pivot 2
 *           │   │   └─ Pivot 1
 *           │   └─ Pivot 0
 *           └─ Implied minimum
 *
 * Slot contents:
 *  Internal (non-leaf) nodes contain pointers to other nodes.
 *  Leaf nodes contain entries.
 *
 * The location of interest is often referred to as an offset.  All offsets
 * have a slot, but the last offset has an implied pivot from the node above
 * (or UINT_MAX for the root node).
 *
 * Ranges complicate certain write activities.  When modifying any of
 * the B-tree variants, it is known that one entry will either be added or
 * deleted.  When modifying the Maple Tree, one store operation may overwrite
 * the entire data set, or one half of the tree, or the middle half of the tree.
 */
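/*
 * Editor's example (illustrative only, not part of the upstream file): a leaf
 * with an implied minimum of 100 holding 100-150 => entryA and 151-200 =>
 * entryB lays out as:
 *
 *	slot[0] = entryA;	pivot[0] = 150;
 *	slot[1] = entryB;	pivot[1] = 200;
 *
 * so a walk for index 163 scans pivots until pivot[1] >= 163 and returns
 * slot[1].  From the external API such a leaf could be produced with
 * something like (error handling elided):
 *
 *	DEFINE_MTREE(tree);
 *	mtree_store_range(&tree, 100, 150, entryA, GFP_KERNEL);
 *	mtree_store_range(&tree, 151, 200, entryB, GFP_KERNEL);
 */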
#include <linux/maple_tree.h>
#include <linux/xarray.h>
#include <linux/types.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/limits.h>
#include <asm/barrier.h>

#define CREATE_TRACE_POINTS
#include <trace/events/maple_tree.h>

#define MA_ROOT_PARENT 1

/*
 * Maple state flags
 * * MA_STATE_BULK		- Bulk insert mode
 * * MA_STATE_REBALANCE		- Indicate a rebalance during bulk insert
 * * MA_STATE_PREALLOC		- Preallocated nodes, WARN_ON allocation
 */
#define MA_STATE_BULK		1
#define MA_STATE_REBALANCE	2
#define MA_STATE_PREALLOC	4

#define ma_parent_ptr(x) ((struct maple_pnode *)(x))
#define mas_tree_parent(x) ((unsigned long)(x->tree) | MA_ROOT_PARENT)
#define ma_mnode_ptr(x) ((struct maple_node *)(x))
#define ma_enode_ptr(x) ((struct maple_enode *)(x))
static struct kmem_cache *maple_node_cache;
#ifdef CONFIG_DEBUG_MAPLE_TREE
static const unsigned long mt_max[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS,
	[maple_leaf_64]		= ULONG_MAX,
	[maple_range_64]	= ULONG_MAX,
	[maple_arange_64]	= ULONG_MAX,
};
#define mt_node_max(x) mt_max[mte_node_type(x)]
#endif
static const unsigned char mt_slots[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS,
	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS,
	[maple_range_64]	= MAPLE_RANGE64_SLOTS,
	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS,
};
#define mt_slot_count(x) mt_slots[mte_node_type(x)]
static const unsigned char mt_pivots[] = {
	[maple_dense]		= 0,
	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS - 1,
	[maple_range_64]	= MAPLE_RANGE64_SLOTS - 1,
	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS - 1,
};
#define mt_pivot_count(x) mt_pivots[mte_node_type(x)]
static const unsigned char mt_min_slots[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS / 2,
	[maple_leaf_64]		= (MAPLE_RANGE64_SLOTS / 2) - 2,
	[maple_range_64]	= (MAPLE_RANGE64_SLOTS / 2) - 2,
	[maple_arange_64]	= (MAPLE_ARANGE64_SLOTS / 2) - 1,
};
#define mt_min_slot_count(x) mt_min_slots[mte_node_type(x)]
#define MAPLE_BIG_NODE_SLOTS	(MAPLE_RANGE64_SLOTS * 2 + 2)
#define MAPLE_BIG_NODE_GAPS	(MAPLE_ARANGE64_SLOTS * 2 + 1)

struct maple_big_node {
	struct maple_pnode *parent;
	unsigned long pivot[MAPLE_BIG_NODE_SLOTS - 1];
	union {
		struct maple_enode *slot[MAPLE_BIG_NODE_SLOTS];
		struct {
			unsigned long padding[MAPLE_BIG_NODE_GAPS];
			unsigned long gap[MAPLE_BIG_NODE_GAPS];
		};
	};
	unsigned char b_end;
	enum maple_type type;
};
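/*
 * Editor's note on sizing (illustrative): the big node must hold the contents
 * of two full nodes plus a new entry that splits an existing range, hence the
 * "* 2 + 2" above.  Assuming the usual 64-bit values of
 * MAPLE_RANGE64_SLOTS == 16 and MAPLE_ARANGE64_SLOTS == 10:
 *
 *	MAPLE_BIG_NODE_SLOTS == 16 * 2 + 2 == 34
 *	MAPLE_BIG_NODE_GAPS  == 10 * 2 + 1 == 21
 */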
/*
 * The maple_subtree_state is used to build a tree to replace a segment of an
 * existing tree in a more atomic way.  Any walkers of the older tree will hit a
 * dead node and restart on updates.
 */
struct maple_subtree_state {
	struct ma_state *orig_l;	/* Original left side of subtree */
	struct ma_state *orig_r;	/* Original right side of subtree */
	struct ma_state *l;		/* New left side of subtree */
	struct ma_state *m;		/* New middle of subtree (rare) */
	struct ma_state *r;		/* New right side of subtree */
	struct ma_topiary *free;	/* nodes to be freed */
	struct ma_topiary *destroy;	/* Nodes to be destroyed (walked and freed) */
	struct maple_big_node *bn;
};
#ifdef CONFIG_KASAN_STACK
/* Prevent mas_wr_bnode() from exceeding the stack frame limit */
#define noinline_for_kasan noinline_for_stack
#else
#define noinline_for_kasan inline
#endif
static inline struct maple_node *mt_alloc_one(gfp_t gfp)
{
	return kmem_cache_alloc(maple_node_cache, gfp);
}

static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes)
{
	return kmem_cache_alloc_bulk(maple_node_cache, gfp, size, nodes);
}

static inline void mt_free_one(struct maple_node *node)
{
	kmem_cache_free(maple_node_cache, node);
}

static inline void mt_free_bulk(size_t size, void __rcu **nodes)
{
	kmem_cache_free_bulk(maple_node_cache, size, (void **)nodes);
}

static void mt_free_rcu(struct rcu_head *head)
{
	struct maple_node *node = container_of(head, struct maple_node, rcu);

	kmem_cache_free(maple_node_cache, node);
}
/*
 * ma_free_rcu() - Use rcu callback to free a maple node
 * @node: The node to free
 *
 * The maple tree uses the parent pointer to indicate this node is no longer in
 * use and will be freed.
 */
static void ma_free_rcu(struct maple_node *node)
{
	WARN_ON(node->parent != ma_parent_ptr(node));
	call_rcu(&node->rcu, mt_free_rcu);
}
static void mas_set_height(struct ma_state *mas)
{
	unsigned int new_flags = mas->tree->ma_flags;

	new_flags &= ~MT_FLAGS_HEIGHT_MASK;
	MAS_BUG_ON(mas, mas->depth > MAPLE_HEIGHT_MAX);
	new_flags |= mas->depth << MT_FLAGS_HEIGHT_OFFSET;
	mas->tree->ma_flags = new_flags;
}

static unsigned int mas_mt_height(struct ma_state *mas)
{
	return mt_height(mas->tree);
}
static inline unsigned int mt_attr(struct maple_tree *mt)
{
	return mt->ma_flags & ~MT_FLAGS_HEIGHT_MASK;
}
static __always_inline enum maple_type mte_node_type(
		const struct maple_enode *entry)
{
	return ((unsigned long)entry >> MAPLE_NODE_TYPE_SHIFT) &
		MAPLE_NODE_TYPE_MASK;
}

static __always_inline bool ma_is_dense(const enum maple_type type)
{
	return type < maple_leaf_64;
}

static __always_inline bool ma_is_leaf(const enum maple_type type)
{
	return type < maple_range_64;
}

static __always_inline bool mte_is_leaf(const struct maple_enode *entry)
{
	return ma_is_leaf(mte_node_type(entry));
}
/*
 * We also reserve values with the bottom two bits set to '10' which are
 * below 4096
 */
static __always_inline bool mt_is_reserved(const void *entry)
{
	return ((unsigned long)entry < MAPLE_RESERVED_RANGE) &&
		xa_is_internal(entry);
}
static __always_inline void mas_set_err(struct ma_state *mas, long err)
{
	mas->node = MA_ERROR(err);
	mas->status = ma_error;
}

static __always_inline bool mas_is_ptr(const struct ma_state *mas)
{
	return mas->status == ma_root;
}

static __always_inline bool mas_is_start(const struct ma_state *mas)
{
	return mas->status == ma_start;
}

static __always_inline bool mas_is_none(const struct ma_state *mas)
{
	return mas->status == ma_none;
}

static __always_inline bool mas_is_paused(const struct ma_state *mas)
{
	return mas->status == ma_pause;
}

static __always_inline bool mas_is_overflow(struct ma_state *mas)
{
	return mas->status == ma_overflow;
}

static inline bool mas_is_underflow(struct ma_state *mas)
{
	return mas->status == ma_underflow;
}
static __always_inline struct maple_node *mte_to_node(
		const struct maple_enode *entry)
{
	return (struct maple_node *)((unsigned long)entry & ~MAPLE_NODE_MASK);
}

/*
 * mte_to_mat() - Convert a maple encoded node to a maple topiary node.
 * @entry: The maple encoded node
 *
 * Return: a maple topiary pointer
 */
static inline struct maple_topiary *mte_to_mat(const struct maple_enode *entry)
{
	return (struct maple_topiary *)
		((unsigned long)entry & ~MAPLE_NODE_MASK);
}
/*
 * mas_mn() - Get the maple state node.
 * @mas: The maple state
 *
 * Return: the maple node (not encoded - bare pointer).
 */
static inline struct maple_node *mas_mn(const struct ma_state *mas)
{
	return mte_to_node(mas->node);
}

/*
 * mte_set_node_dead() - Set a maple encoded node as dead.
 * @mn: The maple encoded node.
 */
static inline void mte_set_node_dead(struct maple_enode *mn)
{
	mte_to_node(mn)->parent = ma_parent_ptr(mte_to_node(mn));
	smp_wmb(); /* Needed for RCU */
}
/* Bit 1 indicates the root is a node */
#define MAPLE_ROOT_NODE			0x02
/* maple_type stored bit 3-6 */
#define MAPLE_ENODE_TYPE_SHIFT		0x03
/* Bit 2 means a NULL somewhere below */
#define MAPLE_ENODE_NULL		0x04

static inline struct maple_enode *mt_mk_node(const struct maple_node *node,
					     enum maple_type type)
{
	return (void *)((unsigned long)node |
			(type << MAPLE_ENODE_TYPE_SHIFT) | MAPLE_ENODE_NULL);
}

static inline void *mte_mk_root(const struct maple_enode *node)
{
	return (void *)((unsigned long)node | MAPLE_ROOT_NODE);
}

static inline void *mte_safe_root(const struct maple_enode *node)
{
	return (void *)((unsigned long)node & ~MAPLE_ROOT_NODE);
}

static inline void *mte_set_full(const struct maple_enode *node)
{
	return (void *)((unsigned long)node & ~MAPLE_ENODE_NULL);
}

static inline void *mte_clear_full(const struct maple_enode *node)
{
	return (void *)((unsigned long)node | MAPLE_ENODE_NULL);
}

static inline bool mte_has_null(const struct maple_enode *node)
{
	return (unsigned long)node & MAPLE_ENODE_NULL;
}
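/*
 * Editor's worked example (illustrative; assumes the maple_type enum values
 * from maple_tree.h, e.g. maple_range_64 == 2).  For a 256B-aligned node at
 * 0xffff888001234500:
 *
 *	mt_mk_node(node, maple_range_64)
 *		== 0xffff888001234500 | (2 << 3) | 0x04
 *		== 0xffff888001234514
 *
 * mte_node_type() recovers the type by shifting right MAPLE_NODE_TYPE_SHIFT
 * and masking, and mte_to_node() masks off the low bits to recover the node.
 */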
static __always_inline bool ma_is_root(struct maple_node *node)
{
	return ((unsigned long)node->parent & MA_ROOT_PARENT);
}

static __always_inline bool mte_is_root(const struct maple_enode *node)
{
	return ma_is_root(mte_to_node(node));
}

static inline bool mas_is_root_limits(const struct ma_state *mas)
{
	return !mas->min && mas->max == ULONG_MAX;
}

static __always_inline bool mt_is_alloc(struct maple_tree *mt)
{
	return (mt->ma_flags & MT_FLAGS_ALLOC_RANGE);
}
/*
 * The Parent Pointer
 * Excluding root, the parent pointer is 256B aligned like all other tree nodes.
 * When storing 32 or 64 bit values, the offset can fit into 5 bits.  The 16
 * bit values need an extra bit to store the offset.  This extra bit comes from
 * a reuse of the last bit in the node type.  This is possible by using bit 1 to
 * indicate if bit 2 is part of the type or the slot.
 *
 * Note types:
 *  0x??1 = Root
 *  0x?00 = 16 bit nodes
 *  0x010 = 32 bit nodes
 *  0x110 = 64 bit nodes
 *
 * Slot size and alignment
 *  0b??1 : Root
 *  0b?00 : 16 bit values, type in 0-1, slot in 2-7
 *  0b010 : 32 bit values, type in 0-2, slot in 3-7
 *  0b110 : 64 bit values, type in 0-2, slot in 3-7
 */

#define MAPLE_PARENT_ROOT		0x01

#define MAPLE_PARENT_SLOT_SHIFT		0x03
#define MAPLE_PARENT_SLOT_MASK		0xF8

#define MAPLE_PARENT_16B_SLOT_SHIFT	0x02
#define MAPLE_PARENT_16B_SLOT_MASK	0xFC

#define MAPLE_PARENT_RANGE64		0x06
#define MAPLE_PARENT_RANGE32		0x04
#define MAPLE_PARENT_NOT_RANGE16	0x02
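/*
 * Editor's worked example (illustrative).  Encoding slot 5 of a range64
 * parent located at 0xffff888001234500:
 *
 *	val = 0xffff888001234500 | (5 << MAPLE_PARENT_SLOT_SHIFT)
 *				 | MAPLE_PARENT_RANGE64;	low byte == 0x2e
 *
 * Decoding: bit 1 (MAPLE_PARENT_NOT_RANGE16) is set, so the shift is 3 and
 * (0x2e & MAPLE_PARENT_16B_SLOT_MASK) >> 3 == 5, while masking off the low
 * byte recovers the parent node pointer.
 */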
/*
 * mte_parent_shift() - Get the parent shift for the slot storage.
 * @parent: The parent pointer cast as an unsigned long
 * Return: The shift into that pointer to the start of the slot
 */
static inline unsigned long mte_parent_shift(unsigned long parent)
{
	/* Note bit 1 == 0 means 16B */
	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
		return MAPLE_PARENT_SLOT_SHIFT;

	return MAPLE_PARENT_16B_SLOT_SHIFT;
}

/*
 * mte_parent_slot_mask() - Get the slot mask for the parent.
 * @parent: The parent pointer cast as an unsigned long.
 * Return: The slot mask for that parent.
 */
static inline unsigned long mte_parent_slot_mask(unsigned long parent)
{
	/* Note bit 1 == 0 means 16B */
	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
		return MAPLE_PARENT_SLOT_MASK;

	return MAPLE_PARENT_16B_SLOT_MASK;
}
/*
 * mas_parent_type() - Return the maple_type of the parent from the stored
 * parent type.
 * @mas: The maple state
 * @enode: The maple_enode to extract the parent's enum
 * Return: The node->parent maple_type
 */
static inline
enum maple_type mas_parent_type(struct ma_state *mas, struct maple_enode *enode)
{
	unsigned long p_type;

	p_type = (unsigned long)mte_to_node(enode)->parent;
	if (WARN_ON(p_type & MAPLE_PARENT_ROOT))
		return 0;

	p_type &= MAPLE_NODE_MASK;
	p_type &= ~mte_parent_slot_mask(p_type);
	switch (p_type) {
	case MAPLE_PARENT_RANGE64: /* or MAPLE_PARENT_ARANGE64 */
		if (mt_is_alloc(mas->tree))
			return maple_arange_64;
		return maple_range_64;
	}

	return 0;
}
/*
 * mas_set_parent() - Set the parent node and encode the slot
 * @mas: The maple state
 * @enode: The encoded maple node.
 * @parent: The encoded maple node that is the parent of @enode.
 * @slot: The slot that @enode resides in @parent.
 *
 * Slot number is encoded in the enode->parent bit 3-6 or 2-6, depending on the
 * parent type.
 */
static inline
void mas_set_parent(struct ma_state *mas, struct maple_enode *enode,
		    const struct maple_enode *parent, unsigned char slot)
{
	unsigned long val = (unsigned long)parent;
	unsigned long shift;
	unsigned long type;
	enum maple_type p_type = mte_node_type(parent);

	MAS_BUG_ON(mas, p_type == maple_dense);
	MAS_BUG_ON(mas, p_type == maple_leaf_64);

	switch (p_type) {
	case maple_range_64:
	case maple_arange_64:
		shift = MAPLE_PARENT_SLOT_SHIFT;
		type = MAPLE_PARENT_RANGE64;
		break;
	default:
		shift = type = 0;
		break;
	}

	val &= ~MAPLE_NODE_MASK; /* Clear all node metadata in parent */
	val |= (slot << shift) | type;
	mte_to_node(enode)->parent = ma_parent_ptr(val);
}
/*
 * mte_parent_slot() - get the parent slot of @enode.
 * @enode: The encoded maple node.
 *
 * Return: The slot in the parent node where @enode resides.
 */
static __always_inline
unsigned int mte_parent_slot(const struct maple_enode *enode)
{
	unsigned long val = (unsigned long)mte_to_node(enode)->parent;

	if (unlikely(val & MA_ROOT_PARENT))
		return 0;

	/*
	 * Okay to use MAPLE_PARENT_16B_SLOT_MASK as the last bit will be lost
	 * by shift if the parent shift is MAPLE_PARENT_SLOT_SHIFT
	 */
	return (val & MAPLE_PARENT_16B_SLOT_MASK) >> mte_parent_shift(val);
}
/*
 * mte_parent() - Get the parent of @enode.
 * @enode: The encoded maple node.
 *
 * Return: The parent maple node.
 */
static __always_inline
struct maple_node *mte_parent(const struct maple_enode *enode)
{
	return (void *)((unsigned long)
			(mte_to_node(enode)->parent) & ~MAPLE_NODE_MASK);
}
/*
 * ma_dead_node() - check if the @node is dead.
 * @node: The maple node
 *
 * Return: true if dead, false otherwise.
 */
static __always_inline bool ma_dead_node(const struct maple_node *node)
{
	struct maple_node *parent;

	/* Do not reorder reads from the node prior to the parent check */
	smp_rmb();
	parent = (void *)((unsigned long) node->parent & ~MAPLE_NODE_MASK);
	return (parent == node);
}

/*
 * mte_dead_node() - check if the @enode is dead.
 * @enode: The encoded maple node
 *
 * Return: true if dead, false otherwise.
 */
static __always_inline bool mte_dead_node(const struct maple_enode *enode)
{
	struct maple_node *parent, *node;

	node = mte_to_node(enode);
	/* Do not reorder reads from the node prior to the parent check */
	smp_rmb();
	parent = mte_parent(enode);
	return (parent == node);
}
/*
 * mas_allocated() - Get the number of nodes allocated in a maple state.
 * @mas: The maple state
 *
 * The ma_state alloc member is overloaded to hold a pointer to the first
 * allocated node or to the number of requested nodes to allocate.  If bit 0 is
 * set, then the alloc contains the number of requested nodes.  If there is an
 * allocated node, then the total allocated nodes is in that node.
 *
 * Return: The total number of nodes allocated
 */
static inline unsigned long mas_allocated(const struct ma_state *mas)
{
	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1))
		return 0;

	return mas->alloc->total;
}

/*
 * mas_set_alloc_req() - Set the requested number of allocations.
 * @mas: the maple state
 * @count: the number of allocations.
 *
 * The requested number of allocations is either in the first allocated node,
 * located in @mas->alloc->request_count, or directly in @mas->alloc if there is
 * no allocated node.  Set the request either in the node or do the necessary
 * encoding to store in @mas->alloc directly.
 */
static inline void mas_set_alloc_req(struct ma_state *mas, unsigned long count)
{
	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1)) {
		if (!count)
			mas->alloc = NULL;
		else
			mas->alloc = (struct maple_alloc *)(((count) << 1U) | 1U);
		return;
	}

	mas->alloc->request_count = count;
}

/*
 * mas_alloc_req() - get the requested number of allocations.
 * @mas: The maple state
 *
 * The alloc count is either stored directly in @mas, or in
 * @mas->alloc->request_count if there is at least one node allocated.  Decode
 * the request count if it's stored directly in @mas->alloc.
 *
 * Return: The allocation request count.
 */
static inline unsigned int mas_alloc_req(const struct ma_state *mas)
{
	if ((unsigned long)mas->alloc & 0x1)
		return (unsigned long)(mas->alloc) >> 1;
	else if (mas->alloc)
		return mas->alloc->request_count;
	return 0;
}
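/*
 * Editor's encoding sketch (illustrative).  With no allocated nodes, a
 * request for three nodes is stored directly in mas->alloc with bit 0 set as
 * the marker:
 *
 *	mas->alloc = (struct maple_alloc *)((3 << 1) | 1);	== 0x7
 *
 * mas_alloc_req() then decodes 0x7 >> 1 == 3, while mas_allocated() treats
 * the same value as "no nodes allocated".
 */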
/*
 * ma_pivots() - Get a pointer to the maple node pivots.
 * @node: The maple node
 * @type: The node type
 *
 * In the event of a dead node, this array may be %NULL
 *
 * Return: A pointer to the maple node pivots
 */
static inline unsigned long *ma_pivots(struct maple_node *node,
				       enum maple_type type)
{
	switch (type) {
	case maple_arange_64:
		return node->ma64.pivot;
	case maple_range_64:
	case maple_leaf_64:
		return node->mr64.pivot;
	case maple_dense:
		return NULL;
	}
	return NULL;
}

/*
 * ma_gaps() - Get a pointer to the maple node gaps.
 * @node: The maple node
 * @type: The node type
 *
 * Return: A pointer to the maple node gaps
 */
static inline unsigned long *ma_gaps(struct maple_node *node,
				     enum maple_type type)
{
	switch (type) {
	case maple_arange_64:
		return node->ma64.gap;
	case maple_range_64:
	case maple_leaf_64:
	case maple_dense:
		return NULL;
	}
	return NULL;
}
/*
 * mas_safe_pivot() - get the pivot at @piv or mas->max.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @piv: The pivot to fetch
 * @type: The maple node type
 *
 * Return: The pivot at @piv within the limit of the @pivots array, @mas->max
 * otherwise.
 */
static __always_inline unsigned long
mas_safe_pivot(const struct ma_state *mas, unsigned long *pivots,
	       unsigned char piv, enum maple_type type)
{
	if (piv >= mt_pivots[type])
		return mas->max;

	return pivots[piv];
}

/*
 * mas_safe_min() - Return the minimum for a given offset.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @offset: The offset into the pivot array
 *
 * Return: The minimum range value that is contained in @offset.
 */
static inline unsigned long
mas_safe_min(struct ma_state *mas, unsigned long *pivots, unsigned char offset)
{
	if (likely(offset))
		return pivots[offset - 1] + 1;

	return mas->min;
}
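/*
 * Editor's worked example (illustrative).  In a node covering
 * [mas->min, mas->max] with pivots {0x0fff, 0x1fff, 0, ...}:
 *
 *	mas_safe_min(mas, pivots, 0) == mas->min
 *	mas_safe_min(mas, pivots, 2) == pivots[1] + 1 == 0x2000
 *	mas_safe_pivot(mas, pivots, mt_pivots[type], type) == mas->max
 *
 * The first offset inherits the node minimum and the last offset inherits the
 * implied pivot, which is the node maximum.
 */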
/*
 * mte_set_pivot() - Set a pivot to a value in an encoded maple node.
 * @mn: The encoded maple node
 * @piv: The pivot offset
 * @val: The value of the pivot
 */
static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv,
				 unsigned long val)
{
	struct maple_node *node = mte_to_node(mn);
	enum maple_type type = mte_node_type(mn);

	BUG_ON(piv >= mt_pivots[type]);
	switch (type) {
	case maple_range_64:
	case maple_leaf_64:
		node->mr64.pivot[piv] = val;
		break;
	case maple_arange_64:
		node->ma64.pivot[piv] = val;
		break;
	case maple_dense:
		break;
	}
}
/*
 * ma_slots() - Get a pointer to the maple node slots.
 * @mn: The maple node
 * @mt: The maple node type
 *
 * Return: A pointer to the maple node slots
 */
static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt)
{
	switch (mt) {
	case maple_arange_64:
		return mn->ma64.slot;
	case maple_range_64:
	case maple_leaf_64:
		return mn->mr64.slot;
	case maple_dense:
	default:
		return mn->slot;
	}
}
static inline bool mt_write_locked(const struct maple_tree *mt)
{
	return mt_external_lock(mt) ? mt_write_lock_is_held(mt) :
		lockdep_is_held(&mt->ma_lock);
}

static __always_inline bool mt_locked(const struct maple_tree *mt)
{
	return mt_external_lock(mt) ? mt_lock_is_held(mt) :
		lockdep_is_held(&mt->ma_lock);
}

static __always_inline void *mt_slot(const struct maple_tree *mt,
		void __rcu **slots, unsigned char offset)
{
	return rcu_dereference_check(slots[offset], mt_locked(mt));
}

static __always_inline void *mt_slot_locked(struct maple_tree *mt,
		void __rcu **slots, unsigned char offset)
{
	return rcu_dereference_protected(slots[offset], mt_write_locked(mt));
}
/*
 * mas_slot_locked() - Get the slot value when holding the maple tree lock.
 * @mas: The maple state
 * @slots: The pointer to the slots
 * @offset: The offset into the slots array to fetch
 *
 * Return: The entry stored in @slots at the @offset.
 */
static __always_inline void *mas_slot_locked(struct ma_state *mas,
		void __rcu **slots, unsigned char offset)
{
	return mt_slot_locked(mas->tree, slots, offset);
}

/*
 * mas_slot() - Get the slot value when not holding the maple tree lock.
 * @mas: The maple state
 * @slots: The pointer to the slots
 * @offset: The offset into the slots array to fetch
 *
 * Return: The entry stored in @slots at the @offset
 */
static __always_inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
		unsigned char offset)
{
	return mt_slot(mas->tree, slots, offset);
}
/*
 * mas_root() - Get the maple tree root.
 * @mas: The maple state.
 *
 * Return: The pointer to the root of the tree
 */
static __always_inline void *mas_root(struct ma_state *mas)
{
	return rcu_dereference_check(mas->tree->ma_root, mt_locked(mas->tree));
}

static inline void *mt_root_locked(struct maple_tree *mt)
{
	return rcu_dereference_protected(mt->ma_root, mt_write_locked(mt));
}

/*
 * mas_root_locked() - Get the maple tree root when holding the maple tree lock.
 * @mas: The maple state.
 *
 * Return: The pointer to the root of the tree
 */
static inline void *mas_root_locked(struct ma_state *mas)
{
	return mt_root_locked(mas->tree);
}
static inline struct maple_metadata *ma_meta(struct maple_node *mn,
					     enum maple_type mt)
{
	switch (mt) {
	case maple_arange_64:
		return &mn->ma64.meta;
	default:
		return &mn->mr64.meta;
	}
}
/*
 * ma_set_meta() - Set the metadata information of a node.
 * @mn: The maple node
 * @mt: The maple node type
 * @offset: The offset of the highest sub-gap in this node.
 * @end: The end of the data in this node.
 */
static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt,
			       unsigned char offset, unsigned char end)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	meta->gap = offset;
	meta->end = end;
}
/*
 * mt_clear_meta() - clear the metadata information of a node, if it exists
 * @mt: The maple tree
 * @mn: The maple node
 * @type: The maple node type
 */
static inline void mt_clear_meta(struct maple_tree *mt, struct maple_node *mn,
				 enum maple_type type)
{
	struct maple_metadata *meta;
	unsigned long *pivots;
	void __rcu **slots;
	void *next;

	switch (type) {
	case maple_range_64:
		pivots = mn->mr64.pivot;
		if (unlikely(pivots[MAPLE_RANGE64_SLOTS - 2])) {
			slots = mn->mr64.slot;
			next = mt_slot_locked(mt, slots,
					      MAPLE_RANGE64_SLOTS - 1);
			if (unlikely((mte_to_node(next) &&
				      mte_node_type(next))))
				return; /* no metadata, could be node */
		}
		fallthrough;
	case maple_arange_64:
		meta = ma_meta(mn, type);
		break;
	default:
		return;
	}

	meta->gap = 0;
	meta->end = 0;
}
/*
 * ma_meta_end() - Get the data end of a node from the metadata
 * @mn: The maple node
 * @mt: The maple node type
 */
static inline unsigned char ma_meta_end(struct maple_node *mn,
					enum maple_type mt)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	return meta->end;
}
/*
 * ma_meta_gap() - Get the largest gap location of a node from the metadata
 * @mn: The maple node
 */
static inline unsigned char ma_meta_gap(struct maple_node *mn)
{
	return mn->ma64.meta.gap;
}
/*
 * ma_set_meta_gap() - Set the largest gap location in a node's metadata
 * @mn: The maple node
 * @mt: The maple node type
 * @offset: The location of the largest gap.
 */
static inline void ma_set_meta_gap(struct maple_node *mn, enum maple_type mt,
				   unsigned char offset)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	meta->gap = offset;
}
/*
 * mat_add() - Add a @dead_enode to the ma_topiary of a list of dead nodes.
 * @mat: the ma_topiary, a linked list of dead nodes.
 * @dead_enode: the node to be marked as dead and added to the tail of the list
 *
 * Add the @dead_enode to the linked list in @mat.
 */
static inline void mat_add(struct ma_topiary *mat,
			   struct maple_enode *dead_enode)
{
	mte_set_node_dead(dead_enode);
	mte_to_mat(dead_enode)->next = NULL;
	if (!mat->tail) {
		mat->tail = mat->head = dead_enode;
		return;
	}

	mte_to_mat(mat->tail)->next = dead_enode;
	mat->tail = dead_enode;
}
static void mt_free_walk(struct rcu_head *head);
static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
			    bool free);
/*
 * mas_mat_destroy() - Free all nodes and subtrees in a dead list.
 * @mas: the maple state
 * @mat: the ma_topiary linked list of dead nodes to free.
 *
 * Destroy walk a dead list.
 */
static void mas_mat_destroy(struct ma_state *mas, struct ma_topiary *mat)
{
	struct maple_enode *next;
	struct maple_node *node;
	bool in_rcu = mt_in_rcu(mas->tree);

	while (mat->head) {
		next = mte_to_mat(mat->head)->next;
		node = mte_to_node(mat->head);
		mt_destroy_walk(mat->head, mas->tree, !in_rcu);
		if (in_rcu)
			call_rcu(&node->rcu, mt_free_walk);
		mat->head = next;
	}
}
/*
 * mas_descend() - Descend into the slot stored in the ma_state.
 * @mas: the maple state.
 *
 * Note: Not RCU safe, only use in write side or debug code.
 */
static inline void mas_descend(struct ma_state *mas)
{
	enum maple_type type;
	unsigned long *pivots;
	struct maple_node *node;
	void __rcu **slots;

	node = mas_mn(mas);
	type = mte_node_type(mas->node);
	pivots = ma_pivots(node, type);
	slots = ma_slots(node, type);

	if (mas->offset)
		mas->min = pivots[mas->offset - 1] + 1;
	mas->max = mas_safe_pivot(mas, pivots, mas->offset, type);
	mas->node = mas_slot(mas, slots, mas->offset);
}
/*
 * mte_set_gap() - Set a maple node gap.
 * @mn: The encoded maple node
 * @gap: The offset of the gap to set
 * @val: The gap value
 */
static inline void mte_set_gap(const struct maple_enode *mn,
			       unsigned char gap, unsigned long val)
{
	switch (mte_node_type(mn)) {
	default:
		break;
	case maple_arange_64:
		mte_to_node(mn)->ma64.gap[gap] = val;
		break;
	}
}
/*
 * mas_ascend() - Walk up a level of the tree.
 * @mas: The maple state
 *
 * Sets the @mas->max and @mas->min to the correct values when walking up.  This
 * may cause several levels of walking up to find the correct min and max.
 * May find a dead node which will cause a premature return.
 * Return: 1 on dead node, 0 otherwise
 */
static int mas_ascend(struct ma_state *mas)
{
	struct maple_enode *p_enode; /* parent enode. */
	struct maple_enode *a_enode; /* ancestor enode. */
	struct maple_node *a_node; /* ancestor node. */
	struct maple_node *p_node; /* parent node. */
	unsigned char a_slot;
	enum maple_type a_type;
	unsigned long min, max;
	unsigned long *pivots;
	bool set_max = false, set_min = false;

	a_node = mas_mn(mas);
	if (ma_is_root(a_node)) {
		mas->offset = 0;
		return 0;
	}

	p_node = mte_parent(mas->node);
	if (unlikely(a_node == p_node))
		return 1;

	a_type = mas_parent_type(mas, mas->node);
	mas->offset = mte_parent_slot(mas->node);
	a_enode = mt_mk_node(p_node, a_type);

	/* Check to make sure all parent information is still accurate */
	if (p_node != mte_parent(mas->node))
		return 1;

	mas->node = a_enode;

	if (mte_is_root(a_enode)) {
		mas->max = ULONG_MAX;
		mas->min = 0;
		return 0;
	}

	min = 0;
	max = ULONG_MAX;
	if (!mas->offset) {
		min = mas->min;
		set_min = true;
	}

	if (mas->max == ULONG_MAX)
		set_max = true;

	do {
		p_enode = a_enode;
		a_type = mas_parent_type(mas, p_enode);
		a_node = mte_parent(p_enode);
		a_slot = mte_parent_slot(p_enode);
		a_enode = mt_mk_node(a_node, a_type);
		pivots = ma_pivots(a_node, a_type);

		if (unlikely(ma_dead_node(a_node)))
			return 1;

		if (!set_min && a_slot) {
			set_min = true;
			min = pivots[a_slot - 1] + 1;
		}

		if (!set_max && a_slot < mt_pivots[a_type]) {
			set_max = true;
			max = pivots[a_slot];
		}

		if (unlikely(ma_dead_node(a_node)))
			return 1;

		if (unlikely(ma_is_root(a_node)))
			break;

	} while (!set_min || !set_max);

	mas->max = max;
	mas->min = min;
	return 0;
}
/*
 * mas_pop_node() - Get a previously allocated maple node from the maple state.
 * @mas: The maple state
 *
 * Return: A pointer to a maple node.
 */
static inline struct maple_node *mas_pop_node(struct ma_state *mas)
{
	struct maple_alloc *ret, *node = mas->alloc;
	unsigned long total = mas_allocated(mas);
	unsigned int req = mas_alloc_req(mas);

	/* nothing or a request pending. */
	if (WARN_ON(!total))
		return NULL;

	if (total == 1) {
		/* single allocation in this ma_state */
		mas->alloc = NULL;
		ret = node;
		goto single_node;
	}

	if (node->node_count == 1) {
		/* Single allocation in this node. */
		mas->alloc = node->slot[0];
		mas->alloc->total = node->total - 1;
		ret = node;
		goto new_head;
	}

	mas->alloc->total--;
	ret = node->slot[--node->node_count];
	node->slot[node->node_count] = NULL;

single_node:
new_head:
	if (req) {
		req++;
		mas_set_alloc_req(mas, req);
	}

	memset(ret, 0, sizeof(*ret));
	return (struct maple_node *)ret;
}
/*
 * mas_push_node() - Push a node back on the maple state allocation.
 * @mas: The maple state
 * @used: The used maple node
 *
 * Stores the maple node back into @mas->alloc for reuse.  Updates allocated and
 * requested node count as necessary.
 */
static inline void mas_push_node(struct ma_state *mas, struct maple_node *used)
{
	struct maple_alloc *reuse = (struct maple_alloc *)used;
	struct maple_alloc *head = mas->alloc;
	unsigned long count;
	unsigned int requested = mas_alloc_req(mas);

	count = mas_allocated(mas);

	reuse->request_count = 0;
	reuse->node_count = 0;
	if (count && (head->node_count < MAPLE_ALLOC_SLOTS)) {
		head->slot[head->node_count++] = reuse;
		head->total++;
		goto done;
	}

	reuse->total = 1;
	if ((head) && !((unsigned long)head & 0x1)) {
		reuse->slot[0] = head;
		reuse->node_count = 1;
		reuse->total += head->total;
	}

	mas->alloc = reuse;
done:
	if (requested > 1)
		mas_set_alloc_req(mas, requested - 1);
}
/*
 * mas_alloc_nodes() - Allocate nodes into a maple state
 * @mas: The maple state
 * @gfp: The GFP Flags
 */
static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
{
	struct maple_alloc *node;
	unsigned long allocated = mas_allocated(mas);
	unsigned int requested = mas_alloc_req(mas);
	unsigned int count;
	void **slots = NULL;
	unsigned int max_req = 0;

	if (!requested)
		return;

	mas_set_alloc_req(mas, 0);
	if (mas->mas_flags & MA_STATE_PREALLOC) {
		if (allocated)
			return;
		WARN_ON(!allocated);
	}

	if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS) {
		node = (struct maple_alloc *)mt_alloc_one(gfp);
		if (!node)
			goto nomem_one;

		if (allocated) {
			node->slot[0] = mas->alloc;
			node->node_count = 1;
		} else {
			node->node_count = 0;
		}

		mas->alloc = node;
		node->total = ++allocated;
		requested--;
	}

	node = mas->alloc;
	node->request_count = 0;
	while (requested) {
		max_req = MAPLE_ALLOC_SLOTS - node->node_count;
		slots = (void **)&node->slot[node->node_count];
		max_req = min(requested, max_req);
		count = mt_alloc_bulk(gfp, max_req, slots);
		if (!count)
			goto nomem_bulk;

		if (node->node_count == 0) {
			node->slot[0]->node_count = 0;
			node->slot[0]->request_count = 0;
		}

		node->node_count += count;
		allocated += count;
		node = node->slot[0];
		requested -= count;
	}
	mas->alloc->total = allocated;
	return;

nomem_bulk:
	/* Clean up potential freed allocations on bulk failure */
	memset(slots, 0, max_req * sizeof(unsigned long));
nomem_one:
	mas_set_alloc_req(mas, requested);
	if (mas->alloc && !(((unsigned long)mas->alloc & 0x1)))
		mas->alloc->total = allocated;
	mas_set_err(mas, -ENOMEM);
}
/*
 * mas_free() - Free an encoded maple node
 * @mas: The maple state
 * @used: The encoded maple node to free.
 *
 * Uses rcu free if necessary, pushes @used back on the maple state allocations
 * otherwise.
 */
static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
{
	struct maple_node *tmp = mte_to_node(used);

	if (mt_in_rcu(mas->tree))
		ma_free_rcu(tmp);
	else
		mas_push_node(mas, tmp);
}
/*
 * mas_node_count_gfp() - Check if enough nodes are allocated and request more
 * if there are not enough nodes.
 * @mas: The maple state
 * @count: The number of nodes needed
 * @gfp: the gfp flags
 */
static void mas_node_count_gfp(struct ma_state *mas, int count, gfp_t gfp)
{
	unsigned long allocated = mas_allocated(mas);

	if (allocated < count) {
		mas_set_alloc_req(mas, count - allocated);
		mas_alloc_nodes(mas, gfp);
	}
}

/*
 * mas_node_count() - Check if enough nodes are allocated and request more if
 * there are not enough nodes.
 * @mas: The maple state
 * @count: The number of nodes needed
 *
 * Note: Uses GFP_NOWAIT | __GFP_NOWARN for gfp flags.
 */
static void mas_node_count(struct ma_state *mas, int count)
{
	return mas_node_count_gfp(mas, count, GFP_NOWAIT | __GFP_NOWARN);
}
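/*
 * Editor's usage sketch (illustrative; locking and error handling
 * simplified).  Write paths in this file follow a request-then-pop pattern
 * built on the helpers above:
 *
 *	mas_node_count_gfp(mas, 3, GFP_KERNEL);	// request three nodes
 *	if (mas_is_err(mas))
 *		return xa_err(mas->node);	// typically -ENOMEM
 *	node = mas_pop_node(mas);		// take one preallocated node
 *
 * mas_is_err() and xa_err() come from the public headers.
 */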
/*
 * mas_start() - Sets up maple state for operations.
 * @mas: The maple state.
 *
 * If mas->status == ma_start, then set the min, max and depth to
 * defaults.
 *
 * Return:
 * - If mas->node is an error or not ma_start, return NULL.
 * - If it's an empty tree:     NULL & mas->status == ma_none
 * - If it's a single entry:    The entry & mas->status == ma_root
 * - If it's a tree:            NULL & mas->status == safe root node.
 */
static inline struct maple_enode *mas_start(struct ma_state *mas)
{
	if (likely(mas_is_start(mas))) {
		struct maple_enode *root;

		mas->min = 0;
		mas->max = ULONG_MAX;

retry:
		mas->depth = 0;
		root = mas_root(mas);
		/* Tree with nodes */
		if (likely(xa_is_node(root))) {
			mas->depth = 1;
			mas->status = ma_active;
			mas->node = mte_safe_root(root);
			mas->offset = 0;
			if (mte_dead_node(mas->node))
				goto retry;

			return NULL;
		}

		mas->node = NULL;
		/* empty tree */
		if (unlikely(!root)) {
			mas->status = ma_none;
			mas->offset = MAPLE_NODE_SLOTS;
			return NULL;
		}

		/* Single entry tree */
		mas->status = ma_root;
		mas->offset = MAPLE_NODE_SLOTS;

		/* Single entry tree. */
		if (mas->index > 0)
			return NULL;

		return root;
	}

	return NULL;
}
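/*
 * Editor's sketch of the three mas_start() outcomes (illustrative):
 *
 *	entry = mas_start(&mas);
 *	if (mas_is_none(&mas))		// empty tree, entry == NULL
 *		...
 *	else if (mas_is_ptr(&mas))	// single entry stored at index 0;
 *		...			// entry is that value
 *	else				// mas.node is the safe root;
 *		...			// descend to continue the walk
 */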
/*
 * ma_data_end() - Find the end of the data in a node.
 * @node: The maple node
 * @type: The maple node type
 * @pivots: The array of pivots in the node
 * @max: The maximum value in the node
 *
 * Uses metadata to find the end of the data when possible.
 * Return: The zero indexed last slot with data (may be null).
 */
static __always_inline unsigned char ma_data_end(struct maple_node *node,
		enum maple_type type, unsigned long *pivots, unsigned long max)
{
	unsigned char offset;

	if (!pivots)
		return 0;

	if (type == maple_arange_64)
		return ma_meta_end(node, type);

	offset = mt_pivots[type] - 1;
	if (likely(!pivots[offset]))
		return ma_meta_end(node, type);

	if (likely(pivots[offset] == max))
		return offset;

	return mt_pivots[type];
}
/*
 * mas_data_end() - Find the end of the data (slot).
 * @mas: the maple state
 *
 * This method is optimized to check the metadata of a node if the node type
 * supports data end metadata.
 *
 * Return: The zero indexed last slot with data (may be null).
 */
static inline unsigned char mas_data_end(struct ma_state *mas)
{
	enum maple_type type;
	struct maple_node *node;
	unsigned char offset;
	unsigned long *pivots;

	type = mte_node_type(mas->node);
	node = mas_mn(mas);
	if (type == maple_arange_64)
		return ma_meta_end(node, type);

	pivots = ma_pivots(node, type);
	if (unlikely(ma_dead_node(node)))
		return 0;

	offset = mt_pivots[type] - 1;
	if (likely(!pivots[offset]))
		return ma_meta_end(node, type);

	if (likely(pivots[offset] == mas->max))
		return offset;

	return mt_pivots[type];
}
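/*
 * Editor's worked example (illustrative) for a range64 node with 15 pivots
 * and mas->max == 0x2fff:
 *
 *	pivots[14] == 0       -> end is stored in the metadata (ma_meta_end())
 *	pivots[14] == 0x2fff  -> end is offset 14; the node is completely full
 *	otherwise             -> end is slot 15, the slot past the last pivot
 *
 * A zero in the last pivot is what frees the end of the node to hold the
 * metadata.
 */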
/*
 * mas_leaf_max_gap() - Returns the largest gap in a leaf node
 * @mas: the maple state
 *
 * Return: The maximum gap in the leaf.
 */
static unsigned long mas_leaf_max_gap(struct ma_state *mas)
{
	enum maple_type mt;
	unsigned long pstart, gap, max_gap;
	struct maple_node *mn;
	unsigned long *pivots;
	void __rcu **slots;
	unsigned char i;
	unsigned char max_piv;

	mt = mte_node_type(mas->node);
	mn = mas_mn(mas);
	slots = ma_slots(mn, mt);
	max_gap = 0;
	if (unlikely(ma_is_dense(mt))) {
		gap = 0;
		for (i = 0; i < mt_slots[mt]; i++) {
			if (slots[i]) {
				if (gap > max_gap)
					max_gap = gap;
				gap = 0;
			} else {
				gap++;
			}
		}
		if (gap > max_gap)
			max_gap = gap;
		return max_gap;
	}

	/*
	 * Check the first implied pivot optimizes the loop below and slot 1 may
	 * be skipped if there is a gap in slot 0.
	 */
	pivots = ma_pivots(mn, mt);
	if (likely(!slots[0])) {
		max_gap = pivots[0] - mas->min + 1;
		i = 2;
	} else {
		i = 1;
	}

	/* reduce max_piv as the special case is checked before the loop */
	max_piv = ma_data_end(mn, mt, pivots, mas->max) - 1;
	/*
	 * Check end implied pivot which can only be a gap on the right most
	 * node.
	 */
	if (unlikely(mas->max == ULONG_MAX) && !slots[max_piv + 1]) {
		gap = ULONG_MAX - pivots[max_piv];
		if (gap > max_gap)
			max_gap = gap;

		if (max_gap > pivots[max_piv] - mas->min)
			return max_gap;
	}

	for (; i <= max_piv; i++) {
		/* data == no gap. */
		if (likely(slots[i]))
			continue;

		pstart = pivots[i - 1];
		gap = pivots[i] - pstart;
		if (gap > max_gap)
			max_gap = gap;

		/* There cannot be two gaps in a row. */
		i++;
	}
	return max_gap;
}
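/*
 * Editor's worked example (illustrative).  A leaf with mas->min == 0,
 * pivots {99, 199, 299} and slots {entryA, NULL, entryB, NULL}:
 *
 *	slot 1 is empty and covers 100-199: gap = pivots[1] - pivots[0] = 100
 *	slot 3 is empty and last, covering 300 up to mas->max
 *
 * The loop above skips occupied slots and, since two gaps cannot be adjacent,
 * also skips the slot that follows each gap it measures.
 */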
/*
 * ma_max_gap() - Get the maximum gap in a maple node (non-leaf)
 * @node: The maple node
 * @gaps: The pointer to the gaps
 * @mt: The maple node type
 * @off: Pointer to store the offset location of the gap.
 *
 * Uses the metadata data end to scan backwards across set gaps.
 *
 * Return: The maximum gap value
 */
static inline unsigned long
ma_max_gap(struct maple_node *node, unsigned long *gaps, enum maple_type mt,
	   unsigned char *off)
{
	unsigned char offset, i;
	unsigned long max_gap = 0;

	i = offset = ma_meta_end(node, mt);
	do {
		if (gaps[i] > max_gap) {
			max_gap = gaps[i];
			offset = i;
		}
	} while (i--);

	*off = offset;
	return max_gap;
}
/*
 * mas_max_gap() - find the largest gap in a non-leaf node and set the slot.
 * @mas: The maple state.
 *
 * Return: The gap value.
 */
static inline unsigned long mas_max_gap(struct ma_state *mas)
{
	unsigned long *gaps;
	unsigned char offset;
	enum maple_type mt;
	struct maple_node *node;

	mt = mte_node_type(mas->node);
	if (ma_is_leaf(mt))
		return mas_leaf_max_gap(mas);

	node = mas_mn(mas);
	MAS_BUG_ON(mas, mt != maple_arange_64);
	offset = ma_meta_gap(node);
	gaps = ma_gaps(node, mt);
	return gaps[offset];
}
/*
 * mas_parent_gap() - Set the parent gap and any gaps above, as needed
 * @mas: The maple state
 * @offset: The gap offset in the parent to set
 * @new: The new gap value.
 *
 * Set the parent gap then continue to set the gap upwards, using the metadata
 * of the parent to see if it is necessary to check the node above.
 */
static inline void mas_parent_gap(struct ma_state *mas, unsigned char offset,
		unsigned long new)
{
	unsigned long meta_gap = 0;
	struct maple_node *pnode;
	struct maple_enode *penode;
	unsigned long *pgaps;
	unsigned char meta_offset;
	enum maple_type pmt;

	pnode = mte_parent(mas->node);
	pmt = mas_parent_type(mas, mas->node);
	penode = mt_mk_node(pnode, pmt);
	pgaps = ma_gaps(pnode, pmt);

ascend:
	MAS_BUG_ON(mas, pmt != maple_arange_64);
	meta_offset = ma_meta_gap(pnode);
	meta_gap = pgaps[meta_offset];

	pgaps[offset] = new;

	if (meta_gap == new)
		return;

	if (offset != meta_offset) {
		if (meta_gap > new)
			return;

		ma_set_meta_gap(pnode, pmt, offset);
	} else if (new < meta_gap) {
		new = ma_max_gap(pnode, pgaps, pmt, &meta_offset);
		ma_set_meta_gap(pnode, pmt, meta_offset);
	}

	if (ma_is_root(pnode))
		return;

	/* Go to the parent node. */
	pnode = mte_parent(penode);
	pmt = mas_parent_type(mas, penode);
	pgaps = ma_gaps(pnode, pmt);
	offset = mte_parent_slot(penode);
	penode = mt_mk_node(pnode, pmt);
	goto ascend;
}
/*
 * mas_update_gap() - Update a node's gaps and propagate up if necessary.
 * @mas: the maple state.
 */
static inline void mas_update_gap(struct ma_state *mas)
{
	unsigned char pslot;
	unsigned long p_gap;
	unsigned long max_gap;

	if (!mt_is_alloc(mas->tree))
		return;

	if (mte_is_root(mas->node))
		return;

	max_gap = mas_max_gap(mas);

	pslot = mte_parent_slot(mas->node);
	p_gap = ma_gaps(mte_parent(mas->node),
			mas_parent_type(mas, mas->node))[pslot];

	if (p_gap != max_gap)
		mas_parent_gap(mas, pslot, max_gap);
}
/*
 * mas_adopt_children() - Set the parent pointer of all nodes in @parent to
 * @parent with the slot encoded.
 * @mas: the maple state (for the tree)
 * @parent: the maple encoded node containing the children.
 */
static inline void mas_adopt_children(struct ma_state *mas,
		struct maple_enode *parent)
{
	enum maple_type type = mte_node_type(parent);
	struct maple_node *node = mte_to_node(parent);
	void __rcu **slots = ma_slots(node, type);
	unsigned long *pivots = ma_pivots(node, type);
	struct maple_enode *child;
	unsigned char offset;

	offset = ma_data_end(node, type, pivots, mas->max);
	do {
		child = mas_slot_locked(mas, slots, offset);
		mas_set_parent(mas, child, parent, offset);
	} while (offset--);
}
/*
 * mas_put_in_tree() - Put a new node in the tree, smp_wmb(), and mark the old
 * node as dead.
 * @mas: the maple state with the new node
 * @old_enode: The old maple encoded node to replace.
 */
static inline void mas_put_in_tree(struct ma_state *mas,
		struct maple_enode *old_enode)
	__must_hold(mas->tree->ma_lock)
{
	unsigned char offset;
	void __rcu **slots;

	if (mte_is_root(mas->node)) {
		mas_mn(mas)->parent = ma_parent_ptr(mas_tree_parent(mas));
		rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
		mas_set_height(mas);
	} else {
		offset = mte_parent_slot(mas->node);
		slots = ma_slots(mte_parent(mas->node),
				 mas_parent_type(mas, mas->node));
		rcu_assign_pointer(slots[offset], mas->node);
	}

	mte_set_node_dead(old_enode);
}
/*
 * mas_replace_node() - Replace a node by putting it in the tree, marking it
 * dead, and freeing it.  Uses the parent encoding to locate the maple node in
 * the tree.
 * @mas: the ma_state with @mas->node pointing to the new node.
 * @old_enode: The old maple encoded node.
 */
static inline void mas_replace_node(struct ma_state *mas,
		struct maple_enode *old_enode)
	__must_hold(mas->tree->ma_lock)
{
	mas_put_in_tree(mas, old_enode);
	mas_free(mas, old_enode);
}
/*
 * mas_find_child() - Find a child who has the parent @mas->node.
 * @mas: the maple state with the parent.
 * @child: the maple state to store the child.
 */
static inline bool mas_find_child(struct ma_state *mas, struct ma_state *child)
	__must_hold(mas->tree->ma_lock)
{
	enum maple_type mt;
	unsigned char offset;
	unsigned char end;
	unsigned long *pivots;
	struct maple_enode *entry;
	struct maple_node *node;
	void __rcu **slots;

	mt = mte_node_type(mas->node);
	node = mas_mn(mas);
	slots = ma_slots(node, mt);
	pivots = ma_pivots(node, mt);
	end = ma_data_end(node, mt, pivots, mas->max);
	for (offset = mas->offset; offset <= end; offset++) {
		entry = mas_slot_locked(mas, slots, offset);
		if (mte_parent(entry) == node) {
			*child = *mas;
			mas->offset = offset + 1;
			child->offset = offset;
			mas_descend(child);
			child->offset = 0;
			return true;
		}
	}
	return false;
}
/*
 * mab_shift_right() - Shift the data in mab right.  Note, does not clean out
 * the old data or set b_node->b_end.
 * @b_node: the maple_big_node
 * @shift: the shift count
 */
static inline void mab_shift_right(struct maple_big_node *b_node,
				   unsigned char shift)
{
	unsigned long size = b_node->b_end * sizeof(unsigned long);

	memmove(b_node->pivot + shift, b_node->pivot, size);
	memmove(b_node->slot + shift, b_node->slot, size);
	if (b_node->type == maple_arange_64)
		memmove(b_node->gap + shift, b_node->gap, size);
}
/*
 * mab_middle_node() - Check if a middle node is needed (unlikely)
 * @b_node: the maple_big_node that contains the data.
 * @split: the potential split location
 * @slot_count: the size that can be stored in a single node being considered.
 *
 * Return: true if a middle node is required.
 */
static inline bool mab_middle_node(struct maple_big_node *b_node, int split,
				   unsigned char slot_count)
{
	unsigned char size = b_node->b_end;

	if (size >= 2 * slot_count)
		return true;

	if (!b_node->slot[split] && (size >= 2 * slot_count - 1))
		return true;

	return false;
}
/*
 * mab_no_null_split() - ensure the split doesn't fall on a NULL
 * @b_node: the maple_big_node with the data
 * @split: the suggested split location
 * @slot_count: the number of slots in the node being considered.
 *
 * Return: the split location.
 */
static inline int mab_no_null_split(struct maple_big_node *b_node,
				    unsigned char split, unsigned char slot_count)
{
	if (!b_node->slot[split]) {
		/*
		 * If the split is less than the max slot && the right side will
		 * still be sufficient, then increment the split on NULL.
		 */
		if ((split < slot_count - 1) &&
		    (b_node->b_end - split) > (mt_min_slots[b_node->type]))
			split++;
		else
			split--;
	}
	return split;
}
/*
 * mab_calc_split() - Calculate the split location and if there needs to be two
 * splits.
 * @mas: The maple state
 * @bn: The maple_big_node with the data
 * @mid_split: The second split, if required.  0 otherwise.
 *
 * Return: The first split location.  The middle split is set in @mid_split.
 */
static inline int mab_calc_split(struct ma_state *mas,
	 struct maple_big_node *bn, unsigned char *mid_split, unsigned long min)
{
	unsigned char b_end = bn->b_end;
	int split = b_end / 2; /* Assume equal split. */
	unsigned char slot_min, slot_count = mt_slots[bn->type];

	/*
	 * To support gap tracking, all NULL entries are kept together and a node cannot
	 * end on a NULL entry, with the exception of the left-most leaf.  The
	 * limitation means that the split of a node must be checked for this condition
	 * and be able to put more data in one direction or the other.
	 */
	if (unlikely((mas->mas_flags & MA_STATE_BULK))) {
		*mid_split = 0;
		split = b_end - mt_min_slots[bn->type];

		if (!ma_is_leaf(bn->type))
			return split;

		mas->mas_flags |= MA_STATE_REBALANCE;
		if (!bn->slot[split])
			split--;
		return split;
	}

	/*
	 * Although extremely rare, it is possible to enter what is known as the 3-way
	 * split scenario.  The 3-way split comes about by means of a store of a range
	 * that overwrites the end and beginning of two full nodes.  The result is a set
	 * of entries that cannot be stored in 2 nodes.  Sometimes, these two nodes can
	 * also be located in different parent nodes which are also full.  This can
	 * carry upwards all the way to the root in the worst case.
	 */
	if (unlikely(mab_middle_node(bn, split, slot_count))) {
		split = b_end / 3;
		*mid_split = split * 2;
	} else {
		slot_min = mt_min_slots[bn->type];

		*mid_split = 0;
		/*
		 * Avoid having a range less than the slot count unless it
		 * causes one node to be deficient.
		 * NOTE: mt_min_slots is 1 based, b_end and split are zero.
		 */
		while ((split < slot_count - 1) &&
		       ((bn->pivot[split] - min) < slot_count - 1) &&
		       (b_end - split > slot_min))
			split++;
	}

	/* Avoid ending a node on a NULL entry */
	split = mab_no_null_split(bn, split, slot_count);

	if (unlikely(*mid_split))
		*mid_split = mab_no_null_split(bn, *mid_split, slot_count);

	return split;
}
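/*
 * Editor's worked example (illustrative; assumes MAPLE_RANGE64_SLOTS == 16).
 * For a leaf64 big node with b_end == 20 and slot_count == 16: 20 < 2 * 16,
 * so no middle node is needed and the starting guess is split == 10.  The
 * while loop then nudges the split right while the left range would stay
 * smaller than the slot count and the right side keeps more than slot_min
 * entries, and mab_no_null_split() finally moves the split off a NULL entry.
 */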
/*
 * mas_mab_cp() - Copy data from a maple state inclusively to a maple_big_node
 * and set @b_node->b_end to the next free slot.
 * @mas: The maple state
 * @mas_start: The starting slot to copy
 * @mas_end: The end slot to copy (inclusively)
 * @b_node: The maple_big_node to place the data
 * @mab_start: The starting location in maple_big_node to store the data.
 */
static inline void mas_mab_cp(struct ma_state *mas, unsigned char mas_start,
			unsigned char mas_end, struct maple_big_node *b_node,
			unsigned char mab_start)
{
	enum maple_type mt;
	struct maple_node *node;
	void __rcu **slots;
	unsigned long *pivots, *gaps;
	int i = mas_start, j = mab_start;
	unsigned char piv_end;

	node = mas_mn(mas);
	mt = mte_node_type(mas->node);
	pivots = ma_pivots(node, mt);
	if (!i) {
		b_node->pivot[j] = pivots[i++];
		if (unlikely(i > mas_end))
			goto complete;
		j++;
	}

	piv_end = min(mas_end, mt_pivots[mt]);
	for (; i < piv_end; i++, j++) {
		b_node->pivot[j] = pivots[i];
		if (unlikely(!b_node->pivot[j]))
			goto complete;

		if (unlikely(mas->max == b_node->pivot[j]))
			goto complete;
	}

	if (likely(i <= mas_end))
		b_node->pivot[j] = mas_safe_pivot(mas, pivots, i, mt);

complete:
	b_node->b_end = ++j;
	j -= mab_start;
	slots = ma_slots(node, mt);
	memcpy(b_node->slot + mab_start, slots + mas_start, sizeof(void *) * j);
	if (!ma_is_leaf(mt) && mt_is_alloc(mas->tree)) {
		gaps = ma_gaps(node, mt);
		memcpy(b_node->gap + mab_start, gaps + mas_start,
		       sizeof(unsigned long) * j);
	}
}
/*
 * mas_leaf_set_meta() - Set the metadata of a leaf if possible.
 * @node: The maple node
 * @mt: The maple type
 * @end: The node end
 */
static inline void mas_leaf_set_meta(struct maple_node *node,
		enum maple_type mt, unsigned char end)
{
	if (end < mt_slots[mt] - 1)
		ma_set_meta(node, mt, 0, end);
}
/*
 * mab_mas_cp() - Copy data from maple_big_node to a maple encoded node.
 * @b_node: the maple_big_node that has the data
 * @mab_start: the start location in @b_node.
 * @mab_end: The end location in @b_node (inclusively)
 * @mas: The maple state with the maple encoded node.
 */
static inline void mab_mas_cp(struct maple_big_node *b_node,
			      unsigned char mab_start, unsigned char mab_end,
			      struct ma_state *mas, bool new_max)
{
	int i, j = 0;
	enum maple_type mt = mte_node_type(mas->node);
	struct maple_node *node = mte_to_node(mas->node);
	void __rcu **slots = ma_slots(node, mt);
	unsigned long *pivots = ma_pivots(node, mt);
	unsigned long *gaps = NULL;
	unsigned char end;

	if (mab_end - mab_start > mt_pivots[mt])
		mab_end--;

	if (!pivots[mt_pivots[mt] - 1])
		slots[mt_pivots[mt]] = NULL;

	i = mab_start;
	do {
		pivots[j++] = b_node->pivot[i++];
	} while (i <= mab_end && likely(b_node->pivot[i]));

	memcpy(slots, b_node->slot + mab_start,
	       sizeof(void *) * (i - mab_start));

	if (new_max)
		mas->max = b_node->pivot[i - 1];

	end = j - 1;
	if (likely(!ma_is_leaf(mt) && mt_is_alloc(mas->tree))) {
		unsigned long max_gap = 0;
		unsigned char offset = 0;

		gaps = ma_gaps(node, mt);
		do {
			gaps[--j] = b_node->gap[--i];
			if (gaps[j] > max_gap) {
				offset = j;
				max_gap = gaps[j];
			}
		} while (j);

		ma_set_meta(node, mt, offset, end);
	} else {
		mas_leaf_set_meta(node, mt, end);
	}
}
/*
 * mas_bulk_rebalance() - Rebalance the end of a tree after a bulk insert.
 * @mas: The maple state
 * @end: The maple node end
 * @mt: The maple node type
 */
static inline void mas_bulk_rebalance(struct ma_state *mas, unsigned char end,
				      enum maple_type mt)
{
	if (!(mas->mas_flags & MA_STATE_BULK))
		return;

	if (mte_is_root(mas->node))
		return;

	if (end > mt_min_slots[mt]) {
		mas->mas_flags &= ~MA_STATE_REBALANCE;
		return;
	}
}
/*
 * mas_store_b_node() - Store an @entry into the b_node while also copying the
 * data from a maple encoded node.
 * @wr_mas: the maple write state
 * @b_node: the maple_big_node to fill with data
 * @offset_end: the offset to end copying
 *
 * Return: The actual end of the data stored in @b_node
 */
static noinline_for_kasan void mas_store_b_node(struct ma_wr_state *wr_mas,
		struct maple_big_node *b_node, unsigned char offset_end)
{
	unsigned char slot;
	unsigned char b_end;
	/* Possible underflow of piv will wrap back to 0 before use. */
	unsigned long piv;
	struct ma_state *mas = wr_mas->mas;

	b_node->type = wr_mas->type;
	b_end = 0;
	slot = mas->offset;
	if (slot) {
		/* Copy start data up to insert. */
		mas_mab_cp(mas, 0, slot - 1, b_node, 0);
		b_end = b_node->b_end;
		piv = b_node->pivot[b_end - 1];
	} else
		piv = mas->min - 1;

	if (piv + 1 < mas->index) {
		/* Handle range starting after old range */
		b_node->slot[b_end] = wr_mas->content;
		if (!wr_mas->content)
			b_node->gap[b_end] = mas->index - 1 - piv;
		b_node->pivot[b_end++] = mas->index - 1;
	}

	/* Store the new entry. */
	mas->offset = b_end;
	b_node->slot[b_end] = wr_mas->entry;
	b_node->pivot[b_end] = mas->last;

	/* Appended. */
	if (mas->last >= mas->max)
		goto b_end;

	/* Handle new range ending before old range ends */
	piv = mas_safe_pivot(mas, wr_mas->pivots, offset_end, wr_mas->type);
	if (piv > mas->last) {
		if (piv == ULONG_MAX)
			mas_bulk_rebalance(mas, b_node->b_end, wr_mas->type);

		if (offset_end != slot)
			wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
							  offset_end);

		b_node->slot[++b_end] = wr_mas->content;
		if (!wr_mas->content)
			b_node->gap[b_end] = piv - mas->last + 1;
		b_node->pivot[b_end] = piv;
	}

	slot = offset_end + 1;
	if (slot > mas->end)
		goto b_end;

	/* Copy end data to the end of the node. */
	mas_mab_cp(mas, slot, mas->end + 1, b_node, ++b_end);
	b_node->b_end--;
	return;

b_end:
	b_node->b_end = b_end;
}
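/*
 * Editor's worked example (illustrative).  With existing leaf data
 * [0-9] => A, [10-19] => B and [20-29] => C, storing [12-25] => D builds:
 *
 *	slot[0] = A, pivot[0] = 9
 *	slot[1] = B, pivot[1] = 11	(old range truncated on the left)
 *	slot[2] = D, pivot[2] = 25	(the new entry)
 *	slot[3] = C, pivot[3] = 29	(old range truncated on the right)
 *
 * so a single store can consume parts of two neighbouring ranges at once.
 */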
/*
 * mas_prev_sibling() - Find the previous node with the same parent.
 * @mas: the maple state
 *
 * Return: True if there is a previous sibling, false otherwise.
 */
static inline bool mas_prev_sibling(struct ma_state *mas)
{
	unsigned int p_slot = mte_parent_slot(mas->node);

	if (mte_is_root(mas->node))
		return false;

	if (!p_slot)
		return false;

	mas_ascend(mas);
	mas->offset = p_slot - 1;
	mas_descend(mas);
	return true;
}
/*
 * mas_next_sibling() - Find the next node with the same parent.
 * @mas: the maple state
 *
 * Return: true if there is a next sibling, false otherwise.
 */
static inline bool mas_next_sibling(struct ma_state *mas)
{
	MA_STATE(parent, mas->tree, mas->index, mas->last);

	if (mte_is_root(mas->node))
		return false;

	parent = *mas;
	mas_ascend(&parent);
	parent.offset = mte_parent_slot(mas->node) + 1;
	if (parent.offset > mas_data_end(&parent))
		return false;

	*mas = parent;
	mas_descend(mas);
	return true;
}
/*
 * mas_node_or_none() - Set the enode and state.
 * @mas: the maple state.
 * @enode: The encoded maple node.
 *
 * Set the node to the enode and the status.
 */
static inline void mas_node_or_none(struct ma_state *mas,
		struct maple_enode *enode)
{
	if (enode) {
		mas->node = enode;
		mas->status = ma_active;
	} else {
		mas->node = NULL;
		mas->status = ma_none;
	}
}
/*
 * mas_wr_node_walk() - Find the correct offset for the index in the @mas.
 * @wr_mas: The maple write state
 *
 * Uses mas_slot_locked() and does not need to worry about dead nodes.
 */
static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;
	unsigned char count, offset;

	if (unlikely(ma_is_dense(wr_mas->type))) {
		wr_mas->r_max = wr_mas->r_min = mas->index;
		mas->offset = mas->index = mas->min;
		return;
	}

	wr_mas->node = mas_mn(wr_mas->mas);
	wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type);
	count = mas->end = ma_data_end(wr_mas->node, wr_mas->type,
				       wr_mas->pivots, mas->max);
	offset = mas->offset;

	while (offset < count && mas->index > wr_mas->pivots[offset])
		offset++;

	wr_mas->r_max = offset < count ? wr_mas->pivots[offset] : mas->max;
	wr_mas->r_min = mas_safe_min(mas, wr_mas->pivots, offset);
	wr_mas->offset_end = mas->offset = offset;
}
/*
 * mast_rebalance_next() - Rebalance against the next node
 * @mast: The maple subtree state
 */
static inline void mast_rebalance_next(struct maple_subtree_state *mast)
{
	unsigned char b_end = mast->bn->b_end;

	mas_mab_cp(mast->orig_r, 0, mt_slot_count(mast->orig_r->node),
		   mast->bn, b_end);
	mast->orig_r->last = mast->orig_r->max;
}
/*
 * mast_rebalance_prev() - Rebalance against the previous node
 * @mast: The maple subtree state
 */
static inline void mast_rebalance_prev(struct maple_subtree_state *mast)
{
	unsigned char end = mas_data_end(mast->orig_l) + 1;
	unsigned char b_end = mast->bn->b_end;

	mab_shift_right(mast->bn, end);
	mas_mab_cp(mast->orig_l, 0, end - 1, mast->bn, 0);
	mast->l->min = mast->orig_l->min;
	mast->orig_l->index = mast->orig_l->min;
	mast->bn->b_end = end + b_end;
	mast->l->offset += end;
}
/*
 * mast_spanning_rebalance() - Rebalance nodes with nearest neighbour favouring
 * the node to the right.  Checking the nodes to the right then the left at each
 * level upwards until root is reached.
 * Data is copied into the @mast->bn.
 * @mast: The maple_subtree_state.
 */
static inline
bool mast_spanning_rebalance(struct maple_subtree_state *mast)
{
	struct ma_state r_tmp = *mast->orig_r;
	struct ma_state l_tmp = *mast->orig_l;
	unsigned char depth = 0;

	do {
		mas_ascend(mast->orig_r);
		mas_ascend(mast->orig_l);
		depth++;
		if (mast->orig_r->offset < mas_data_end(mast->orig_r)) {
			mast->orig_r->offset++;
			do {
				mas_descend(mast->orig_r);
				mast->orig_r->offset = 0;
			} while (--depth);

			mast_rebalance_next(mast);
			*mast->orig_l = l_tmp;
			return true;
		} else if (mast->orig_l->offset != 0) {
			mast->orig_l->offset--;
			do {
				mas_descend(mast->orig_l);
				mast->orig_l->offset =
					mas_data_end(mast->orig_l);
			} while (--depth);

			mast_rebalance_prev(mast);
			*mast->orig_r = r_tmp;
			return true;
		}
	} while (!mte_is_root(mast->orig_r->node));

	*mast->orig_r = r_tmp;
	*mast->orig_l = l_tmp;
	return false;
}
/*
 * mast_ascend() - Ascend the original left and right maple states.
 * @mast: the maple subtree state.
 *
 * Ascend the original left and right sides.  Set the offsets to point to the
 * data already in the new tree (@mast->l and @mast->r).
 */
static inline void mast_ascend(struct maple_subtree_state *mast)
{
	MA_WR_STATE(wr_mas, mast->orig_r, NULL);

	mas_ascend(mast->orig_l);
	mas_ascend(mast->orig_r);

	mast->orig_r->offset = 0;
	mast->orig_r->index = mast->r->max;
	/* last should be larger than or equal to index */
	if (mast->orig_r->last < mast->orig_r->index)
		mast->orig_r->last = mast->orig_r->index;

	wr_mas.type = mte_node_type(mast->orig_r->node);
	mas_wr_node_walk(&wr_mas);
	/* Set up the left side of things */
	mast->orig_l->offset = 0;
	mast->orig_l->index = mast->l->min;
	wr_mas.mas = mast->orig_l;
	wr_mas.type = mte_node_type(mast->orig_l->node);
	mas_wr_node_walk(&wr_mas);

	mast->bn->type = wr_mas.type;
}
/*
 * mas_new_ma_node() - Create and return a new maple node.  Helper function.
 * @mas: the maple state with the allocations.
 * @b_node: the maple_big_node with the type encoding.
 *
 * Use the node type from the maple_big_node to allocate a new node from the
 * ma_state.  This function exists mainly for code readability.
 *
 * Return: A new maple encoded node
 */
static inline struct maple_enode
*mas_new_ma_node(struct ma_state *mas, struct maple_big_node *b_node)
{
	return mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), b_node->type);
}
/*
 * mas_mab_to_node() - Set up right and middle nodes
 *
 * @mas: the maple state that contains the allocations.
 * @b_node: the node which contains the data.
 * @left: The pointer which will have the left node
 * @right: The pointer which may have the right node
 * @middle: the pointer which may have the middle node (rare)
 * @mid_split: the split location for the middle node
 * @min: the minimum of the range being stored (used for the split calculation)
 *
 * Return: the split of left.
 */
static inline unsigned char mas_mab_to_node(struct ma_state *mas,
	struct maple_big_node *b_node, struct maple_enode **left,
	struct maple_enode **right, struct maple_enode **middle,
	unsigned char *mid_split, unsigned long min)
{
	unsigned char split = 0;
	unsigned char slot_count = mt_slots[b_node->type];

	*left = mas_new_ma_node(mas, b_node);
	*right = NULL;
	*middle = NULL;
	*mid_split = 0;

	if (b_node->b_end < slot_count) {
		split = b_node->b_end;
	} else {
		split = mab_calc_split(mas, b_node, mid_split, min);
		*right = mas_new_ma_node(mas, b_node);
	}

	if (*mid_split)
		*middle = mas_new_ma_node(mas, b_node);

	return split;
}
/*
 * mab_set_b_end() - Add entry to b_node at b_node->b_end and increment the end
 * pointer.
 * @b_node: the big node to add the entry
 * @mas: the maple state to get the pivot (mas->max)
 * @entry: the entry to add, if NULL nothing happens.
 */
static inline void mab_set_b_end(struct maple_big_node *b_node,
				 struct ma_state *mas,
				 void *entry)
{
	if (!entry)
		return;

	b_node->slot[b_node->b_end] = entry;
	if (mt_is_alloc(mas->tree))
		b_node->gap[b_node->b_end] = mas_max_gap(mas);
	b_node->pivot[b_node->b_end++] = mas->max;
}
/*
 * mas_set_split_parent() - combine_then_separate helper function.  Sets the parent
 * of @mas->node to either @left or @right, depending on @slot and @split
 *
 * @mas: the maple state with the node that needs a parent
 * @left: possible parent 1
 * @right: possible parent 2
 * @slot: the slot the mas->node was placed
 * @split: the split location between @left and @right
 */
static inline void mas_set_split_parent(struct ma_state *mas,
					struct maple_enode *left,
					struct maple_enode *right,
					unsigned char *slot, unsigned char split)
{
	if (mas_is_none(mas))
		return;

	if ((*slot) <= split)
		mas_set_parent(mas, mas->node, left, *slot);
	else if (right)
		mas_set_parent(mas, mas->node, right, (*slot) - split - 1);

	(*slot)++;
}
/*
 * mte_mid_split_check() - Check if the next node passes the mid-split
 * @l: Pointer to left encoded maple node.
 * @r: Pointer to right encoded maple node.
 * @right: The encoded maple node to the right.
 * @slot: The offset being checked.
 * @split: The split location.
 * @mid_split: The middle split.
 */
static inline void mte_mid_split_check(struct maple_enode **l,
				       struct maple_enode **r,
				       struct maple_enode *right,
				       unsigned char slot,
				       unsigned char *split,
				       unsigned char mid_split)
{
	if (*r == right)
		return;

	if (slot < mid_split)
		return;

	*l = *r;
	*r = right;
	*split = mid_split;
}
/*
 * mast_set_split_parents() - Helper function to set three nodes parents.  Slot
 * is taken from @mast->l.
 * @mast: the maple subtree state
 * @left: the left node
 * @middle: the middle node (may be NULL)
 * @right: the right node
 * @split: the split location.
 * @mid_split: the split location between middle and right.
 */
static inline void mast_set_split_parents(struct maple_subtree_state *mast,
					  struct maple_enode *left,
					  struct maple_enode *middle,
					  struct maple_enode *right,
					  unsigned char split,
					  unsigned char mid_split)
{
	unsigned char slot;
	struct maple_enode *l = left;
	struct maple_enode *r = right;

	if (mas_is_none(mast->l))
		return;

	if (middle)
		r = middle;

	slot = mast->l->offset;

	mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
	mas_set_split_parent(mast->l, l, r, &slot, split);

	mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
	mas_set_split_parent(mast->m, l, r, &slot, split);

	mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
	mas_set_split_parent(mast->r, l, r, &slot, split);
}
/*
 * mas_topiary_node() - Dispose of a single node
 * @mas: The maple state for pushing nodes
 * @tmp_mas: The maple state holding the node to dispose of
 * @in_rcu: If the tree is in rcu mode
 *
 * The node will either be RCU freed or pushed back on the maple state.
 */
static inline void mas_topiary_node(struct ma_state *mas,
		struct ma_state *tmp_mas, bool in_rcu)
{
	struct maple_node *tmp;
	struct maple_enode *enode;

	if (mas_is_none(tmp_mas))
		return;

	enode = tmp_mas->node;
	tmp = mte_to_node(enode);
	mte_set_node_dead(enode);
	if (in_rcu)
		ma_free_rcu(tmp);
	else
		mas_push_node(mas, tmp);
}
/*
 * mas_topiary_replace() - Replace the data with new data, then repair the
 * parent links within the new tree.  Iterate over the dead sub-tree and collect
 * the dead subtrees and topiary the nodes that are no longer of use.
 *
 * The new tree will have up to three children with the correct parent.  Keep
 * track of the new entries as they need to be followed to find the next level
 * of new entries.
 *
 * The old tree will have up to three children with the old parent.  Keep track
 * of the old entries as they may have more nodes below replaced.  Nodes within
 * [index, last] are dead subtrees, others need to be freed and followed.
 *
 * @mas: The maple state pointing at the new data
 * @old_enode: The maple encoded node being replaced
 */
static inline void mas_topiary_replace(struct ma_state *mas,
		struct maple_enode *old_enode)
{
	struct ma_state tmp[3], tmp_next[3];
	MA_TOPIARY(subtrees, mas->tree);
	bool in_rcu;
	int i, n;

	/* Place data in tree & then mark node as old */
	mas_put_in_tree(mas, old_enode);

	/* Update the parent pointers in the tree */
	tmp[0] = *mas;
	tmp[0].offset = 0;
	tmp[1].status = ma_none;
	tmp[2].status = ma_none;
	while (!mte_is_leaf(tmp[0].node)) {
		n = 0;
		for (i = 0; i < 3; i++) {
			if (mas_is_none(&tmp[i]))
				continue;

			while (n < 3) {
				if (!mas_find_child(&tmp[i], &tmp_next[n]))
					break;
				n++;
			}

			mas_adopt_children(&tmp[i], tmp[i].node);
		}

		if (MAS_WARN_ON(mas, n == 0))
			break;

		while (n < 3)
			tmp_next[n++].status = ma_none;

		for (i = 0; i < 3; i++)
			tmp[i] = tmp_next[i];
	}

	/* Collect the old nodes that need to be discarded */
	if (mte_is_leaf(old_enode))
		return mas_free(mas, old_enode);

	tmp[0] = *mas;
	tmp[0].offset = 0;
	tmp[0].node = old_enode;
	tmp[1].status = ma_none;
	tmp[2].status = ma_none;
	in_rcu = mt_in_rcu(mas->tree);
	do {
		n = 0;
		for (i = 0; i < 3; i++) {
			if (mas_is_none(&tmp[i]))
				continue;

			while (n < 3) {
				if (!mas_find_child(&tmp[i], &tmp_next[n]))
					break;

				if ((tmp_next[n].min >= tmp_next->index) &&
				    (tmp_next[n].max <= tmp_next->last)) {
					mat_add(&subtrees, tmp_next[n].node);
					tmp_next[n].status = ma_none;
				} else {
					n++;
				}
			}
		}

		if (MAS_WARN_ON(mas, n == 0))
			break;

		while (n < 3)
			tmp_next[n++].status = ma_none;

		for (i = 0; i < 3; i++) {
			mas_topiary_node(mas, &tmp[i], in_rcu);
			tmp[i] = tmp_next[i];
		}
	} while (!mte_is_leaf(tmp[0].node));

	for (i = 0; i < 3; i++)
		mas_topiary_node(mas, &tmp[i], in_rcu);

	mas_mat_destroy(mas, &subtrees);
}
/*
 * mas_wmb_replace() - Write memory barrier and replace
 * @mas: The maple state
 * @old_enode: The old maple encoded node that is being replaced.
 *
 * Updates gap as necessary.
 */
static inline void mas_wmb_replace(struct ma_state *mas,
		struct maple_enode *old_enode)
{
	/* Insert the new data in the tree */
	mas_topiary_replace(mas, old_enode);

	if (mte_is_leaf(mas->node))
		return;

	mas_update_gap(mas);
}
/*
 * mast_cp_to_nodes() - Copy data out to nodes.
 * @mast: The maple subtree state
 * @left: The left encoded maple node
 * @middle: The middle encoded maple node
 * @right: The right encoded maple node
 * @split: The location to split between left and (middle ? middle : right)
 * @mid_split: The location to split between middle and right.
 */
static inline void mast_cp_to_nodes(struct maple_subtree_state *mast,
	struct maple_enode *left, struct maple_enode *middle,
	struct maple_enode *right, unsigned char split, unsigned char mid_split)
{
	bool new_lmax = true;

	mas_node_or_none(mast->l, left);
	mas_node_or_none(mast->m, middle);
	mas_node_or_none(mast->r, right);

	mast->l->min = mast->orig_l->min;
	if (split == mast->bn->b_end) {
		mast->l->max = mast->orig_r->max;
		new_lmax = false;
	}

	mab_mas_cp(mast->bn, 0, split, mast->l, new_lmax);

	if (middle) {
		mab_mas_cp(mast->bn, 1 + split, mid_split, mast->m, true);
		mast->m->min = mast->bn->pivot[split] + 1;
		mast->m->max = mast->bn->pivot[mid_split];
		split = mid_split;
	}

	mast->r->max = mast->orig_r->max;
	if (right) {
		mab_mas_cp(mast->bn, 1 + split, mast->bn->b_end, mast->r, false);
		mast->r->min = mast->bn->pivot[split] + 1;
	}
}
/*
 * mast_combine_cp_left - Copy in the original left side of the tree into the
 * combined data set in the maple subtree state big node.
 * @mast: The maple subtree state
 */
static inline void mast_combine_cp_left(struct maple_subtree_state *mast)
{
	unsigned char l_slot = mast->orig_l->offset;

	if (!l_slot)
		return;

	mas_mab_cp(mast->orig_l, 0, l_slot - 1, mast->bn, 0);
}
/*
 * mast_combine_cp_right: Copy in the original right side of the tree into the
 * combined data set in the maple subtree state big node.
 * @mast: The maple subtree state
 */
static inline void mast_combine_cp_right(struct maple_subtree_state *mast)
{
	if (mast->bn->pivot[mast->bn->b_end - 1] >= mast->orig_r->max)
		return;

	mas_mab_cp(mast->orig_r, mast->orig_r->offset + 1,
		   mt_slot_count(mast->orig_r->node), mast->bn,
		   mast->bn->b_end);
	mast->orig_r->last = mast->orig_r->max;
}
/*
 * mast_sufficient: Check if the maple subtree state has enough data in the big
 * node to create at least one sufficient node
 * @mast: the maple subtree state
 */
static inline bool mast_sufficient(struct maple_subtree_state *mast)
{
	if (mast->bn->b_end > mt_min_slot_count(mast->orig_l->node))
		return true;

	return false;
}
/*
 * mast_overflow: Check if there is too much data in the subtree state for a
 * single node.
 * @mast: The maple subtree state
 */
static inline bool mast_overflow(struct maple_subtree_state *mast)
{
	if (mast->bn->b_end >= mt_slot_count(mast->orig_l->node))
		return true;

	return false;
}
static inline void *mtree_range_walk(struct ma_state *mas)
{
	unsigned long *pivots;
	unsigned char offset;
	struct maple_node *node;
	struct maple_enode *next, *last;
	enum maple_type type;
	void __rcu **slots;
	unsigned char end;
	unsigned long max, min;
	unsigned long prev_max, prev_min;

	next = mas->node;
	min = mas->min;
	max = mas->max;
	do {
		last = next;
		node = mte_to_node(next);
		type = mte_node_type(next);
		pivots = ma_pivots(node, type);
		end = ma_data_end(node, type, pivots, max);
		prev_min = min;
		prev_max = max;
		if (pivots[0] >= mas->index) {
			offset = 0;
			max = pivots[0];
			goto next;
		}

		offset = 1;
		while (offset < end) {
			if (pivots[offset] >= mas->index) {
				max = pivots[offset];
				break;
			}
			offset++;
		}

		min = pivots[offset - 1] + 1;
next:
		slots = ma_slots(node, type);
		next = mt_slot(mas->tree, slots, offset);
		if (unlikely(ma_dead_node(node)))
			goto dead_node;
	} while (!ma_is_leaf(type));

	mas->end = end;
	mas->offset = offset;
	mas->index = min;
	mas->last = max;
	mas->min = prev_min;
	mas->max = prev_max;
	mas->node = last;
	return (void *)next;

dead_node:
	mas_reset(mas);
	return NULL;
}
/*
 * mas_spanning_rebalance() - Rebalance across two nodes which may not be peers.
 * @mas: The starting maple state
 * @mast: The maple_subtree_state, keeps track of 4 maple states.
 * @count: The estimated count of iterations needed.
 *
 * Follow the tree upwards from @l_mas and @r_mas for @count, or until the root
 * is hit.  First @b_node is split into two entries which are inserted into the
 * next iteration of the loop.  @b_node is returned populated with the final
 * iteration. @mas is used to obtain allocations.  orig_l_mas keeps track of the
 * nodes that will remain active by using orig_l_mas->index and orig_l_mas->last
 * to account for what has been copied into the new sub-tree.  The update of
 * orig_l_mas->last is used in mas_consume to find the slots that will need to
 * be either freed or destroyed.  orig_l_mas->depth keeps track of the height of
 * the new sub-tree in case the sub-tree becomes the full tree.
 *
 * Return: the number of elements in b_node during the last loop.
 */
static int mas_spanning_rebalance(struct ma_state *mas,
		struct maple_subtree_state *mast, unsigned char count)
{
	unsigned char split, mid_split;
	unsigned char slot = 0;
	struct maple_enode *left = NULL, *middle = NULL, *right = NULL;
	struct maple_enode *old_enode;

	MA_STATE(l_mas, mas->tree, mas->index, mas->index);
	MA_STATE(r_mas, mas->tree, mas->index, mas->last);
	MA_STATE(m_mas, mas->tree, mas->index, mas->index);

	/*
	 * The tree needs to be rebalanced and leaves need to be kept at the same level.
	 * Rebalancing is done by use of the ``struct maple_topiary``.
	 */
	mast->l = &l_mas;
	mast->m = &m_mas;
	mast->r = &r_mas;
	l_mas.status = r_mas.status = m_mas.status = ma_none;

	/* Check if this is not root and has sufficient data.  */
	if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) &&
	    unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type]))
		mast_spanning_rebalance(mast);

	l_mas.depth = 0;

	/*
	 * Each level of the tree is examined and balanced, pushing data to the left or
	 * right, or rebalancing against left or right nodes is employed to avoid
	 * rippling up the tree to limit the amount of churn.  Once a new sub-section of
	 * the tree is created, there may be a mix of new and old nodes.  The old nodes
	 * will have the incorrect parent pointers and currently be in two trees: the
	 * original tree and the partially new tree.  To remedy the parent pointers in
	 * the old tree, the new data is swapped into the active tree and a walk down
	 * the tree is performed and the parent pointers are updated.
	 * See mas_topiary_replace() for more information.
	 */
	while (count--) {
		mast->bn->b_end--;
		mast->bn->type = mte_node_type(mast->orig_l->node);
		split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle,
					&mid_split, mast->orig_l->min);
		mast_set_split_parents(mast, left, middle, right, split,
				       mid_split);
		mast_cp_to_nodes(mast, left, middle, right, split, mid_split);

		/*
		 * Copy data from next level in the tree to mast->bn from next
		 * iteration
		 */
		memset(mast->bn, 0, sizeof(struct maple_big_node));
		mast->bn->type = mte_node_type(left);
		l_mas.depth++;

		/* Root already stored in l->node. */
		if (mas_is_root_limits(mast->l))
			goto new_root;

		mast_ascend(mast);
		mast_combine_cp_left(mast);
		l_mas.offset = mast->bn->b_end;
		mab_set_b_end(mast->bn, &l_mas, left);
		mab_set_b_end(mast->bn, &m_mas, middle);
		mab_set_b_end(mast->bn, &r_mas, right);

		/* Copy anything necessary out of the right node. */
		mast_combine_cp_right(mast);
		mast->orig_l->last = mast->orig_l->max;

		if (mast_sufficient(mast))
			continue;

		if (mast_overflow(mast))
			continue;

		/* May be a new root stored in mast->bn */
		if (mas_is_root_limits(mast->orig_l))
			break;

		mast_spanning_rebalance(mast);

		/* rebalancing from other nodes may require another loop. */
		if (!count)
			count++;
	}

	l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)),
				mte_node_type(mast->orig_l->node));
	l_mas.depth++;
	mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas, true);
	mas_set_parent(mas, left, l_mas.node, slot);
	if (middle)
		mas_set_parent(mas, middle, l_mas.node, ++slot);

	if (right)
		mas_set_parent(mas, right, l_mas.node, ++slot);

	if (mas_is_root_limits(mast->l)) {
new_root:
		mas_mn(mast->l)->parent = ma_parent_ptr(mas_tree_parent(mas));
		while (!mte_is_root(mast->orig_l->node))
			mast_ascend(mast);
	} else {
		mas_mn(&l_mas)->parent = mas_mn(mast->orig_l)->parent;
	}

	old_enode = mast->orig_l->node;
	mas->depth = l_mas.depth;
	mas->node = l_mas.node;
	mas->min = l_mas.min;
	mas->max = l_mas.max;
	mas->offset = l_mas.offset;
	mas_wmb_replace(mas, old_enode);
	mtree_range_walk(mas);
	return mast->bn->b_end;
}
/*
 * mas_rebalance() - Rebalance a given node.
 * @mas: The maple state
 * @b_node: The big maple node.
 *
 * Rebalance two nodes into a single node or two new nodes that are sufficient.
 * Continue upwards until tree is sufficient.
 *
 * Return: the number of elements in b_node during the last loop.
 */
static inline int mas_rebalance(struct ma_state *mas,
				struct maple_big_node *b_node)
{
	char empty_count = mas_mt_height(mas);
	struct maple_subtree_state mast;
	unsigned char shift, b_end = ++b_node->b_end;

	MA_STATE(l_mas, mas->tree, mas->index, mas->last);
	MA_STATE(r_mas, mas->tree, mas->index, mas->last);

	trace_ma_op(__func__, mas);

	/*
	 * Rebalancing occurs if a node is insufficient.  Data is rebalanced
	 * against the node to the right if it exists, otherwise the node to the
	 * left of this node is rebalanced against this node.  If rebalancing
	 * causes just one node to be produced instead of two, then the parent
	 * is also examined and rebalanced if it is insufficient.  Every level
	 * tries to combine the data in the same way.  If one node contains the
	 * entire range of the tree, then that node is used as a new root node.
	 */
	mas_node_count(mas, empty_count * 2 - 1);
	if (mas_is_err(mas))
		return 0;

	mast.orig_l = &l_mas;
	mast.orig_r = &r_mas;
	mast.bn = b_node;
	mast.bn->type = mte_node_type(mas->node);

	l_mas = r_mas = *mas;

	if (mas_next_sibling(&r_mas)) {
		mas_mab_cp(&r_mas, 0, mt_slot_count(r_mas.node), b_node, b_end);
		r_mas.last = r_mas.index = r_mas.max;
	} else {
		mas_prev_sibling(&l_mas);
		shift = mas_data_end(&l_mas) + 1;
		mab_shift_right(b_node, shift);
		mas->offset += shift;
		mas_mab_cp(&l_mas, 0, shift - 1, b_node, 0);
		b_node->b_end = shift + b_end;
		l_mas.index = l_mas.last = l_mas.min;
	}

	return mas_spanning_rebalance(mas, &mast, empty_count);
}
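
/*
 * Illustrative only: rebalancing is internal and cannot be invoked directly
 * from the external API.  A store/erase pattern like the sketch below can
 * leave nodes with fewer than the minimum number of entries, at which point
 * a commit takes the mas_rebalance() path above.  The function name is
 * hypothetical and the tree is a throwaway.
 */
static void __maybe_unused mt_example_rebalance_workload(void)
{
	struct maple_tree tree;
	unsigned long i;

	mt_init(&tree);
	/* Fill enough disjoint ranges to build several leaf nodes. */
	for (i = 0; i < 100; i++)
		mtree_store_range(&tree, i * 10, i * 10 + 5,
				  xa_mk_value(i), GFP_KERNEL);

	/* Erasing most entries drives leaves below the minimum occupancy. */
	for (i = 0; i < 100; i += 2)
		mtree_erase(&tree, i * 10);

	mtree_destroy(&tree);
}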
/*
 * mas_destroy_rebalance() - Rebalance left-most node while destroying the maple
 * state.
 * @mas: The maple state
 * @end: The end of the left-most node.
 *
 * During a mass-insert event (such as forking), it may be necessary to
 * rebalance the left-most node when it is not sufficient.
 */
static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end)
{
	enum maple_type mt = mte_node_type(mas->node);
	struct maple_node reuse, *newnode, *parent, *new_left, *left, *node;
	struct maple_enode *eparent, *old_eparent;
	unsigned char offset, tmp, split = mt_slots[mt] / 2;
	void __rcu **l_slots, **slots;
	unsigned long *l_pivs, *pivs, gap;
	bool in_rcu = mt_in_rcu(mas->tree);

	MA_STATE(l_mas, mas->tree, mas->index, mas->last);

	l_mas = *mas;
	mas_prev_sibling(&l_mas);

	/* set up node. */
	if (in_rcu) {
		/* Allocate for both left and right as well as parent. */
		mas_node_count(mas, 3);
		if (mas_is_err(mas))
			return;

		newnode = mas_pop_node(mas);
	} else {
		newnode = &reuse;
	}

	node = mas_mn(mas);
	newnode->parent = node->parent;
	slots = ma_slots(newnode, mt);
	pivs = ma_pivots(newnode, mt);
	left = mas_mn(&l_mas);
	l_slots = ma_slots(left, mt);
	l_pivs = ma_pivots(left, mt);
	if (!l_slots[split])
		split++;
	tmp = mas_data_end(&l_mas) - split;

	memcpy(slots, l_slots + split + 1, sizeof(void *) * tmp);
	memcpy(pivs, l_pivs + split + 1, sizeof(unsigned long) * tmp);
	pivs[tmp] = l_mas.max;
	memcpy(slots + tmp, ma_slots(node, mt), sizeof(void *) * end);
	memcpy(pivs + tmp, ma_pivots(node, mt), sizeof(unsigned long) * end);

	l_mas.max = l_pivs[split];
	mas->min = l_mas.max + 1;
	old_eparent = mt_mk_node(mte_parent(l_mas.node),
				 mas_parent_type(&l_mas, l_mas.node));
	tmp += end;
	if (!in_rcu) {
		unsigned char max_p = mt_pivots[mt];
		unsigned char max_s = mt_slots[mt];

		if (tmp < max_p)
			memset(pivs + tmp, 0,
			       sizeof(unsigned long) * (max_p - tmp));

		if (tmp < mt_slots[mt])
			memset(slots + tmp, 0, sizeof(void *) * (max_s - tmp));

		memcpy(node, newnode, sizeof(struct maple_node));
		ma_set_meta(node, mt, 0, tmp - 1);
		mte_set_pivot(old_eparent, mte_parent_slot(l_mas.node),
			      l_pivs[split]);

		/* Remove data from l_pivs. */
		tmp = split + 1;
		memset(l_pivs + tmp, 0, sizeof(unsigned long) * (max_p - tmp));
		memset(l_slots + tmp, 0, sizeof(void *) * (max_s - tmp));
		ma_set_meta(left, mt, 0, split);
		eparent = old_eparent;

		goto done;
	}

	/* RCU requires replacing both l_mas, mas, and parent. */
	mas->node = mt_mk_node(newnode, mt);
	ma_set_meta(newnode, mt, 0, tmp);

	new_left = mas_pop_node(mas);
	new_left->parent = left->parent;
	mt = mte_node_type(l_mas.node);
	slots = ma_slots(new_left, mt);
	pivs = ma_pivots(new_left, mt);
	memcpy(slots, l_slots, sizeof(void *) * split);
	memcpy(pivs, l_pivs, sizeof(unsigned long) * split);
	ma_set_meta(new_left, mt, 0, split);
	l_mas.node = mt_mk_node(new_left, mt);

	/* replace parent. */
	offset = mte_parent_slot(mas->node);
	mt = mas_parent_type(&l_mas, l_mas.node);
	parent = mas_pop_node(mas);
	slots = ma_slots(parent, mt);
	pivs = ma_pivots(parent, mt);
	memcpy(parent, mte_to_node(old_eparent), sizeof(struct maple_node));
	rcu_assign_pointer(slots[offset], mas->node);
	rcu_assign_pointer(slots[offset - 1], l_mas.node);
	pivs[offset - 1] = l_mas.max;
	eparent = mt_mk_node(parent, mt);
done:
	gap = mas_leaf_max_gap(mas);
	mte_set_gap(eparent, mte_parent_slot(mas->node), gap);
	gap = mas_leaf_max_gap(&l_mas);
	mte_set_gap(eparent, mte_parent_slot(l_mas.node), gap);
	mas_ascend(mas);

	if (in_rcu) {
		mas_replace_node(mas, old_eparent);
		mas_adopt_children(mas, mas->node);
	}

	mas_update_gap(mas);
}
/*
 * mas_split_final_node() - Split the final node in a subtree operation.
 * @mast: the maple subtree state
 * @mas: The maple state
 * @height: The height of the tree in case it's a new root.
 */
static inline void mas_split_final_node(struct maple_subtree_state *mast,
					struct ma_state *mas, int height)
{
	struct maple_enode *ancestor;

	if (mte_is_root(mas->node)) {
		if (mt_is_alloc(mas->tree))
			mast->bn->type = maple_arange_64;
		else
			mast->bn->type = maple_range_64;
		mas->depth = height;
	}
	/*
	 * Only a single node is used here, could be root.
	 * The Big_node data should just fit in a single node.
	 */
	ancestor = mas_new_ma_node(mas, mast->bn);
	mas_set_parent(mas, mast->l->node, ancestor, mast->l->offset);
	mas_set_parent(mas, mast->r->node, ancestor, mast->r->offset);
	mte_to_node(ancestor)->parent = mas_mn(mas)->parent;

	mast->l->node = ancestor;
	mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, mast->l, true);
	mas->offset = mast->bn->b_end - 1;
}
/*
 * mast_fill_bnode() - Copy data into the big node in the subtree state
 * @mast: The maple subtree state
 * @mas: the maple state
 * @skip: The number of entries to skip for new nodes insertion.
 */
static inline void mast_fill_bnode(struct maple_subtree_state *mast,
				   struct ma_state *mas,
				   unsigned char skip)
{
	bool cp = true;
	unsigned char split;

	memset(mast->bn->gap, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->gap));
	memset(mast->bn->slot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->slot));
	memset(mast->bn->pivot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->pivot));
	mast->bn->b_end = 0;

	if (mte_is_root(mas->node)) {
		cp = false;
	} else {
		mas_ascend(mas);
		mas->offset = mte_parent_slot(mas->node);
	}

	if (cp && mast->l->offset)
		mas_mab_cp(mas, 0, mast->l->offset - 1, mast->bn, 0);

	split = mast->bn->b_end;
	mab_set_b_end(mast->bn, mast->l, mast->l->node);
	mast->r->offset = mast->bn->b_end;
	mab_set_b_end(mast->bn, mast->r, mast->r->node);
	if (mast->bn->pivot[mast->bn->b_end - 1] == mas->max)
		cp = false;

	if (cp)
		mas_mab_cp(mas, split + skip, mt_slot_count(mas->node) - 1,
			   mast->bn, mast->bn->b_end);

	mast->bn->b_end--;
	mast->bn->type = mte_node_type(mas->node);
}
/*
 * mast_split_data() - Split the data in the subtree state big node into regular
 * nodes.
 * @mast: The maple subtree state
 * @mas: The maple state
 * @split: The location to split the big node
 */
static inline void mast_split_data(struct maple_subtree_state *mast,
				   struct ma_state *mas, unsigned char split)
{
	unsigned char p_slot;

	mab_mas_cp(mast->bn, 0, split, mast->l, true);
	mte_set_pivot(mast->r->node, 0, mast->r->max);
	mab_mas_cp(mast->bn, split + 1, mast->bn->b_end, mast->r, false);
	mast->l->offset = mte_parent_slot(mas->node);
	mast->l->max = mast->bn->pivot[split];
	mast->r->min = mast->l->max + 1;
	if (mte_is_leaf(mas->node))
		return;

	p_slot = mast->orig_l->offset;
	mas_set_split_parent(mast->orig_l, mast->l->node, mast->r->node,
			     &p_slot, split);
	mas_set_split_parent(mast->orig_r, mast->l->node, mast->r->node,
			     &p_slot, split);
}
/*
 * mas_push_data() - Instead of splitting a node, it is beneficial to push the
 * data to the right or left node if there is room.
 * @mas: The maple state
 * @height: The current height of the maple state
 * @mast: The maple subtree state
 * @left: Push left or not.
 *
 * Keeping the height of the tree low means faster lookups.
 *
 * Return: True if pushed, false otherwise.
 */
static inline bool mas_push_data(struct ma_state *mas, int height,
				 struct maple_subtree_state *mast, bool left)
{
	unsigned char slot_total = mast->bn->b_end;
	unsigned char end, space, split;

	MA_STATE(tmp_mas, mas->tree, mas->index, mas->last);
	tmp_mas = *mas;
	tmp_mas.depth = mast->l->depth;

	if (left && !mas_prev_sibling(&tmp_mas))
		return false;
	else if (!left && !mas_next_sibling(&tmp_mas))
		return false;

	end = mas_data_end(&tmp_mas);
	slot_total += end;
	space = 2 * mt_slot_count(mas->node) - 2;
	/* -2 instead of -1 to ensure there isn't a triple split */
	if (ma_is_leaf(mast->bn->type))
		space--;

	if (mas->max == ULONG_MAX)
		space--;

	if (slot_total >= space)
		return false;

	/* Get the data; Fill mast->bn */
	mast->bn->b_end++;
	if (left) {
		mab_shift_right(mast->bn, end + 1);
		mas_mab_cp(&tmp_mas, 0, end, mast->bn, 0);
		mast->bn->b_end = slot_total + 1;
	} else {
		mas_mab_cp(&tmp_mas, 0, end, mast->bn, mast->bn->b_end);
	}

	/* Configure mast for splitting of mast->bn */
	split = mt_slots[mast->bn->type] - 2;
	if (left) {
		/* Switch mas to prev node */
		*mas = tmp_mas;
		/* Start using mast->l for the left side. */
		tmp_mas.node = mast->l->node;
		*mast->l = tmp_mas;
	} else {
		tmp_mas.node = mast->r->node;
		*mast->r = tmp_mas;
		split = slot_total - split;
	}
	split = mab_no_null_split(mast->bn, split, mt_slots[mast->bn->type]);
	/* Update parent slot for split calculation. */
	if (left)
		mast->orig_l->offset += end + 1;

	mast_split_data(mast, mas, split);
	mast_fill_bnode(mast, mas, 2);
	mas_split_final_node(mast, mas, height + 1);
	return true;
}
/*
 * mas_split() - Split data that is too big for one node into two.
 * @mas: The maple state
 * @b_node: The maple big node
 * Return: 1 on success, 0 on failure.
 */
static int mas_split(struct ma_state *mas, struct maple_big_node *b_node)
{
	struct maple_subtree_state mast;
	int height = 0;
	unsigned char mid_split, split = 0;
	struct maple_enode *old;

	/*
	 * Splitting is handled differently from any other B-tree; the Maple
	 * Tree splits upwards.  Splitting up means that the split operation
	 * occurs when the walk of the tree hits the leaves and not on the way
	 * down.  The reason for splitting up is that it is impossible to know
	 * how much space will be needed until the leaf is (or leaves are)
	 * reached.  Since overwriting data is allowed and a range could
	 * overwrite more than one range or result in changing one entry into 3
	 * entries, it is impossible to know if a split is required until the
	 * data is examined.
	 *
	 * Splitting is a balancing act between keeping allocations to a minimum
	 * and avoiding a 'jitter' event where a tree is expanded to make room
	 * for an entry followed by a contraction when the entry is removed.  To
	 * accomplish the balance, there are empty slots remaining in both left
	 * and right nodes after a split.
	 */
	MA_STATE(l_mas, mas->tree, mas->index, mas->last);
	MA_STATE(r_mas, mas->tree, mas->index, mas->last);
	MA_STATE(prev_l_mas, mas->tree, mas->index, mas->last);
	MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last);

	trace_ma_op(__func__, mas);
	mas->depth = mas_mt_height(mas);
	/* Allocation failures will happen early. */
	mas_node_count(mas, 1 + mas->depth * 2);
	if (mas_is_err(mas))
		return 0;

	mast.l = &l_mas;
	mast.r = &r_mas;
	mast.orig_l = &prev_l_mas;
	mast.orig_r = &prev_r_mas;
	mast.bn = b_node;

	while (height++ <= mas->depth) {
		if (mt_slots[b_node->type] > b_node->b_end) {
			mas_split_final_node(&mast, mas, height);
			break;
		}

		l_mas = r_mas = *mas;
		l_mas.node = mas_new_ma_node(mas, b_node);
		r_mas.node = mas_new_ma_node(mas, b_node);
		/*
		 * Another way that 'jitter' is avoided is to terminate a split up early if the
		 * left or right node has space to spare.  This is referred to as "pushing left"
		 * or "pushing right" and is similar to the B* tree, except the nodes left or
		 * right can rarely be reused due to RCU, but the ripple upwards is halted which
		 * is a significant savings.
		 */
		/* Try to push left. */
		if (mas_push_data(mas, height, &mast, true))
			break;
		/* Try to push right. */
		if (mas_push_data(mas, height, &mast, false))
			break;

		split = mab_calc_split(mas, b_node, &mid_split, prev_l_mas.min);
		mast_split_data(&mast, mas, split);
		/*
		 * Usually correct, mab_mas_cp in the above call overwrites
		 * r->max.
		 */
		mast.r->max = mas->max;
		mast_fill_bnode(&mast, mas, 1);
		prev_l_mas = *mast.l;
		prev_r_mas = *mast.r;
	}

	/* Set the original node as dead */
	old = mas->node;
	mas->node = l_mas.node;
	mas_wmb_replace(mas, old);
	mtree_range_walk(mas);
	return 1;
}
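
/*
 * Illustrative only: splitting happens internally whenever a node exceeds
 * mt_slots for its type.  Storing more non-mergeable ranges than fit in one
 * leaf (MAPLE_RANGE64_SLOTS) forces the split-upward path above.  The
 * function name is hypothetical and the tree is a throwaway.
 */
static void __maybe_unused mt_example_force_split(void)
{
	struct maple_tree tree;
	unsigned long i;

	mt_init(&tree);
	/*
	 * Disjoint, non-adjacent ranges cannot be merged, so the root leaf
	 * fills and must split once the slot count is exceeded.
	 */
	for (i = 0; i < 2 * MAPLE_RANGE64_SLOTS; i++)
		mtree_store_range(&tree, i * 100, i * 100 + 10,
				  xa_mk_value(i), GFP_KERNEL);

	mtree_destroy(&tree);
}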
/*
 * mas_reuse_node() - Reuse the node to store the data.
 * @wr_mas: The maple write state
 * @bn: The maple big node
 * @end: The end of the data.
 *
 * Will always return false in RCU mode.
 *
 * Return: True if node was reused, false otherwise.
 */
static inline bool mas_reuse_node(struct ma_wr_state *wr_mas,
				  struct maple_big_node *bn, unsigned char end)
{
	/* Need to be rcu safe. */
	if (mt_in_rcu(wr_mas->mas->tree))
		return false;

	if (end > bn->b_end) {
		int clear = mt_slots[wr_mas->type] - bn->b_end;

		memset(wr_mas->slots + bn->b_end, 0, sizeof(void *) * clear--);
		memset(wr_mas->pivots + bn->b_end, 0, sizeof(void *) * clear);
	}
	mab_mas_cp(bn, 0, bn->b_end, wr_mas->mas, false);
	return true;
}
/*
 * mas_commit_b_node() - Commit the big node into the tree.
 * @wr_mas: The maple write state
 * @b_node: The maple big node
 * @end: The end of the data.
 */
static noinline_for_kasan int mas_commit_b_node(struct ma_wr_state *wr_mas,
		struct maple_big_node *b_node, unsigned char end)
{
	struct maple_node *node;
	struct maple_enode *old_enode;
	unsigned char b_end = b_node->b_end;
	enum maple_type b_type = b_node->type;

	old_enode = wr_mas->mas->node;
	if ((b_end < mt_min_slots[b_type]) &&
	    (!mte_is_root(old_enode)) &&
	    (mas_mt_height(wr_mas->mas) > 1))
		return mas_rebalance(wr_mas->mas, b_node);

	if (b_end >= mt_slots[b_type])
		return mas_split(wr_mas->mas, b_node);

	if (mas_reuse_node(wr_mas, b_node, end))
		goto reuse_node;

	mas_node_count(wr_mas->mas, 1);
	if (mas_is_err(wr_mas->mas))
		return 0;

	node = mas_pop_node(wr_mas->mas);
	node->parent = mas_mn(wr_mas->mas)->parent;
	wr_mas->mas->node = mt_mk_node(node, b_type);
	mab_mas_cp(b_node, 0, b_end, wr_mas->mas, false);
	mas_replace_node(wr_mas->mas, old_enode);
reuse_node:
	mas_update_gap(wr_mas->mas);
	wr_mas->mas->end = b_end;
	return 1;
}
/*
 * mas_root_expand() - Expand a root to a node
 * @mas: The maple state
 * @entry: The entry to store into the tree
 */
static inline int mas_root_expand(struct ma_state *mas, void *entry)
{
	void *contents = mas_root_locked(mas);
	enum maple_type type = maple_leaf_64;
	struct maple_node *node;
	void __rcu **slots;
	unsigned long *pivots;
	int slot = 0;

	mas_node_count(mas, 1);
	if (unlikely(mas_is_err(mas)))
		return 0;

	node = mas_pop_node(mas);
	pivots = ma_pivots(node, type);
	slots = ma_slots(node, type);
	node->parent = ma_parent_ptr(mas_tree_parent(mas));
	mas->node = mt_mk_node(node, type);
	mas->status = ma_active;

	if (mas->index) {
		if (contents) {
			rcu_assign_pointer(slots[slot], contents);
			if (likely(mas->index > 1))
				slot++;
		}
		pivots[slot++] = mas->index - 1;
	}

	rcu_assign_pointer(slots[slot], entry);
	mas->offset = slot;
	pivots[slot] = mas->last;
	if (mas->last != ULONG_MAX)
		pivots[++slot] = ULONG_MAX;

	mas->depth = 1;
	mas_set_height(mas);
	ma_set_meta(node, maple_leaf_64, 0, slot);
	/* swap the new root into the tree */
	rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
	return slot;
}
static inline void mas_store_root(struct ma_state *mas, void *entry)
{
	if (likely((mas->last != 0) || (mas->index != 0)))
		mas_root_expand(mas, entry);
	else if (((unsigned long) (entry) & 3) == 2)
		mas_root_expand(mas, entry);
	else {
		rcu_assign_pointer(mas->tree->ma_root, entry);
		mas->status = ma_start;
	}
}
/*
 * mas_is_span_wr() - Check if the write needs to be treated as a write that
 * spans the node.
 * @wr_mas: The maple write state
 *
 * Spanning writes are writes that start in one node and end in another OR if
 * the write of a %NULL will cause the node to end with a %NULL.
 *
 * Return: True if this is a spanning write, false otherwise.
 */
static bool mas_is_span_wr(struct ma_wr_state *wr_mas)
{
	unsigned long max = wr_mas->r_max;
	unsigned long last = wr_mas->mas->last;
	enum maple_type type = wr_mas->type;
	void *entry = wr_mas->entry;

	/* Contained in this pivot, fast path */
	if (last < max)
		return false;

	if (ma_is_leaf(type)) {
		max = wr_mas->mas->max;
		if (last < max)
			return false;
	}

	if (last == max) {
		/*
		 * The last entry of leaf node cannot be NULL unless it is the
		 * rightmost node (writing ULONG_MAX), otherwise it spans slots.
		 */
		if (entry || last == ULONG_MAX)
			return false;
	}

	trace_ma_write(__func__, wr_mas->mas, wr_mas->r_max, entry);
	return true;
}
static inline void mas_wr_walk_descend(struct ma_wr_state *wr_mas)
{
	wr_mas->type = mte_node_type(wr_mas->mas->node);
	mas_wr_node_walk(wr_mas);
	wr_mas->slots = ma_slots(wr_mas->node, wr_mas->type);
}

static inline void mas_wr_walk_traverse(struct ma_wr_state *wr_mas)
{
	wr_mas->mas->max = wr_mas->r_max;
	wr_mas->mas->min = wr_mas->r_min;
	wr_mas->mas->node = wr_mas->content;
	wr_mas->mas->offset = 0;
	wr_mas->mas->depth++;
}
/*
 * mas_wr_walk() - Walk the tree for a write.
 * @wr_mas: The maple write state
 *
 * Uses mas_slot_locked() and does not need to worry about dead nodes.
 *
 * Return: True if it's contained in a node, false on spanning write.
 */
static bool mas_wr_walk(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;

	while (true) {
		mas_wr_walk_descend(wr_mas);
		if (unlikely(mas_is_span_wr(wr_mas)))
			return false;

		wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
						  mas->offset);
		if (ma_is_leaf(wr_mas->type))
			return true;

		mas_wr_walk_traverse(wr_mas);
	}

	return true;
}
static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;

	while (true) {
		mas_wr_walk_descend(wr_mas);
		wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
						  mas->offset);
		if (ma_is_leaf(wr_mas->type))
			return true;
		mas_wr_walk_traverse(wr_mas);
	}
	return true;
}
/*
 * mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs.
 * @l_wr_mas: The left maple write state
 * @r_wr_mas: The right maple write state
 */
static inline void mas_extend_spanning_null(struct ma_wr_state *l_wr_mas,
					    struct ma_wr_state *r_wr_mas)
{
	struct ma_state *r_mas = r_wr_mas->mas;
	struct ma_state *l_mas = l_wr_mas->mas;
	unsigned char l_slot;

	l_slot = l_mas->offset;
	if (!l_wr_mas->content)
		l_mas->index = l_wr_mas->r_min;

	if ((l_mas->index == l_wr_mas->r_min) &&
	    (l_slot &&
	     !mas_slot_locked(l_mas, l_wr_mas->slots, l_slot - 1))) {
		if (l_slot > 1)
			l_mas->index = l_wr_mas->pivots[l_slot - 2] + 1;
		else
			l_mas->index = l_mas->min;

		l_mas->offset = l_slot - 1;
	}

	if (!r_wr_mas->content) {
		if (r_mas->last < r_wr_mas->r_max)
			r_mas->last = r_wr_mas->r_max;
		r_mas->offset++;
	} else if ((r_mas->last == r_wr_mas->r_max) &&
	    (r_mas->last < r_mas->max) &&
	    !mas_slot_locked(r_mas, r_wr_mas->slots, r_mas->offset + 1)) {
		r_mas->last = mas_safe_pivot(r_mas, r_wr_mas->pivots,
					     r_wr_mas->type, r_mas->offset + 1);
		r_mas->offset++;
	}
}
static inline void *mas_state_walk(struct ma_state *mas)
{
	void *entry;

	entry = mas_start(mas);
	if (mas_is_none(mas))
		return NULL;

	if (mas_is_ptr(mas))
		return entry;

	return mtree_range_walk(mas);
}
/*
 * mtree_lookup_walk() - Internal quick lookup that does not keep maple state up
 * to date.
 *
 * @mas: The maple state.
 *
 * Note: Leaves mas in undesirable state.
 * Return: The entry for @mas->index or %NULL on dead node.
 */
static inline void *mtree_lookup_walk(struct ma_state *mas)
{
	unsigned long *pivots;
	unsigned char offset;
	struct maple_node *node;
	struct maple_enode *next;
	enum maple_type type;
	void __rcu **slots;
	unsigned char end;

	next = mas->node;
	do {
		node = mte_to_node(next);
		type = mte_node_type(next);
		pivots = ma_pivots(node, type);
		end = mt_pivots[type];
		offset = 0;
		do {
			if (pivots[offset] >= mas->index)
				break;
		} while (++offset < end);

		slots = ma_slots(node, type);
		next = mt_slot(mas->tree, slots, offset);
		if (unlikely(ma_dead_node(node)))
			goto dead_node;
	} while (!ma_is_leaf(type));

	return (void *)next;

dead_node:
	mas_reset(mas);
	return NULL;
}
static void mte_destroy_walk(struct maple_enode *, struct maple_tree *);

/*
 * mas_new_root() - Create a new root node that only contains the entry passed
 * in.
 * @mas: The maple state
 * @entry: The entry to store.
 *
 * Only valid when the index == 0 and the last == ULONG_MAX
 *
 * Return 0 on error, 1 on success.
 */
static inline int mas_new_root(struct ma_state *mas, void *entry)
{
	struct maple_enode *root = mas_root_locked(mas);
	enum maple_type type = maple_leaf_64;
	struct maple_node *node;
	void __rcu **slots;
	unsigned long *pivots;

	if (!entry && !mas->index && mas->last == ULONG_MAX) {
		mas->depth = 0;
		mas_set_height(mas);
		rcu_assign_pointer(mas->tree->ma_root, entry);
		mas->status = ma_start;
		goto done;
	}

	mas_node_count(mas, 1);
	if (mas_is_err(mas))
		return 0;

	node = mas_pop_node(mas);
	pivots = ma_pivots(node, type);
	slots = ma_slots(node, type);
	node->parent = ma_parent_ptr(mas_tree_parent(mas));
	mas->node = mt_mk_node(node, type);
	mas->status = ma_active;
	rcu_assign_pointer(slots[0], entry);
	pivots[0] = mas->last;
	mas->depth = 1;
	mas_set_height(mas);
	rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));

done:
	if (xa_is_node(root))
		mte_destroy_walk(root, mas->tree);

	return 1;
}
/*
 * mas_wr_spanning_store() - Create a subtree with the store operation completed
 * and new nodes where necessary, then place the sub-tree in the actual tree.
 * Note that mas is expected to point to the node which caused the store to
 * span.
 * @wr_mas: The maple write state
 *
 * Return: 0 on error, positive on success.
 */
static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
{
	struct maple_subtree_state mast;
	struct maple_big_node b_node;
	struct ma_state *mas;
	unsigned char height;

	/* Left and Right side of spanning store */
	MA_STATE(l_mas, NULL, 0, 0);
	MA_STATE(r_mas, NULL, 0, 0);
	MA_WR_STATE(r_wr_mas, &r_mas, wr_mas->entry);
	MA_WR_STATE(l_wr_mas, &l_mas, wr_mas->entry);

	/*
	 * A store operation that spans multiple nodes is called a spanning
	 * store and is handled early in the store call stack by the function
	 * mas_is_span_wr().  When a spanning store is identified, the maple
	 * state is duplicated.  The first maple state walks the left tree path
	 * to ``index``, the duplicate walks the right tree path to ``last``.
	 * The data in the two nodes are combined into a single node, two nodes,
	 * or possibly three nodes (see the 3-way split above).  A ``NULL``
	 * written to the last entry of a node is considered a spanning store as
	 * a rebalance is required for the operation to complete and an overflow
	 * of data may happen.
	 */
	mas = wr_mas->mas;
	trace_ma_op(__func__, mas);

	if (unlikely(!mas->index && mas->last == ULONG_MAX))
		return mas_new_root(mas, wr_mas->entry);
	/*
	 * Node rebalancing may occur due to this store, so there may be three new
	 * entries per level plus a new root.
	 */
	height = mas_mt_height(mas);
	mas_node_count(mas, 1 + height * 3);
	if (mas_is_err(mas))
		return 0;

	/*
	 * Set up right side.  Need to get to the next offset after the spanning
	 * store to ensure it's not NULL and to combine both the next node and
	 * the node with the start together.
	 */
	r_mas = *mas;
	/* Avoid overflow, walk to next slot in the tree. */
	if (r_mas.last + 1)
		r_mas.last++;

	r_mas.index = r_mas.last;
	mas_wr_walk_index(&r_wr_mas);
	r_mas.last = r_mas.index = mas->last;

	/* Set up left side. */
	l_mas = *mas;
	mas_wr_walk_index(&l_wr_mas);

	if (!wr_mas->entry) {
		mas_extend_spanning_null(&l_wr_mas, &r_wr_mas);
		mas->offset = l_mas.offset;
		mas->index = l_mas.index;
		mas->last = l_mas.last = r_mas.last;
	}

	/* expanding NULLs may make this cover the entire range */
	if (!l_mas.index && r_mas.last == ULONG_MAX) {
		mas_set_range(mas, 0, ULONG_MAX);
		return mas_new_root(mas, wr_mas->entry);
	}

	memset(&b_node, 0, sizeof(struct maple_big_node));
	/* Copy l_mas and store the value in b_node. */
	mas_store_b_node(&l_wr_mas, &b_node, l_mas.end);
	/* Copy r_mas into b_node. */
	if (r_mas.offset <= r_mas.end)
		mas_mab_cp(&r_mas, r_mas.offset, r_mas.end,
			   &b_node, b_node.b_end + 1);
	else
		b_node.b_end++;

	/* Stop spanning searches by searching for just index. */
	l_mas.index = l_mas.last = mas->index;

	mast.bn = &b_node;
	mast.orig_l = &l_mas;
	mast.orig_r = &r_mas;
	/* Combine l_mas and r_mas and split them up evenly again. */
	return mas_spanning_rebalance(mas, &mast, height + 1);
}
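
/*
 * Illustrative only: a store that starts in one leaf and ends in another
 * takes the spanning path above.  With many small ranges in the tree, one
 * wide overwrite is all but guaranteed to cross a node boundary.  The
 * function name is hypothetical and the tree is a throwaway.
 */
static void __maybe_unused mt_example_spanning_store(void)
{
	struct maple_tree tree;
	unsigned long i;

	mt_init(&tree);
	for (i = 0; i < 4 * MAPLE_RANGE64_SLOTS; i++)
		mtree_store_range(&tree, i * 100, i * 100 + 10,
				  xa_mk_value(i), GFP_KERNEL);

	/*
	 * One store covering many existing ranges spans several leaves; the
	 * two walk states (l_mas/r_mas) above combine the remainders.  A
	 * NULL entry here would additionally be extended over neighbouring
	 * NULLs by mas_extend_spanning_null().
	 */
	mtree_store_range(&tree, 150, 3000, xa_mk_value(999), GFP_KERNEL);

	mtree_destroy(&tree);
}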
/*
 * mas_wr_node_store() - Attempt to store the value in a node
 * @wr_mas: The maple write state
 * @new_end: The end of the node after the modification
 *
 * Attempts to reuse the node, but may allocate.
 *
 * Return: True if stored, false otherwise
 */
static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas,
				     unsigned char new_end)
{
	struct ma_state *mas = wr_mas->mas;
	void __rcu **dst_slots;
	unsigned long *dst_pivots;
	unsigned char dst_offset, offset_end = wr_mas->offset_end;
	struct maple_node reuse, *newnode;
	unsigned char copy_size, node_pivots = mt_pivots[wr_mas->type];
	bool in_rcu = mt_in_rcu(mas->tree);

	/* Check if the node would keep enough data; bail out otherwise. */
	if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[wr_mas->type]) &&
	    !(mas->mas_flags & MA_STATE_BULK))
		return false;

	if (mas->last == wr_mas->end_piv)
		offset_end++; /* don't copy this offset */
	else if (unlikely(wr_mas->r_max == ULONG_MAX))
		mas_bulk_rebalance(mas, mas->end, wr_mas->type);

	/* set up node. */
	if (in_rcu) {
		mas_node_count(mas, 1);
		if (mas_is_err(mas))
			return false;

		newnode = mas_pop_node(mas);
	} else {
		memset(&reuse, 0, sizeof(struct maple_node));
		newnode = &reuse;
	}

	newnode->parent = mas_mn(mas)->parent;
	dst_pivots = ma_pivots(newnode, wr_mas->type);
	dst_slots = ma_slots(newnode, wr_mas->type);
	/* Copy from start to insert point */
	memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * mas->offset);
	memcpy(dst_slots, wr_mas->slots, sizeof(void *) * mas->offset);

	/* Handle insert of new range starting after old range */
	if (wr_mas->r_min < mas->index) {
		rcu_assign_pointer(dst_slots[mas->offset], wr_mas->content);
		dst_pivots[mas->offset++] = mas->index - 1;
	}

	/* Store the new entry and range end. */
	if (mas->offset < node_pivots)
		dst_pivots[mas->offset] = mas->last;
	rcu_assign_pointer(dst_slots[mas->offset], wr_mas->entry);

	/*
	 * this range wrote to the end of the node or it overwrote the rest of
	 * the data
	 */
	if (offset_end > mas->end)
		goto done;

	dst_offset = mas->offset + 1;
	/* Copy to the end of node if necessary. */
	copy_size = mas->end - offset_end + 1;
	memcpy(dst_slots + dst_offset, wr_mas->slots + offset_end,
	       sizeof(void *) * copy_size);
	memcpy(dst_pivots + dst_offset, wr_mas->pivots + offset_end,
	       sizeof(unsigned long) * (copy_size - 1));

	if (new_end < node_pivots)
		dst_pivots[new_end] = mas->max;

done:
	mas_leaf_set_meta(newnode, maple_leaf_64, new_end);
	if (in_rcu) {
		struct maple_enode *old_enode = mas->node;

		mas->node = mt_mk_node(newnode, wr_mas->type);
		mas_replace_node(mas, old_enode);
	} else {
		memcpy(wr_mas->node, newnode, sizeof(struct maple_node));
	}
	trace_ma_write(__func__, mas, 0, wr_mas->entry);
	mas_update_gap(mas);
	mas->end = new_end;
	return true;
}
/*
 * mas_wr_slot_store: Attempt to store a value in a slot.
 * @wr_mas: the maple write state
 *
 * Return: True if stored, false otherwise
 */
static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;
	unsigned char offset = mas->offset;
	void __rcu **slots = wr_mas->slots;
	bool gap = false;

	gap |= !mt_slot_locked(mas->tree, slots, offset);
	gap |= !mt_slot_locked(mas->tree, slots, offset + 1);

	if (wr_mas->offset_end - offset == 1) {
		if (mas->index == wr_mas->r_min) {
			/* Overwriting the range and a part of the next one */
			rcu_assign_pointer(slots[offset], wr_mas->entry);
			wr_mas->pivots[offset] = mas->last;
		} else {
			/* Overwriting a part of the range and the next one */
			rcu_assign_pointer(slots[offset + 1], wr_mas->entry);
			wr_mas->pivots[offset] = mas->index - 1;
			mas->offset++; /* Keep mas accurate. */
		}
	} else if (!mt_in_rcu(mas->tree)) {
		/*
		 * Expand the range, only partially overwriting the previous and
		 * next ranges
		 */
		gap |= !mt_slot_locked(mas->tree, slots, offset + 2);
		rcu_assign_pointer(slots[offset + 1], wr_mas->entry);
		wr_mas->pivots[offset] = mas->index - 1;
		wr_mas->pivots[offset + 1] = mas->last;
		mas->offset++; /* Keep mas accurate. */
	} else {
		return false;
	}

	trace_ma_write(__func__, mas, 0, wr_mas->entry);
	/*
	 * Only update gap when the new entry is empty or there is an empty
	 * entry in the original two ranges.
	 */
	if (!wr_mas->entry || gap)
		mas_update_gap(mas);

	return true;
}
static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;

	if (!wr_mas->slots[wr_mas->offset_end]) {
		/* If this one is null, the next and prev are not */
		mas->last = wr_mas->end_piv;
	} else {
		/* Check next slot(s) if we are overwriting the end */
		if ((mas->last == wr_mas->end_piv) &&
		    (mas->end != wr_mas->offset_end) &&
		    !wr_mas->slots[wr_mas->offset_end + 1]) {
			wr_mas->offset_end++;
			if (wr_mas->offset_end == mas->end)
				mas->last = mas->max;
			else
				mas->last = wr_mas->pivots[wr_mas->offset_end];
			wr_mas->end_piv = mas->last;
		}
	}

	if (!wr_mas->content) {
		/* If this one is null, the next and prev are not */
		mas->index = wr_mas->r_min;
	} else {
		/* Check prev slot if we are overwriting the start */
		if (mas->index == wr_mas->r_min && mas->offset &&
		    !wr_mas->slots[mas->offset - 1]) {
			mas->offset--;
			wr_mas->r_min = mas->index =
				mas_safe_min(mas, wr_mas->pivots, mas->offset);
			wr_mas->r_max = wr_mas->pivots[mas->offset];
		}
	}
}
static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas)
{
	while ((wr_mas->offset_end < wr_mas->mas->end) &&
	       (wr_mas->mas->last > wr_mas->pivots[wr_mas->offset_end]))
		wr_mas->offset_end++;

	if (wr_mas->offset_end < wr_mas->mas->end)
		wr_mas->end_piv = wr_mas->pivots[wr_mas->offset_end];
	else
		wr_mas->end_piv = wr_mas->mas->max;

	if (!wr_mas->entry)
		mas_wr_extend_null(wr_mas);
}
static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;
	unsigned char new_end = mas->end + 2;

	new_end -= wr_mas->offset_end - mas->offset;
	if (wr_mas->r_min == mas->index)
		new_end--;

	if (wr_mas->end_piv == mas->last)
		new_end--;

	return new_end;
}
/*
 * mas_wr_append: Attempt to append
 * @wr_mas: the maple write state
 * @new_end: The end of the node after the modification
 *
 * This is currently unsafe in rcu mode since the end of the node may be cached
 * by readers while the node contents may be updated which could result in
 * inaccurate information.
 *
 * Return: True if appended, false otherwise
 */
static inline bool mas_wr_append(struct ma_wr_state *wr_mas,
		unsigned char new_end)
{
	struct ma_state *mas;
	void __rcu **slots;
	unsigned char end;

	mas = wr_mas->mas;
	if (mt_in_rcu(mas->tree))
		return false;

	end = mas->end;
	if (mas->offset != end)
		return false;

	if (new_end < mt_pivots[wr_mas->type]) {
		wr_mas->pivots[new_end] = wr_mas->pivots[end];
		ma_set_meta(wr_mas->node, wr_mas->type, 0, new_end);
	}

	slots = wr_mas->slots;
	if (new_end == end + 1) {
		if (mas->last == wr_mas->r_max) {
			/* Append to end of range */
			rcu_assign_pointer(slots[new_end], wr_mas->entry);
			wr_mas->pivots[end] = mas->index - 1;
			mas->offset = new_end;
		} else {
			/* Append to start of range */
			rcu_assign_pointer(slots[new_end], wr_mas->content);
			wr_mas->pivots[end] = mas->last;
			rcu_assign_pointer(slots[end], wr_mas->entry);
		}
	} else {
		/* Append to the range without touching any boundaries. */
		rcu_assign_pointer(slots[new_end], wr_mas->content);
		wr_mas->pivots[end + 1] = mas->last;
		rcu_assign_pointer(slots[end + 1], wr_mas->entry);
		wr_mas->pivots[end] = mas->index - 1;
		mas->offset = end + 1;
	}

	if (!wr_mas->content || !wr_mas->entry)
		mas_update_gap(mas);

	mas->end = new_end;
	trace_ma_write(__func__, mas, new_end, wr_mas->entry);
	return true;
}
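
/*
 * Illustrative only: when not in RCU mode, a write landing past the last
 * used slot can extend the leaf in place via the append path above instead
 * of allocating a replacement node.  The function name is hypothetical.
 */
static void __maybe_unused mt_example_append_store(void)
{
	struct maple_tree tree;

	mt_init(&tree);
	mtree_store_range(&tree, 0, 9, xa_mk_value(0), GFP_KERNEL);
	/*
	 * Each subsequent store lands after the current end of data, so the
	 * leaf can be extended by writing one new slot and pivot rather
	 * than copying the node (mas_wr_append()).
	 */
	mtree_store_range(&tree, 10, 19, xa_mk_value(1), GFP_KERNEL);
	mtree_store_range(&tree, 20, 29, xa_mk_value(2), GFP_KERNEL);

	mtree_destroy(&tree);
}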
/*
 * mas_wr_bnode() - Slow path for a modification.
 * @wr_mas: The write maple state
 *
 * This is where splits and rebalances end up.
 */
static void mas_wr_bnode(struct ma_wr_state *wr_mas)
{
	struct maple_big_node b_node;

	trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
	memset(&b_node, 0, sizeof(struct maple_big_node));
	mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
	mas_commit_b_node(wr_mas, &b_node, wr_mas->mas->end);
}
static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;
	unsigned char new_end;

	/* Direct replacement */
	if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) {
		rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry);
		if (!!wr_mas->entry ^ !!wr_mas->content)
			mas_update_gap(mas);
		return;
	}

	/*
	 * new_end exceeds the size of the maple node and cannot enter the fast
	 * path.
	 */
	new_end = mas_wr_new_end(wr_mas);
	if (new_end >= mt_slots[wr_mas->type])
		goto slow_path;

	/* Attempt to append */
	if (mas_wr_append(wr_mas, new_end))
		return;

	if (new_end == mas->end && mas_wr_slot_store(wr_mas))
		return;

	if (mas_wr_node_store(wr_mas, new_end))
		return;

	if (mas_is_err(mas))
		return;

slow_path:
	mas_wr_bnode(wr_mas);
}
/*
 * mas_wr_store_entry() - Internal call to store a value
 * @wr_mas: The maple write state
 *
 * Return: The contents that was stored at the index.
 */
static inline void *mas_wr_store_entry(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;

	wr_mas->content = mas_start(mas);
	if (mas_is_none(mas) || mas_is_ptr(mas)) {
		mas_store_root(mas, wr_mas->entry);
		return wr_mas->content;
	}

	if (unlikely(!mas_wr_walk(wr_mas))) {
		mas_wr_spanning_store(wr_mas);
		return wr_mas->content;
	}

	/* At this point, we are at the leaf node that needs to be altered. */
	mas_wr_end_piv(wr_mas);
	/* New root for a single pointer */
	if (unlikely(!mas->index && mas->last == ULONG_MAX)) {
		mas_new_root(mas, wr_mas->entry);
		return wr_mas->content;
	}

	mas_wr_modify(wr_mas);
	return wr_mas->content;
}
/*
 * mas_insert() - Internal call to insert a value
 * @mas: The maple state
 * @entry: The entry to store
 *
 * Return: %NULL or the contents that already exists at the requested index
 * otherwise.  The maple state needs to be checked for error conditions.
 */
static inline void *mas_insert(struct ma_state *mas, void *entry)
{
	MA_WR_STATE(wr_mas, mas, entry);

	/*
	 * Inserting a new range inserts either 0, 1, or 2 pivots within the
	 * tree.  If the insert fits exactly into an existing gap with a value
	 * of NULL, then the slot only needs to be written with the new value.
	 * If the range being inserted is adjacent to another range, then only a
	 * single pivot needs to be inserted (as well as writing the entry).  If
	 * the new range is within a gap but does not touch any other ranges,
	 * then two pivots need to be inserted: the start - 1, and the end.  As
	 * usual, the entry must be written.  Most operations require a new node
	 * to be allocated and replace an existing node to ensure RCU safety,
	 * when in RCU mode.  The exception to requiring a newly allocated node
	 * is when inserting at the end of a node (appending).  When done
	 * carefully, appending can reuse the node in place.
	 */
	wr_mas.content = mas_start(mas);
	if (wr_mas.content)
		goto exists;

	if (mas_is_none(mas) || mas_is_ptr(mas)) {
		mas_store_root(mas, entry);
		return NULL;
	}

	/* spanning writes always overwrite something */
	if (!mas_wr_walk(&wr_mas))
		goto exists;

	/* At this point, we are at the leaf node that needs to be altered. */
	wr_mas.offset_end = mas->offset;
	wr_mas.end_piv = wr_mas.r_max;

	if (wr_mas.content || (mas->last > wr_mas.r_max))
		goto exists;

	mas_wr_modify(&wr_mas);
	return wr_mas.content;

exists:
	mas_set_err(mas, -EEXIST);
	return wr_mas.content;
}
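
/*
 * Illustrative only: unlike a store, an insert refuses to overwrite.  A
 * minimal sketch of the -EEXIST contract described above, using the public
 * wrapper; the function name is hypothetical.
 */
static void __maybe_unused mt_example_insert_semantics(void)
{
	struct maple_tree tree;
	int ret;

	mt_init(&tree);
	ret = mtree_insert_range(&tree, 10, 20, xa_mk_value(1), GFP_KERNEL);
	/* ret == 0: the range was empty and the entry was written. */

	ret = mtree_insert_range(&tree, 15, 25, xa_mk_value(2), GFP_KERNEL);
	/* ret == -EEXIST: the new range overlaps an existing entry. */

	mtree_destroy(&tree);
}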
/*
 * mas_alloc_cyclic() - Internal call to find somewhere to store an entry
 * @mas: The maple state.
 * @startp: Pointer to ID.
 * @entry: The entry to store.
 * @range_lo: Lower bound of range to search.
 * @range_hi: Upper bound of range to search.
 * @next: Pointer to next ID to allocate.
 * @gfp: The GFP_FLAGS to use for allocations.
 *
 * Return: 0 if the allocation succeeded without wrapping, 1 if the
 * allocation succeeded after wrapping, or -EBUSY if there are no
 * free entries.
 */
int mas_alloc_cyclic(struct ma_state *mas, unsigned long *startp,
		void *entry, unsigned long range_lo, unsigned long range_hi,
		unsigned long *next, gfp_t gfp)
{
	unsigned long min = range_lo;
	int ret = 0;

	range_lo = max(min, *next);
	ret = mas_empty_area(mas, range_lo, range_hi, 1);
	if ((mas->tree->ma_flags & MT_FLAGS_ALLOC_WRAPPED) && ret == 0) {
		mas->tree->ma_flags &= ~MT_FLAGS_ALLOC_WRAPPED;
		ret = 1;
	}
	if (ret < 0 && range_lo > min) {
		ret = mas_empty_area(mas, min, range_hi, 1);
		if (ret == 0)
			ret = 1;
	}
	if (ret < 0)
		return ret;

	do {
		mas_insert(mas, entry);
	} while (mas_nomem(mas, gfp));
	if (mas_is_err(mas))
		return xa_err(mas->node);

	*startp = mas->index;
	*next = *startp + 1;
	if (*next == 0)
		mas->tree->ma_flags |= MT_FLAGS_ALLOC_WRAPPED;

	return ret;
}
EXPORT_SYMBOL(mas_alloc_cyclic);
static __always_inline void mas_rewalk(struct ma_state *mas, unsigned long index)
{
retry:
	mas_set(mas, index);
	mas_state_walk(mas);
	if (mas_is_start(mas))
		goto retry;
}

static __always_inline bool mas_rewalk_if_dead(struct ma_state *mas,
		struct maple_node *node, const unsigned long index)
{
	if (unlikely(ma_dead_node(node))) {
		mas_rewalk(mas, index);
		return true;
	}
	return false;
}

/*
 * mas_prev_node() - Find the prev non-null entry at the same level in the
 * tree.  The prev node value will be mas->node[mas->offset] or the status
 * will be set to ma_underflow.
 * @mas: The maple state
 * @min: The lower limit to search
 *
 * Return: 1 if the node is dead, 0 otherwise.
 */
static int mas_prev_node(struct ma_state *mas, unsigned long min)
{
	enum maple_type mt;
	int offset, level;
	void __rcu **slots;
	struct maple_node *node;
	unsigned long *pivots;
	unsigned long max;

	node = mas_mn(mas);
	if (!mas->min)
		goto no_entry;

	max = mas->min - 1;
	if (max < min)
		goto no_entry;

	level = 0;
	do {
		if (ma_is_root(node))
			goto no_entry;

		/* Walk up. */
		if (unlikely(mas_ascend(mas)))
			return 1;
		offset = mas->offset;
		level++;
		node = mas_mn(mas);
	} while (!offset);

	offset--;
	mt = mte_node_type(mas->node);
	while (level > 1) {
		level--;
		slots = ma_slots(node, mt);
		mas->node = mas_slot(mas, slots, offset);
		if (unlikely(ma_dead_node(node)))
			return 1;

		mt = mte_node_type(mas->node);
		node = mas_mn(mas);
		pivots = ma_pivots(node, mt);
		offset = ma_data_end(node, mt, pivots, max);
		if (unlikely(ma_dead_node(node)))
			return 1;
	}

	slots = ma_slots(node, mt);
	mas->node = mas_slot(mas, slots, offset);
	pivots = ma_pivots(node, mt);
	if (unlikely(ma_dead_node(node)))
		return 1;

	if (likely(offset))
		mas->min = pivots[offset - 1] + 1;
	mas->max = max;
	mas->offset = mas_data_end(mas);
	if (unlikely(mte_dead_node(mas->node)))
		return 1;

	mas->end = mas->offset;
	return 0;

no_entry:
	if (unlikely(ma_dead_node(node)))
		return 1;

	mas->status = ma_underflow;
	return 0;
}

/*
 * mas_prev_slot() - Get the entry in the previous slot
 * @mas: The maple state
 * @min: The minimum starting range
 * @empty: Can be empty
 *
 * Return: The entry in the previous slot which is possibly NULL
 */
static void *mas_prev_slot(struct ma_state *mas, unsigned long min, bool empty)
{
	void *entry;
	void __rcu **slots;
	unsigned long pivot;
	enum maple_type type;
	unsigned long *pivots;
	struct maple_node *node;
	unsigned long save_point = mas->index;

retry:
	node = mas_mn(mas);
	type = mte_node_type(mas->node);
	pivots = ma_pivots(node, type);
	if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
		goto retry;

	if (mas->min <= min) {
		pivot = mas_safe_min(mas, pivots, mas->offset);

		if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
			goto retry;

		if (pivot <= min)
			goto underflow;
	}

again:
	if (likely(mas->offset)) {
		mas->offset--;
		mas->last = mas->index - 1;
		mas->index = mas_safe_min(mas, pivots, mas->offset);
	} else {
		if (mas->index <= min)
			goto underflow;

		if (mas_prev_node(mas, min)) {
			mas_rewalk(mas, save_point);
			goto retry;
		}

		if (WARN_ON_ONCE(mas_is_underflow(mas)))
			return NULL;

		mas->last = mas->max;
		mas->offset = mas->end;
		node = mas_mn(mas);
		type = mte_node_type(mas->node);
		pivots = ma_pivots(node, type);
		mas->index = pivots[mas->offset - 1] + 1;
	}

	slots = ma_slots(node, type);
	entry = mas_slot(mas, slots, mas->offset);
	if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
		goto retry;

	if (likely(entry))
		return entry;

	if (!empty) {
		if (mas->index <= min) {
			mas->status = ma_underflow;
			return NULL;
		}

		goto again;
	}

	return entry;

underflow:
	mas->status = ma_underflow;
	return NULL;
}

/*
 * mas_next_node() - Get the next node at the same level in the tree.
 * @mas: The maple state
 * @node: The maple node
 * @max: The maximum pivot value to check.
 *
 * The next value will be mas->node[mas->offset] or the status will be set to
 * ma_overflow.
 * Return: 1 on dead node, 0 otherwise.
 */
static int mas_next_node(struct ma_state *mas, struct maple_node *node,
		unsigned long max)
{
	unsigned long min;
	unsigned long *pivots;
	struct maple_enode *enode;
	struct maple_node *tmp;
	int level = 0;
	unsigned char node_end;
	enum maple_type mt;
	void __rcu **slots;

	if (mas->max >= max)
		goto overflow;

	min = mas->max + 1;
	level = 0;
	do {
		if (ma_is_root(node))
			goto overflow;

		/* Walk up. */
		if (unlikely(mas_ascend(mas)))
			return 1;

		level++;
		node = mas_mn(mas);
		mt = mte_node_type(mas->node);
		pivots = ma_pivots(node, mt);
		node_end = ma_data_end(node, mt, pivots, mas->max);
		if (unlikely(ma_dead_node(node)))
			return 1;

	} while (unlikely(mas->offset == node_end));

	slots = ma_slots(node, mt);
	mas->offset++;
	enode = mas_slot(mas, slots, mas->offset);
	if (unlikely(ma_dead_node(node)))
		return 1;

	if (level == 1)
		goto descend;

	/* Descend, if necessary */
	while (unlikely(level > 1)) {
		level--;
		mas->node = enode;
		node = mas_mn(mas);
		mt = mte_node_type(mas->node);
		slots = ma_slots(node, mt);
		enode = mas_slot(mas, slots, 0);
		if (unlikely(ma_dead_node(node)))
			return 1;
	}

	mas->offset = 0;
descend:
	pivots = ma_pivots(node, mt);
	mas->max = mas_safe_pivot(mas, pivots, mas->offset, mt);
	tmp = mte_to_node(enode);
	mt = mte_node_type(enode);
	pivots = ma_pivots(tmp, mt);
	mas->end = ma_data_end(tmp, mt, pivots, mas->max);
	if (unlikely(ma_dead_node(node)))
		return 1;

	mas->node = enode;
	mas->min = min;
	return 0;

overflow:
	if (unlikely(ma_dead_node(node)))
		return 1;

	mas->status = ma_overflow;
	return 0;
}

/*
 * mas_next_slot() - Get the entry in the next slot
 * @mas: The maple state
 * @max: The maximum starting range
 * @empty: Can be empty
 *
 * Return: The entry in the next slot which is possibly NULL
 */
static void *mas_next_slot(struct ma_state *mas, unsigned long max, bool empty)
{
	void __rcu **slots;
	unsigned long *pivots;
	unsigned long pivot;
	enum maple_type type;
	struct maple_node *node;
	unsigned long save_point = mas->last;
	void *entry;

retry:
	node = mas_mn(mas);
	type = mte_node_type(mas->node);
	pivots = ma_pivots(node, type);
	if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
		goto retry;

	if (mas->max >= max) {
		if (likely(mas->offset < mas->end))
			pivot = pivots[mas->offset];
		else
			pivot = mas->max;

		if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
			goto retry;

		if (pivot >= max) { /* Was at the limit, next will extend beyond */
			mas->status = ma_overflow;
			return NULL;
		}
	}

	if (likely(mas->offset < mas->end)) {
		mas->index = pivots[mas->offset] + 1;
again:
		mas->offset++;
		if (likely(mas->offset < mas->end))
			mas->last = pivots[mas->offset];
		else
			mas->last = mas->max;
	} else {
		if (mas->last >= max) {
			mas->status = ma_overflow;
			return NULL;
		}

		if (mas_next_node(mas, node, max)) {
			mas_rewalk(mas, save_point);
			goto retry;
		}

		if (WARN_ON_ONCE(mas_is_overflow(mas)))
			return NULL;

		mas->offset = 0;
		mas->index = mas->min;
		node = mas_mn(mas);
		type = mte_node_type(mas->node);
		pivots = ma_pivots(node, type);
		mas->last = pivots[0];
	}

	slots = ma_slots(node, type);
	entry = mt_slot(mas->tree, slots, mas->offset);
	if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
		goto retry;

	if (entry)
		return entry;

	if (!empty) {
		if (mas->offset < mas->end)
			goto again;

		if (mas->last >= max) {
			mas->status = ma_overflow;
			return NULL;
		}

		mas->index = mas->last + 1;
		goto retry;
	}

	return entry;
}

/*
 * mas_next_entry() - Internal function to get the next entry.
 * @mas: The maple state
 * @limit: The maximum range start.
 *
 * Set the @mas->node to the next entry and the range_start to
 * the beginning value for the entry.  Does not check beyond @limit.
 * Sets @mas->index and @mas->last to the range.  Does not update @mas->index
 * and @mas->last on overflow.
 * Restarts on dead nodes.
 *
 * Return: the next entry or %NULL.
 */
static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit)
{
	if (mas->last >= limit) {
		mas->status = ma_overflow;
		return NULL;
	}

	return mas_next_slot(mas, limit, false);
}

/*
 * mas_rev_awalk() - Internal function.  Reverse allocation walk.  Find the
 * highest gap address of a given size in a given node and descend.
 * @mas: The maple state
 * @size: The needed size.
 *
 * Return: True if found in a leaf, false otherwise.
 */
static bool mas_rev_awalk(struct ma_state *mas, unsigned long size,
		unsigned long *gap_min, unsigned long *gap_max)
{
	enum maple_type type = mte_node_type(mas->node);
	struct maple_node *node = mas_mn(mas);
	unsigned long *pivots, *gaps;
	void __rcu **slots;
	unsigned long gap = 0;
	unsigned long max, min;
	unsigned char offset;

	if (unlikely(mas_is_err(mas)))
		return true;

	if (ma_is_dense(type)) {
		/* dense nodes. */
		mas->offset = (unsigned char)(mas->index - mas->min);
		return true;
	}

	pivots = ma_pivots(node, type);
	slots = ma_slots(node, type);
	gaps = ma_gaps(node, type);
	offset = mas->offset;
	min = mas_safe_min(mas, pivots, offset);
	/* Skip out of bounds. */
	while (mas->last < min)
		min = mas_safe_min(mas, pivots, --offset);

	max = mas_safe_pivot(mas, pivots, offset, type);
	while (mas->index <= max) {
		gap = 0;
		if (gaps)
			gap = gaps[offset];
		else if (!mas_slot(mas, slots, offset))
			gap = max - min + 1;

		if (gap) {
			if ((size <= gap) && (size <= mas->last - min + 1))
				break;

			if (!gaps) {
				/* Skip the next slot, it cannot be a gap. */
				if (offset < 2)
					goto ascend;

				offset -= 2;
				max = pivots[offset];
				min = mas_safe_min(mas, pivots, offset);
				continue;
			}
		}

		if (!offset)
			goto ascend;

		offset--;
		max = min - 1;
		min = mas_safe_min(mas, pivots, offset);
	}

	if (unlikely((mas->index > max) || (size - 1 > max - mas->index)))
		goto no_space;

	if (unlikely(ma_is_leaf(type))) {
		mas->offset = offset;
		*gap_min = min;
		*gap_max = min + gap - 1;
		return true;
	}

	/* descend, only happens under lock. */
	mas->node = mas_slot(mas, slots, offset);
	mas->min = min;
	mas->max = max;
	mas->offset = mas_data_end(mas);
	return false;

ascend:
	if (!mte_is_root(mas->node))
		return false;

no_space:
	mas_set_err(mas, -EBUSY);
	return false;
}

static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
{
	enum maple_type type = mte_node_type(mas->node);
	unsigned long pivot, min, gap = 0;
	unsigned char offset, data_end;
	unsigned long *gaps, *pivots;
	void __rcu **slots;
	struct maple_node *node;
	bool found = false;

	if (ma_is_dense(type)) {
		mas->offset = (unsigned char)(mas->index - mas->min);
		return true;
	}

	node = mas_mn(mas);
	pivots = ma_pivots(node, type);
	slots = ma_slots(node, type);
	gaps = ma_gaps(node, type);
	offset = mas->offset;
	min = mas_safe_min(mas, pivots, offset);
	data_end = ma_data_end(node, type, pivots, mas->max);
	for (; offset <= data_end; offset++) {
		pivot = mas_safe_pivot(mas, pivots, offset, type);

		/* Not within lower bounds */
		if (mas->index > pivot)
			goto next_slot;

		if (gaps)
			gap = gaps[offset];
		else if (!mas_slot(mas, slots, offset))
			gap = min(pivot, mas->last) - max(mas->index, min) + 1;
		else
			goto next_slot;

		if (gap >= size) {
			if (ma_is_leaf(type)) {
				found = true;
				goto done;
			}
			if (mas->index <= pivot) {
				mas->node = mas_slot(mas, slots, offset);
				mas->min = min;
				mas->max = pivot;
				offset = 0;
				break;
			}
		}
next_slot:
		min = pivot + 1;
		if (mas->last <= pivot) {
			mas_set_err(mas, -EBUSY);
			return true;
		}
	}

	if (mte_is_root(mas->node))
		found = true;
done:
	mas->offset = offset;
	return found;
}

/*
 * mas_walk() - Search for @mas->index in the tree.
 * @mas: The maple state.
 *
 * mas->index and mas->last will be set to the range if there is a value.  If
 * mas->status is ma_none, reset to ma_start.
 *
 * Return: the entry at the location or %NULL.
 */
void *mas_walk(struct ma_state *mas)
{
	void *entry;

	if (!mas_is_active(mas) || !mas_is_start(mas))
		mas->status = ma_start;
retry:
	entry = mas_state_walk(mas);
	if (mas_is_start(mas)) {
		goto retry;
	} else if (mas_is_none(mas)) {
		mas->index = 0;
		mas->last = ULONG_MAX;
	} else if (mas_is_ptr(mas)) {
		if (!mas->index) {
			mas->last = 0;
			return entry;
		}

		mas->index = 1;
		mas->last = ULONG_MAX;
		mas->status = ma_none;
		return NULL;
	}

	return entry;
}
EXPORT_SYMBOL_GPL(mas_walk);

static inline bool mas_rewind_node(struct ma_state *mas)
{
	unsigned char slot;

	do {
		if (mte_is_root(mas->node)) {
			slot = mas->offset;
			if (!slot)
				return false;
		} else {
			mas_ascend(mas);
			slot = mas->offset;
		}
	} while (!slot);

	mas->offset = --slot;
	return true;
}

/*
 * mas_skip_node() - Internal function.  Skip over a node.
 * @mas: The maple state.
 *
 * Return: true if there is another node, false otherwise.
 */
static inline bool mas_skip_node(struct ma_state *mas)
{
	if (mas_is_err(mas))
		return false;

	do {
		if (mte_is_root(mas->node)) {
			if (mas->offset >= mas_data_end(mas)) {
				mas_set_err(mas, -EBUSY);
				return false;
			}
		} else {
			mas_ascend(mas);
		}
	} while (mas->offset >= mas_data_end(mas));

	mas->offset++;
	return true;
}

/*
 * mas_awalk() - Allocation walk.  Search from low address to high, for a gap
 * of @size.
 * @mas: The maple state
 * @size: The size of the gap required
 *
 * Search between @mas->index and @mas->last for a gap of @size.
 */
static inline void mas_awalk(struct ma_state *mas, unsigned long size)
{
	struct maple_enode *last = NULL;

	/*
	 * There are 4 options:
	 * go to child (descend)
	 * go back to parent (ascend)
	 * no gap found. (return, slot == MAPLE_NODE_SLOTS)
	 * found the gap. (return, slot != MAPLE_NODE_SLOTS)
	 */
	while (!mas_is_err(mas) && !mas_anode_descend(mas, size)) {
		if (last == mas->node)
			mas_skip_node(mas);
		else
			last = mas->node;
	}
}

/*
 * mas_sparse_area() - Internal function.  Return upper or lower limit when
 * searching for a gap in an empty tree.
 * @mas: The maple state
 * @min: the minimum range
 * @max: The maximum range
 * @size: The size of the gap
 * @fwd: Searching forward or back
 */
static inline int mas_sparse_area(struct ma_state *mas, unsigned long min,
		unsigned long max, unsigned long size, bool fwd)
{
	if (!unlikely(mas_is_none(mas)) && min == 0) {
		min++;
		/*
		 * At this time, min is increased, we need to recheck whether
		 * the size is satisfied.
		 */
		if (min > max || max - min + 1 < size)
			return -EBUSY;
	}

	if (fwd) {
		mas->index = min;
		mas->last = min + size - 1;
	} else {
		mas->last = max;
		mas->index = max - size + 1;
	}
	return 0;
}

/*
 * mas_empty_area() - Get the lowest address within the range that is
 * sufficient for the size requested.
 * @mas: The maple state
 * @min: The lowest value of the range
 * @max: The highest value of the range
 * @size: The size needed
 *
 * Return: 0 on success, -EINVAL on an invalid request, or -EBUSY when no gap
 * of @size exists in the range.
 */
int mas_empty_area(struct ma_state *mas, unsigned long min,
		unsigned long max, unsigned long size)
{
	unsigned char offset;
	unsigned long *pivots;
	enum maple_type mt;
	struct maple_node *node;

	if (min > max)
		return -EINVAL;

	if (size == 0 || max - min < size - 1)
		return -EINVAL;

	if (mas_is_start(mas))
		mas_start(mas);
	else if (mas->offset >= 2)
		mas->offset -= 2;
	else if (!mas_skip_node(mas))
		return -EBUSY;

	/* Empty set */
	if (mas_is_none(mas) || mas_is_ptr(mas))
		return mas_sparse_area(mas, min, max, size, true);

	/* The start of the window can only be within these values */
	mas->index = min;
	mas->last = max;
	mas_awalk(mas, size);

	if (unlikely(mas_is_err(mas)))
		return xa_err(mas->node);

	offset = mas->offset;
	if (unlikely(offset == MAPLE_NODE_SLOTS))
		return -EBUSY;

	node = mas_mn(mas);
	mt = mte_node_type(mas->node);
	pivots = ma_pivots(node, mt);
	min = mas_safe_min(mas, pivots, offset);
	if (mas->index < min)
		mas->index = min;
	mas->last = mas->index + size - 1;
	mas->end = ma_data_end(node, mt, pivots, mas->max);
	return 0;
}
EXPORT_SYMBOL_GPL(mas_empty_area);

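/*
 * Example (illustrative sketch, not from the original source): find the
 * lowest 16-index gap between 0 and 1000 and claim it.  The tree and entry
 * below are hypothetical; the tree is assumed to track gaps
 * (MT_FLAGS_ALLOC_RANGE).  On success, mas.index and mas.last already
 * describe the claimed range.
 *
 *	MA_STATE(mas, &tree, 0, 0);
 *
 *	mas_lock(&mas);
 *	if (!mas_empty_area(&mas, 0, 1000, 16))
 *		mas_store_gfp(&mas, entry, GFP_KERNEL);	// gap claimed
 *	mas_unlock(&mas);
 */
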
/*
 * mas_empty_area_rev() - Get the highest address within the range that is
 * sufficient for the size requested.
 * @mas: The maple state
 * @min: The lowest value of the range
 * @max: The highest value of the range
 * @size: The size needed
 *
 * Return: 0 on success, -EINVAL on an invalid request, or -EBUSY when no gap
 * of @size exists in the range.
 */
int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
		unsigned long max, unsigned long size)
{
	struct maple_enode *last = mas->node;

	if (min > max)
		return -EINVAL;

	if (size == 0 || max - min < size - 1)
		return -EINVAL;

	if (mas_is_start(mas)) {
		mas_start(mas);
		mas->offset = mas_data_end(mas);
	} else if (mas->offset >= 2) {
		mas->offset -= 2;
	} else if (!mas_rewind_node(mas)) {
		return -EBUSY;
	}

	/* Empty set. */
	if (mas_is_none(mas) || mas_is_ptr(mas))
		return mas_sparse_area(mas, min, max, size, false);

	/* The start of the window can only be within these values. */
	mas->index = min;
	mas->last = max;

	while (!mas_rev_awalk(mas, size, &min, &max)) {
		if (last == mas->node) {
			if (!mas_rewind_node(mas))
				return -EBUSY;
		} else {
			last = mas->node;
		}
	}

	if (mas_is_err(mas))
		return xa_err(mas->node);

	if (unlikely(mas->offset == MAPLE_NODE_SLOTS))
		return -EBUSY;

	/* Trim the upper limit to the max. */
	if (max < mas->last)
		mas->last = max;

	mas->index = mas->last - size + 1;
	mas->end = mas_data_end(mas);
	return 0;
}
EXPORT_SYMBOL_GPL(mas_empty_area_rev);

/*
 * mte_dead_leaves() - Mark all leaves of a node as dead.
 * @enode: The encoded maple node
 * @mt: The maple tree
 * @slots: Pointer to the slot array
 *
 * Must hold the write lock.
 *
 * Return: The number of leaves marked as dead.
 */
static inline
unsigned char mte_dead_leaves(struct maple_enode *enode, struct maple_tree *mt,
			      void __rcu **slots)
{
	struct maple_node *node;
	enum maple_type type;
	void *entry;
	int offset;

	for (offset = 0; offset < mt_slot_count(enode); offset++) {
		entry = mt_slot(mt, slots, offset);
		type = mte_node_type(entry);
		node = mte_to_node(entry);
		/* Use both node and type to catch LE & BE metadata */
		if (!node || !type)
			break;

		mte_set_node_dead(entry);
		node->type = type;
		rcu_assign_pointer(slots[offset], node);
	}

	return offset;
}

/*
 * mte_dead_walk() - Walk down a dead tree to just before the leaves
 * @enode: The maple encoded node
 * @offset: The starting offset
 *
 * Note: This can only be used from the RCU callback context.
 */
static void __rcu **mte_dead_walk(struct maple_enode **enode, unsigned char offset)
{
	struct maple_node *node, *next;
	void __rcu **slots = NULL;

	next = mte_to_node(*enode);
	do {
		*enode = ma_enode_ptr(next);
		node = mte_to_node(*enode);
		slots = ma_slots(node, node->type);
		next = rcu_dereference_protected(slots[offset],
					lock_is_held(&rcu_callback_map));
		offset = 0;
	} while (!ma_is_leaf(next->type));

	return slots;
}

/*
 * mt_free_walk() - Walk & free a tree in the RCU callback context
 * @head: The RCU head that's within the node.
 *
 * Note: This can only be used from the RCU callback context.
 */
static void mt_free_walk(struct rcu_head *head)
{
	void __rcu **slots;
	struct maple_node *node, *start;
	struct maple_enode *enode;
	unsigned char offset;
	enum maple_type type;

	node = container_of(head, struct maple_node, rcu);

	if (ma_is_leaf(node->type))
		goto free_leaf;

	start = node;
	enode = mt_mk_node(node, node->type);
	slots = mte_dead_walk(&enode, 0);
	node = mte_to_node(enode);
	do {
		mt_free_bulk(node->slot_len, slots);
		offset = node->parent_slot + 1;
		enode = node->piv_parent;
		if (mte_to_node(enode) == node)
			goto free_leaf;

		type = mte_node_type(enode);
		slots = ma_slots(mte_to_node(enode), type);
		if ((offset < mt_slots[type]) &&
		    rcu_dereference_protected(slots[offset],
					      lock_is_held(&rcu_callback_map)))
			slots = mte_dead_walk(&enode, offset);
		node = mte_to_node(enode);
	} while ((node != start) || (node->slot_len < offset));

	slots = ma_slots(node, node->type);
	mt_free_bulk(node->slot_len, slots);

free_leaf:
	mt_free_rcu(&node->rcu);
}

static inline void __rcu **mte_destroy_descend(struct maple_enode **enode,
	struct maple_tree *mt, struct maple_enode *prev, unsigned char offset)
{
	struct maple_node *node;
	struct maple_enode *next = *enode;
	void __rcu **slots = NULL;
	enum maple_type type;
	unsigned char next_offset = 0;

	do {
		*enode = next;
		node = mte_to_node(*enode);
		type = mte_node_type(*enode);
		slots = ma_slots(node, type);
		next = mt_slot_locked(mt, slots, next_offset);
		if ((mte_dead_node(next)))
			next = mt_slot_locked(mt, slots, ++next_offset);

		mte_set_node_dead(*enode);
		node->type = type;
		node->piv_parent = prev;
		node->parent_slot = offset;
		offset = next_offset;
		next_offset = 0;
		prev = *enode;
	} while (!mte_is_leaf(next));

	return slots;
}

static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
			    bool free)
{
	void __rcu **slots;
	struct maple_node *node = mte_to_node(enode);
	struct maple_enode *start;

	if (mte_is_leaf(enode)) {
		node->type = mte_node_type(enode);
		goto free_leaf;
	}

	start = enode;
	slots = mte_destroy_descend(&enode, mt, start, 0);
	node = mte_to_node(enode);	// Updated in the above call.
	do {
		enum maple_type type;
		unsigned char offset;
		struct maple_enode *parent, *tmp;

		node->slot_len = mte_dead_leaves(enode, mt, slots);
		if (free)
			mt_free_bulk(node->slot_len, slots);
		offset = node->parent_slot + 1;
		enode = node->piv_parent;
		if (mte_to_node(enode) == node)
			goto free_leaf;

		type = mte_node_type(enode);
		slots = ma_slots(mte_to_node(enode), type);
		if (offset >= mt_slots[type])
			goto next;

		tmp = mt_slot_locked(mt, slots, offset);
		if (mte_node_type(tmp) && mte_to_node(tmp)) {
			parent = enode;
			enode = tmp;
			slots = mte_destroy_descend(&enode, mt, parent, offset);
		}
next:
		node = mte_to_node(enode);
	} while (start != enode);

	node = mte_to_node(enode);
	node->slot_len = mte_dead_leaves(enode, mt, slots);
	if (free)
		mt_free_bulk(node->slot_len, slots);

free_leaf:
	if (free)
		mt_free_rcu(&node->rcu);
	else
		mt_clear_meta(mt, node, node->type);
}

/*
 * mte_destroy_walk() - Free a tree or sub-tree.
 * @enode: the encoded maple node (maple_enode) to start
 * @mt: the tree to free - needed for node types.
 *
 * Must hold the write lock.
 */
static inline void mte_destroy_walk(struct maple_enode *enode,
				    struct maple_tree *mt)
{
	struct maple_node *node = mte_to_node(enode);

	if (mt_in_rcu(mt)) {
		mt_destroy_walk(enode, mt, false);
		call_rcu(&node->rcu, mt_free_walk);
	} else {
		mt_destroy_walk(enode, mt, true);
	}
}

static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
{
	if (!mas_is_active(wr_mas->mas)) {
		if (mas_is_start(wr_mas->mas))
			return;

		if (unlikely(mas_is_paused(wr_mas->mas)))
			goto reset;

		if (unlikely(mas_is_none(wr_mas->mas)))
			goto reset;

		if (unlikely(mas_is_overflow(wr_mas->mas)))
			goto reset;

		if (unlikely(mas_is_underflow(wr_mas->mas)))
			goto reset;
	}

	/*
	 * A less strict version of mas_is_span_wr() where we allow spanning
	 * writes within this node.  This is to stop partial walks in
	 * mas_prealloc() from being reset.
	 */
	if (wr_mas->mas->last > wr_mas->mas->max)
		goto reset;

	if (wr_mas->entry)
		return;

	if (mte_is_leaf(wr_mas->mas->node) &&
	    wr_mas->mas->last == wr_mas->mas->max)
		goto reset;

	return;

reset:
	mas_reset(wr_mas->mas);
}

/**
 * mas_store() - Store an @entry.
 * @mas: The maple state.
 * @entry: The entry to store.
 *
 * The @mas->index and @mas->last are used to set the range for the @entry.
 * Note: The @mas should have pre-allocated entries to ensure there is memory
 * to store the entry.  Please see mas_expected_entries()/mas_destroy() for
 * more details.
 *
 * Return: the first entry between mas->index and mas->last or %NULL.
 */
void *mas_store(struct ma_state *mas, void *entry)
{
	MA_WR_STATE(wr_mas, mas, entry);

	trace_ma_write(__func__, mas, 0, entry);
#ifdef CONFIG_DEBUG_MAPLE_TREE
	if (MAS_WARN_ON(mas, mas->index > mas->last))
		pr_err("Error %lX > %lX %p\n", mas->index, mas->last, entry);
#endif
	if (mas->index > mas->last) {
		mas_set_err(mas, -EINVAL);
		return NULL;
	}

	/*
	 * Storing is the same operation as insert with the added caveat that it
	 * can overwrite entries.  Although this seems simple enough, one may
	 * want to examine what happens if a single store operation was to
	 * overwrite multiple entries within a self-balancing B-Tree.
	 */
	mas_wr_store_setup(&wr_mas);
	mas_wr_store_entry(&wr_mas);
	return wr_mas.content;
}
EXPORT_SYMBOL_GPL(mas_store);

/**
 * mas_store_gfp() - Store a value into the tree.
 * @mas: The maple state
 * @entry: The entry to store
 * @gfp: The GFP_FLAGS to use for allocations if necessary.
 *
 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could
 * not be allocated.
 */
int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp)
{
	MA_WR_STATE(wr_mas, mas, entry);

	mas_wr_store_setup(&wr_mas);
	trace_ma_write(__func__, mas, 0, entry);
retry:
	mas_wr_store_entry(&wr_mas);
	if (unlikely(mas_nomem(mas, gfp)))
		goto retry;

	if (unlikely(mas_is_err(mas)))
		return xa_err(mas->node);

	return 0;
}
EXPORT_SYMBOL_GPL(mas_store_gfp);

/**
 * mas_store_prealloc() - Store a value into the tree using memory
 * preallocated in the maple state.
 * @mas: The maple state
 * @entry: The entry to store.
 */
void mas_store_prealloc(struct ma_state *mas, void *entry)
{
	MA_WR_STATE(wr_mas, mas, entry);

	mas_wr_store_setup(&wr_mas);
	trace_ma_write(__func__, mas, 0, entry);
	mas_wr_store_entry(&wr_mas);
	MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas));
	mas_destroy(mas);
}
EXPORT_SYMBOL_GPL(mas_store_prealloc);

/**
 * mas_preallocate() - Preallocate enough nodes for a store operation
 * @mas: The maple state
 * @entry: The entry that will be stored
 * @gfp: The GFP_FLAGS to use for allocations.
 *
 * Return: 0 on success, -ENOMEM if memory could not be allocated.
 */
int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
{
	MA_WR_STATE(wr_mas, mas, entry);
	unsigned char node_size;
	int request = 1;
	int ret;

	if (unlikely(!mas->index && mas->last == ULONG_MAX))
		goto ask_now;

	mas_wr_store_setup(&wr_mas);
	wr_mas.content = mas_start(mas);
	/* Root expand */
	if (unlikely(mas_is_none(mas) || mas_is_ptr(mas)))
		goto ask_now;

	if (unlikely(!mas_wr_walk(&wr_mas))) {
		/* Spanning store, use worst case for now */
		request = 1 + mas_mt_height(mas) * 3;
		goto ask_now;
	}

	/* At this point, we are at the leaf node that needs to be altered. */
	/* Exact fit, no nodes needed. */
	if (wr_mas.r_min == mas->index && wr_mas.r_max == mas->last)
		return 0;

	mas_wr_end_piv(&wr_mas);
	node_size = mas_wr_new_end(&wr_mas);

	/* Slot store, does not require additional nodes */
	if (node_size == mas->end) {
		/* reuse node */
		if (!mt_in_rcu(mas->tree))
			return 0;
		/* shifting boundary */
		if (wr_mas.offset_end - mas->offset == 1)
			return 0;
	}

	if (node_size >= mt_slots[wr_mas.type]) {
		/* Split, worst case for now. */
		request = 1 + mas_mt_height(mas) * 2;
		goto ask_now;
	}

	/* New root needs a single node */
	if (unlikely(mte_is_root(mas->node)))
		goto ask_now;

	/* Potential spanning rebalance collapsing a node, use worst-case */
	if (node_size - 1 <= mt_min_slots[wr_mas.type])
		request = mas_mt_height(mas) * 2 - 1;

	/* node store, slot store needs one node */
ask_now:
	mas_node_count_gfp(mas, request, gfp);
	mas->mas_flags |= MA_STATE_PREALLOC;
	if (likely(!mas_is_err(mas)))
		return 0;

	mas_set_alloc_req(mas, 0);
	ret = xa_err(mas->node);
	mas_reset(mas);
	mas_destroy(mas);
	mas_reset(mas);
	return ret;
}
EXPORT_SYMBOL_GPL(mas_preallocate);

/*
 * mas_destroy() - destroy a maple state.
 * @mas: The maple state
 *
 * Upon completion, check the left-most node and rebalance against the node to
 * the right if necessary.  Frees any allocated nodes associated with this
 * maple state.
 */
void mas_destroy(struct ma_state *mas)
{
	struct maple_alloc *node;
	unsigned long total;

	/*
	 * When using mas_for_each() to insert an expected number of elements,
	 * it is possible that the number inserted is less than the expected
	 * number.  To fix an invalid final node, a check is performed here to
	 * rebalance the previous node with the final node.
	 */
	if (mas->mas_flags & MA_STATE_REBALANCE) {
		unsigned char end;

		mas_start(mas);
		mtree_range_walk(mas);
		end = mas->end + 1;
		if (end < mt_min_slot_count(mas->node) - 1)
			mas_destroy_rebalance(mas, end);

		mas->mas_flags &= ~MA_STATE_REBALANCE;
	}
	mas->mas_flags &= ~(MA_STATE_BULK|MA_STATE_PREALLOC);

	total = mas_allocated(mas);
	while (total) {
		node = mas->alloc;
		mas->alloc = node->slot[0];
		if (node->node_count > 1) {
			size_t count = node->node_count - 1;

			mt_free_bulk(count, (void __rcu **)&node->slot[1]);
			total -= count;
		}
		mt_free_one(ma_mnode_ptr(node));
		total--;
	}

	mas->alloc = NULL;
}
EXPORT_SYMBOL_GPL(mas_destroy);

/*
 * mas_expected_entries() - Set the expected number of entries that will be inserted.
 * @mas: The maple state
 * @nr_entries: The number of expected entries.
 *
 * This will attempt to pre-allocate enough nodes to store the expected number
 * of entries.  The allocations will occur using the bulk allocator interface
 * for speed.  Please call mas_destroy() on the @mas after inserting the entries
 * to ensure any unused nodes are freed.
 *
 * Return: 0 on success, -ENOMEM if memory could not be allocated.
 */
int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
{
	int nonleaf_cap = MAPLE_ARANGE64_SLOTS - 2;
	struct maple_enode *enode = mas->node;
	int nr_nodes;
	int ret;

	/*
	 * Sometimes it is necessary to duplicate a tree to a new tree, such as
	 * forking a process and duplicating the VMAs from one tree to a new
	 * tree.  When such a situation arises, it is known that the new tree is
	 * not going to be used until the entire tree is populated.  For
	 * performance reasons, it is best to use a bulk load with RCU disabled.
	 * This allows for optimistic splitting that favours the left and reuse
	 * of nodes during the operation.
	 */

	/* Optimize splitting for bulk insert in-order */
	mas->mas_flags |= MA_STATE_BULK;

	/*
	 * Avoid overflow, assume a gap between each entry and a trailing null.
	 * If this is wrong, it just means allocation can happen during
	 * insertion of entries.
	 */
	nr_nodes = max(nr_entries, nr_entries * 2 + 1);
	if (!mt_is_alloc(mas->tree))
		nonleaf_cap = MAPLE_RANGE64_SLOTS - 2;

	/* Leaves; reduce slots to keep space for expansion */
	nr_nodes = DIV_ROUND_UP(nr_nodes, MAPLE_RANGE64_SLOTS - 2);
	/* Internal nodes */
	nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
	/* Add working room for split (2 nodes) + new parents */
	mas_node_count_gfp(mas, nr_nodes + 3, GFP_KERNEL);

	/* Detect if allocations run out */
	mas->mas_flags |= MA_STATE_PREALLOC;

	if (!mas_is_err(mas))
		return 0;

	ret = xa_err(mas->node);
	mas->node = enode;
	mas->mas_flags &= ~(MA_STATE_BULK|MA_STATE_PREALLOC);
	return ret;
}
EXPORT_SYMBOL_GPL(mas_expected_entries);

static bool mas_next_setup(struct ma_state *mas, unsigned long max,
		void **entry)
{
	bool was_none = mas_is_none(mas);

	if (unlikely(mas->last >= max)) {
		mas->status = ma_overflow;
		return true;
	}

	switch (mas->status) {
	case ma_active:
		return false;
	case ma_none:
		fallthrough;
	case ma_pause:
		mas->status = ma_start;
		fallthrough;
	case ma_start:
		mas_walk(mas); /* Retries on dead nodes handled by mas_walk */
		break;
	case ma_overflow:
		/* Overflowed before, but the max changed */
		mas->status = ma_active;
		break;
	case ma_underflow:
		/* The user expects the mas to be one before where it is */
		mas->status = ma_active;
		*entry = mas_walk(mas);
		if (*entry)
			return true;
		break;
	case ma_root:
		break;
	case ma_error:
		return true;
	}

	if (likely(mas_is_active(mas))) /* Fast path */
		return false;

	if (mas_is_ptr(mas)) {
		*entry = NULL;
		if (was_none && mas->index == 0) {
			mas->index = mas->last = 0;
			return true;
		}
		mas->index = 1;
		mas->last = ULONG_MAX;
		mas->status = ma_none;
		return true;
	}

	if (mas_is_none(mas))
		return true;

	return false;
}

/**
 * mas_next() - Get the next entry.
 * @mas: The maple state
 * @max: The maximum index to check.
 *
 * Returns the next entry after @mas->index.
 * Must hold rcu_read_lock or the write lock.
 * Can return the zero entry.
 *
 * Return: The next entry or %NULL
 */
void *mas_next(struct ma_state *mas, unsigned long max)
{
	void *entry = NULL;

	if (mas_next_setup(mas, max, &entry))
		return entry;

	/* Retries on dead nodes handled by mas_next_slot */
	return mas_next_slot(mas, max, false);
}
EXPORT_SYMBOL_GPL(mas_next);

/**
 * mas_next_range() - Advance the maple state to the next range
 * @mas: The maple state
 * @max: The maximum index to check.
 *
 * Sets @mas->index and @mas->last to the range.
 * Must hold rcu_read_lock or the write lock.
 * Can return the zero entry.
 *
 * Return: The next entry or %NULL
 */
void *mas_next_range(struct ma_state *mas, unsigned long max)
{
	void *entry = NULL;

	if (mas_next_setup(mas, max, &entry))
		return entry;

	/* Retries on dead nodes handled by mas_next_slot */
	return mas_next_slot(mas, max, true);
}
EXPORT_SYMBOL_GPL(mas_next_range);

/**
 * mt_next() - get the next value in the maple tree
 * @mt: The maple tree
 * @index: The start index
 * @max: The maximum index to check
 *
 * Takes RCU read lock internally to protect the search, which does not
 * protect the returned pointer after dropping RCU read lock.
 * See also: Documentation/core-api/maple_tree.rst
 *
 * Return: The entry higher than @index or %NULL if nothing is found.
 */
void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max)
{
	void *entry = NULL;
	MA_STATE(mas, mt, index, index);

	rcu_read_lock();
	entry = mas_next(&mas, max);
	rcu_read_unlock();
	return entry;
}
EXPORT_SYMBOL_GPL(mt_next);

static bool mas_prev_setup(struct ma_state *mas, unsigned long min, void **entry)
{
	if (unlikely(mas->index <= min)) {
		mas->status = ma_underflow;
		return true;
	}

	switch (mas->status) {
	case ma_active:
		return false;
	case ma_start:
		break;
	case ma_none:
		fallthrough;
	case ma_pause:
		mas->status = ma_start;
		break;
	case ma_underflow:
		/* underflowed before but the min changed */
		mas->status = ma_active;
		break;
	case ma_overflow:
		/* User expects mas to be one after where it is */
		mas->status = ma_active;
		*entry = mas_walk(mas);
		if (*entry)
			return true;
		break;
	case ma_root:
		break;
	case ma_error:
		return true;
	}

	if (mas_is_start(mas))
		mas_walk(mas);

	if (unlikely(mas_is_ptr(mas))) {
		if (!mas->index) {
			mas->status = ma_none;
			return true;
		}
		mas->index = mas->last = 0;
		*entry = mas_root(mas);
		return true;
	}

	if (mas_is_none(mas)) {
		if (mas->index) {
			/* Walked to out-of-range pointer? */
			mas->index = mas->last = 0;
			mas->status = ma_root;
			*entry = mas_root(mas);
			return true;
		}
		return true;
	}

	return false;
}

/**
 * mas_prev() - Get the previous entry
 * @mas: The maple state
 * @min: The minimum value to check.
 *
 * Must hold rcu_read_lock or the write lock.
 * Will reset mas to ma_start if the status is ma_none.  Stops at the lower
 * limit @min and sets the status to ma_underflow.
 *
 * Return: the previous value or %NULL.
 */
void *mas_prev(struct ma_state *mas, unsigned long min)
{
	void *entry = NULL;

	if (mas_prev_setup(mas, min, &entry))
		return entry;

	return mas_prev_slot(mas, min, false);
}
EXPORT_SYMBOL_GPL(mas_prev);

/**
 * mas_prev_range() - Advance to the previous range
 * @mas: The maple state
 * @min: The minimum value to check.
 *
 * Sets @mas->index and @mas->last to the range.
 * Must hold rcu_read_lock or the write lock.
 * Will reset mas to ma_start if the status is ma_none.  Stops at the lower
 * limit @min and sets the status to ma_underflow.
 *
 * Return: the previous value or %NULL.
 */
void *mas_prev_range(struct ma_state *mas, unsigned long min)
{
	void *entry = NULL;

	if (mas_prev_setup(mas, min, &entry))
		return entry;

	return mas_prev_slot(mas, min, true);
}
EXPORT_SYMBOL_GPL(mas_prev_range);

/**
 * mt_prev() - get the previous value in the maple tree
 * @mt: The maple tree
 * @index: The start index
 * @min: The minimum index to check
 *
 * Takes RCU read lock internally to protect the search, which does not
 * protect the returned pointer after dropping RCU read lock.
 * See also: Documentation/core-api/maple_tree.rst
 *
 * Return: The entry before @index or %NULL if nothing is found.
 */
void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min)
{
	void *entry = NULL;
	MA_STATE(mas, mt, index, index);

	rcu_read_lock();
	entry = mas_prev(&mas, min);
	rcu_read_unlock();
	return entry;
}
EXPORT_SYMBOL_GPL(mt_prev);

/**
 * mas_pause() - Pause a mas_find/mas_for_each to drop the lock.
 * @mas: The maple state to pause
 *
 * Some users need to pause a walk and drop the lock they're holding in
 * order to yield to a higher priority thread or carry out an operation
 * on an entry.  Those users should call this function before they drop
 * the lock.  It resets the @mas to be suitable for the next iteration
 * of the loop after the user has reacquired the lock.  If most entries
 * found during a walk require you to call mas_pause(), the mt_for_each()
 * iterator may be more appropriate.
 */
void mas_pause(struct ma_state *mas)
{
	mas->status = ma_pause;
	mas->node = NULL;
}
EXPORT_SYMBOL_GPL(mas_pause);

/*
 * mas_find_setup() - Internal function to set up mas_find*().
 * @mas: The maple state
 * @max: The maximum index
 * @entry: Pointer to the entry
 *
 * Return: true if entry is the answer, false otherwise.
 */
static __always_inline bool mas_find_setup(struct ma_state *mas,
		unsigned long max, void **entry)
{
	switch (mas->status) {
	case ma_active:
		if (mas->last < max)
			return false;
		return true;
	case ma_start:
		break;
	case ma_pause:
		if (unlikely(mas->last >= max))
			return true;

		mas->index = ++mas->last;
		mas->status = ma_start;
		break;
	case ma_none:
		if (unlikely(mas->last >= max))
			return true;

		mas->index = mas->last;
		mas->status = ma_start;
		break;
	case ma_underflow:
		/* mas is pointing at entry before; unable to go lower */
		if (unlikely(mas->index >= max)) {
			mas->status = ma_overflow;
			return true;
		}

		mas->status = ma_active;
		*entry = mas_walk(mas);
		if (*entry)
			return true;
		break;
	case ma_overflow:
		if (unlikely(mas->last >= max))
			return true;

		mas->status = ma_active;
		*entry = mas_walk(mas);
		if (*entry)
			return true;
		break;
	case ma_root:
		break;
	case ma_error:
		return true;
	}

	if (mas_is_start(mas)) {
		/* First run or continue */
		if (mas->index > max)
			return true;

		*entry = mas_walk(mas);
		if (*entry)
			return true;
	}

	if (unlikely(mas_is_ptr(mas)))
		goto ptr_out_of_range;

	if (unlikely(mas_is_none(mas)))
		return true;

	if (mas->index == max)
		return true;

	return false;

ptr_out_of_range:
	mas->status = ma_none;
	mas->index = 1;
	mas->last = ULONG_MAX;
	return true;
}

/**
 * mas_find() - On the first call, find the entry at or after mas->index up to
 * %max.  Otherwise, find the entry after mas->index.
 * @mas: The maple state
 * @max: The maximum value to check.
 *
 * Must hold rcu_read_lock or the write lock.
 * If an entry exists, last and index are updated accordingly.
 * May set @mas->status to ma_overflow.
 *
 * Return: The entry or %NULL.
 */
void *mas_find(struct ma_state *mas, unsigned long max)
{
	void *entry = NULL;

	if (mas_find_setup(mas, max, &entry))
		return entry;

	/* Retries on dead nodes handled by mas_next_slot */
	entry = mas_next_slot(mas, max, false);
	/* Ignore overflow */
	mas->status = ma_active;
	return entry;
}
EXPORT_SYMBOL_GPL(mas_find);

/**
 * mas_find_range() - On the first call, find the entry at or after
 * mas->index up to %max.  Otherwise, advance to the next slot after
 * mas->index.
 * @mas: The maple state
 * @max: The maximum value to check.
 *
 * Must hold rcu_read_lock or the write lock.
 * If an entry exists, last and index are updated accordingly.
 * May set @mas->status to ma_overflow.
 *
 * Return: The entry or %NULL.
 */
void *mas_find_range(struct ma_state *mas, unsigned long max)
{
	void *entry = NULL;

	if (mas_find_setup(mas, max, &entry))
		return entry;

	/* Retries on dead nodes handled by mas_next_slot */
	return mas_next_slot(mas, max, true);
}
EXPORT_SYMBOL_GPL(mas_find_range);

/*
 * mas_find_rev_setup() - Internal function to set up mas_find_*_rev()
 * @mas: The maple state
 * @min: The minimum index
 * @entry: Pointer to the entry
 *
 * Return: true if entry is the answer, false otherwise.
 */
static bool mas_find_rev_setup(struct ma_state *mas, unsigned long min,
		void **entry)
{
	switch (mas->status) {
	case ma_active:
		goto active;
	case ma_start:
		break;
	case ma_pause:
		if (unlikely(mas->index <= min)) {
			mas->status = ma_underflow;
			return true;
		}
		mas->last = --mas->index;
		mas->status = ma_start;
		break;
	case ma_none:
		if (mas->index <= min)
			goto none;

		mas->last = mas->index;
		mas->status = ma_start;
		break;
	case ma_overflow: /* user expects the mas to be one after where it is */
		if (unlikely(mas->index <= min)) {
			mas->status = ma_underflow;
			return true;
		}

		mas->status = ma_active;
		break;
	case ma_underflow: /* user expects the mas to be one before where it is */
		if (unlikely(mas->index <= min))
			return true;

		mas->status = ma_active;
		break;
	case ma_root:
		break;
	case ma_error:
		return true;
	}

	if (mas_is_start(mas)) {
		/* First run or continue */
		if (mas->index < min)
			return true;

		*entry = mas_walk(mas);
		if (*entry)
			return true;
	}

	if (unlikely(mas_is_ptr(mas)))
		goto none;

	if (unlikely(mas_is_none(mas))) {
		/*
		 * Walked to the location, and there was nothing so the previous
		 * location is 0.
		 */
		mas->last = mas->index = 0;
		mas->status = ma_root;
		*entry = mas_root(mas);
		return true;
	}

active:
	if (mas->index < min)
		return true;

	return false;

none:
	mas->status = ma_none;
	return true;
}

/**
 * mas_find_rev() - On the first call, find the first non-null entry at or
 * below mas->index down to %min.  Otherwise, find the first non-null entry
 * below mas->index down to %min.
 * @mas: The maple state
 * @min: The minimum value to check.
 *
 * Must hold rcu_read_lock or the write lock.
 * If an entry exists, last and index are updated accordingly.
 * May set @mas->status to ma_underflow.
 *
 * Return: The entry or %NULL.
 */
void *mas_find_rev(struct ma_state *mas, unsigned long min)
{
	void *entry = NULL;

	if (mas_find_rev_setup(mas, min, &entry))
		return entry;

	/* Retries on dead nodes handled by mas_prev_slot */
	return mas_prev_slot(mas, min, false);
}
EXPORT_SYMBOL_GPL(mas_find_rev);

/**
 * mas_find_range_rev() - On the first call, find the first non-null entry at
 * or below mas->index down to %min.  Otherwise, advance to the previous slot
 * after mas->index down to %min.
 * @mas: The maple state
 * @min: The minimum value to check.
 *
 * Must hold rcu_read_lock or the write lock.
 * If an entry exists, last and index are updated accordingly.
 * May set @mas->status to ma_underflow.
 *
 * Return: The entry or %NULL.
 */
void *mas_find_range_rev(struct ma_state *mas, unsigned long min)
{
	void *entry = NULL;

	if (mas_find_rev_setup(mas, min, &entry))
		return entry;

	/* Retries on dead nodes handled by mas_prev_slot */
	return mas_prev_slot(mas, min, true);
}
EXPORT_SYMBOL_GPL(mas_find_range_rev);

/**
 * mas_erase() - Find the range in which index resides and erase the entire
 * range.
 * @mas: The maple state
 *
 * Must hold the write lock.
 * Searches for @mas->index, sets @mas->index and @mas->last to the range and
 * erases that range.
 *
 * Return: the entry that was erased or %NULL, @mas->index and @mas->last are
 * updated.
 */
void *mas_erase(struct ma_state *mas)
{
	void *entry;
	MA_WR_STATE(wr_mas, mas, NULL);

	if (!mas_is_active(mas) || !mas_is_start(mas))
		mas->status = ma_start;

	/* Retry unnecessary when holding the write lock. */
	entry = mas_state_walk(mas);
	if (!entry)
		return NULL;

write_retry:
	/* Must reset to ensure spanning writes of last slot are detected */
	mas_reset(mas);
	mas_wr_store_setup(&wr_mas);
	mas_wr_store_entry(&wr_mas);
	if (mas_nomem(mas, GFP_KERNEL))
		goto write_retry;

	return entry;
}
EXPORT_SYMBOL_GPL(mas_erase);

/*
 * mas_nomem() - Check if there was an error allocating and do the allocation
 * if necessary.  If there are allocations, then free them.
 * @mas: The maple state
 * @gfp: The GFP_FLAGS to use for allocations
 *
 * Return: true on allocation, false otherwise.
 */
bool mas_nomem(struct ma_state *mas, gfp_t gfp)
	__must_hold(mas->tree->ma_lock)
{
	if (likely(mas->node != MA_ERROR(-ENOMEM))) {
		mas_destroy(mas);
		return false;
	}

	if (gfpflags_allow_blocking(gfp) && !mt_external_lock(mas->tree)) {
		mtree_unlock(mas->tree);
		mas_alloc_nodes(mas, gfp);
		mtree_lock(mas->tree);
	} else {
		mas_alloc_nodes(mas, gfp);
	}

	if (!mas_allocated(mas))
		return false;

	mas->status = ma_start;
	return true;
}

void __init maple_tree_init(void)
{
	maple_node_cache = kmem_cache_create("maple_node",
			sizeof(struct maple_node), sizeof(struct maple_node),
			SLAB_PANIC, NULL);
}

/**
 * mtree_load() - Load a value stored in a maple tree
 * @mt: The maple tree
 * @index: The index to load
 *
 * Return: the entry or %NULL
 */
void *mtree_load(struct maple_tree *mt, unsigned long index)
{
	MA_STATE(mas, mt, index, index);
	void *entry;

	trace_ma_read(__func__, &mas);
	rcu_read_lock();
retry:
	entry = mas_start(&mas);
	if (unlikely(mas_is_none(&mas)))
		goto unlock;

	if (unlikely(mas_is_ptr(&mas))) {
		if (index)
			entry = NULL;

		goto unlock;
	}

	entry = mtree_lookup_walk(&mas);
	if (!entry && unlikely(mas_is_start(&mas)))
		goto retry;
unlock:
	rcu_read_unlock();
	if (xa_is_zero(entry))
		return NULL;

	return entry;
}
EXPORT_SYMBOL(mtree_load);

/**
 * mtree_store_range() - Store an entry at a given range.
 * @mt: The maple tree
 * @index: The start of the range
 * @last: The end of the range
 * @entry: The entry to store
 * @gfp: The GFP_FLAGS to use for allocations
 *
 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could
 * not be allocated.
 */
int mtree_store_range(struct maple_tree *mt, unsigned long index,
		unsigned long last, void *entry, gfp_t gfp)
{
	MA_STATE(mas, mt, index, last);
	MA_WR_STATE(wr_mas, &mas, entry);

	trace_ma_write(__func__, &mas, 0, entry);
	if (WARN_ON_ONCE(xa_is_advanced(entry)))
		return -EINVAL;

	if (index > last)
		return -EINVAL;

	mtree_lock(mt);
retry:
	mas_wr_store_entry(&wr_mas);
	if (mas_nomem(&mas, gfp))
		goto retry;

	mtree_unlock(mt);
	if (mas_is_err(&mas))
		return xa_err(mas.node);

	return 0;
}
EXPORT_SYMBOL(mtree_store_range);

/**
 * mtree_store() - Store an entry at a given index.
 * @mt: The maple tree
 * @index: The index to store the value
 * @entry: The entry to store
 * @gfp: The GFP_FLAGS to use for allocations
 *
 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could
 * not be allocated.
 */
int mtree_store(struct maple_tree *mt, unsigned long index, void *entry,
		 gfp_t gfp)
{
	return mtree_store_range(mt, index, index, entry, gfp);
}
EXPORT_SYMBOL(mtree_store);

/**
 * mtree_insert_range() - Insert an entry at a given range if there is no value.
 * @mt: The maple tree
 * @first: The start of the range
 * @last: The end of the range
 * @entry: The entry to store
 * @gfp: The GFP_FLAGS to use for allocations.
 *
 * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
 * request, -ENOMEM if memory could not be allocated.
 */
int mtree_insert_range(struct maple_tree *mt, unsigned long first,
		unsigned long last, void *entry, gfp_t gfp)
{
	MA_STATE(ms, mt, first, last);

	if (WARN_ON_ONCE(xa_is_advanced(entry)))
		return -EINVAL;

	if (first > last)
		return -EINVAL;

	mtree_lock(mt);
retry:
	mas_insert(&ms, entry);
	if (mas_nomem(&ms, gfp))
		goto retry;

	mtree_unlock(mt);
	if (mas_is_err(&ms))
		return xa_err(ms.node);

	return 0;
}
EXPORT_SYMBOL(mtree_insert_range);

/**
 * mtree_insert() - Insert an entry at a given index if there is no value.
 * @mt: The maple tree
 * @index : The index to store the value
 * @entry: The entry to store
 * @gfp: The GFP_FLAGS to use for allocations.
 *
 * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
 * request, -ENOMEM if memory could not be allocated.
 */
int mtree_insert(struct maple_tree *mt, unsigned long index, void *entry,
		 gfp_t gfp)
{
	return mtree_insert_range(mt, index, index, entry, gfp);
}
EXPORT_SYMBOL(mtree_insert);

int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
		void *entry, unsigned long size, unsigned long min,
		unsigned long max, gfp_t gfp)
{
	int ret = 0;

	MA_STATE(mas, mt, 0, 0);
	if (!mt_is_alloc(mt))
		return -EINVAL;

	if (WARN_ON_ONCE(mt_is_reserved(entry)))
		return -EINVAL;

	mtree_lock(mt);
retry:
	ret = mas_empty_area(&mas, min, max, size);
	if (ret)
		goto unlock;

	mas_insert(&mas, entry);
	/*
	 * mas_nomem() may release the lock, causing the allocated area
	 * to be unavailable, so try to allocate a free area again.
	 */
	if (mas_nomem(&mas, gfp))
		goto retry;

	if (mas_is_err(&mas))
		ret = xa_err(mas.node);
	else
		*startp = mas.index;

unlock:
	mtree_unlock(mt);
	return ret;
}
EXPORT_SYMBOL(mtree_alloc_range);

/**
 * mtree_alloc_cyclic() - Find somewhere to store this entry in the tree.
 * @mt: The maple tree.
 * @startp: Pointer to ID.
 * @entry: The entry to store.
 * @range_lo: Lower bound of range to search.
 * @range_hi: Upper bound of range to search.
 * @next: Pointer to next ID to allocate.
 * @gfp: The GFP_FLAGS to use for allocations.
 *
 * Finds an empty entry in @mt after @next, stores the new index into
 * the @startp pointer, stores the entry at that index, then updates @next.
 *
 * @mt must be initialized with the MT_FLAGS_ALLOC_RANGE flag.
 *
 * Context: Any context.  Takes and releases the mt.lock.  May sleep if
 * the @gfp flags permit.
 *
 * Return: 0 if the allocation succeeded without wrapping, 1 if the
 * allocation succeeded after wrapping, -ENOMEM if memory could not be
 * allocated, -EINVAL if @mt cannot be used, or -EBUSY if there are no
 * free entries.
 */
int mtree_alloc_cyclic(struct maple_tree *mt, unsigned long *startp,
		void *entry, unsigned long range_lo, unsigned long range_hi,
		unsigned long *next, gfp_t gfp)
{
	int ret;

	MA_STATE(mas, mt, 0, 0);

	if (!mt_is_alloc(mt))
		return -EINVAL;
	if (WARN_ON_ONCE(mt_is_reserved(entry)))
		return -EINVAL;
	mtree_lock(mt);
	ret = mas_alloc_cyclic(&mas, startp, entry, range_lo, range_hi,
			       next, gfp);
	mtree_unlock(mt);
	return ret;
}
EXPORT_SYMBOL(mtree_alloc_cyclic);

int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
		void *entry, unsigned long size, unsigned long min,
		unsigned long max, gfp_t gfp)
{
	int ret = 0;

	MA_STATE(mas, mt, 0, 0);
	if (!mt_is_alloc(mt))
		return -EINVAL;

	if (WARN_ON_ONCE(mt_is_reserved(entry)))
		return -EINVAL;

	mtree_lock(mt);
retry:
	ret = mas_empty_area_rev(&mas, min, max, size);
	if (ret)
		goto unlock;

	mas_insert(&mas, entry);
	/*
	 * mas_nomem() may release the lock, causing the allocated area
	 * to be unavailable, so try to allocate a free area again.
	 */
	if (mas_nomem(&mas, gfp))
		goto retry;

	if (mas_is_err(&mas))
		ret = xa_err(mas.node);
	else
		*startp = mas.index;

unlock:
	mtree_unlock(mt);
	return ret;
}
EXPORT_SYMBOL(mtree_alloc_rrange);

/**
 * mtree_erase() - Find an index and erase the entire range.
 * @mt: The maple tree
 * @index: The index to erase
 *
 * Erasing is the same as a walk to an entry then a store of a NULL to that
 * ENTIRE range.  In fact, it is implemented as such using the advanced API.
 *
 * Return: The entry stored at the @index or %NULL
 */
void *mtree_erase(struct maple_tree *mt, unsigned long index)
{
	void *entry = NULL;

	MA_STATE(mas, mt, index, index);
	trace_ma_op(__func__, &mas);

	mtree_lock(mt);
	entry = mas_erase(&mas);
	mtree_unlock(mt);

	return entry;
}
EXPORT_SYMBOL(mtree_erase);

/*
 * mas_dup_free() - Free an incomplete duplication of a tree.
 * @mas: The maple state of an incomplete tree.
 *
 * The parameter @mas->node passed in indicates that the allocation failed on
 * this node.  This function frees all nodes starting from @mas->node in the
 * reverse order of mas_dup_build().  There is no need to hold the source tree
 * lock at this time.
 */
static void mas_dup_free(struct ma_state *mas)
{
	struct maple_node *node;
	enum maple_type type;
	void __rcu **slots;
	unsigned char count, i;

	/* Maybe the first node allocation failed. */
	if (mas_is_none(mas))
		return;

	while (!mte_is_root(mas->node)) {
		mas_ascend(mas);
		if (mas->offset) {
			mas->offset--;
			do {
				mas_descend(mas);
				mas->offset = mas_data_end(mas);
			} while (!mte_is_leaf(mas->node));

			mas_ascend(mas);
		}

		node = mte_to_node(mas->node);
		type = mte_node_type(mas->node);
		slots = ma_slots(node, type);
		count = mas_data_end(mas) + 1;
		for (i = 0; i < count; i++)
			((unsigned long *)slots)[i] &= ~MAPLE_NODE_MASK;
		mt_free_bulk(count, slots);
	}

	node = mte_to_node(mas->node);
	mt_free_one(node);
}

/*
 * mas_copy_node() - Copy a maple node and replace the parent.
 * @mas: The maple state of source tree.
 * @new_mas: The maple state of new tree.
 * @parent: The parent of the new node.
 *
 * Copy @mas->node to @new_mas->node, set @parent to be the parent of
 * @new_mas->node.  If memory allocation fails, @mas is set to -ENOMEM.
 */
static inline void mas_copy_node(struct ma_state *mas, struct ma_state *new_mas,
		struct maple_pnode *parent)
{
	struct maple_node *node = mte_to_node(mas->node);
	struct maple_node *new_node = mte_to_node(new_mas->node);
	unsigned long val;

	/* Copy the node completely. */
	memcpy(new_node, node, sizeof(struct maple_node));
	/* Update the parent node pointer. */
	val = (unsigned long)node->parent & MAPLE_NODE_MASK;
	new_node->parent = ma_parent_ptr(val | (unsigned long)parent);
}

/*
 * mas_dup_alloc() - Allocate child nodes for a maple node.
 * @mas: The maple state of source tree.
 * @new_mas: The maple state of new tree.
 * @gfp: The GFP_FLAGS to use for allocations.
 *
 * This function allocates child nodes for @new_mas->node during the
 * duplication process.  If memory allocation fails, @mas is set to -ENOMEM.
 */
static inline void mas_dup_alloc(struct ma_state *mas, struct ma_state *new_mas,
		gfp_t gfp)
{
	struct maple_node *node = mte_to_node(mas->node);
	struct maple_node *new_node = mte_to_node(new_mas->node);
	enum maple_type type;
	unsigned char request, count, i;
	void __rcu **slots;
	void __rcu **new_slots;
	unsigned long val;

	/* Allocate memory for child nodes. */
	type = mte_node_type(mas->node);
	new_slots = ma_slots(new_node, type);
	request = mas_data_end(mas) + 1;
	count = mt_alloc_bulk(gfp, request, (void **)new_slots);
	if (unlikely(count < request)) {
		memset(new_slots, 0, request * sizeof(void *));
		mas_set_err(mas, -ENOMEM);
		return;
	}

	/* Restore node type information in slots. */
	slots = ma_slots(node, type);
	for (i = 0; i < count; i++) {
		val = (unsigned long)mt_slot_locked(mas->tree, slots, i);
		val &= MAPLE_NODE_MASK;
		((unsigned long *)new_slots)[i] |= val;
	}
}

/*
 * mas_dup_build() - Build a new maple tree from a source tree
 * @mas: The maple state of source tree, need to be in MAS_START state.
 * @new_mas: The maple state of new tree, need to be in MAS_START state.
 * @gfp: The GFP_FLAGS to use for allocations.
 *
 * This function builds a new tree in DFS preorder.  If the memory allocation
 * fails, the error code -ENOMEM will be set in @mas, and @new_mas points to
 * the last node.  mas_dup_free() will free the incomplete duplication of a
 * tree.
 *
 * Note that the attributes of the two trees need to be exactly the same, and
 * the new tree needs to be empty, otherwise -EINVAL will be set in @mas.
 */
static inline void mas_dup_build(struct ma_state *mas, struct ma_state *new_mas,
		gfp_t gfp)
{
	struct maple_node *node;
	struct maple_pnode *parent = NULL;
	struct maple_enode *root;
	enum maple_type type;

	if (unlikely(mt_attr(mas->tree) != mt_attr(new_mas->tree)) ||
	    unlikely(!mtree_empty(new_mas->tree))) {
		mas_set_err(mas, -EINVAL);
		return;
	}

	root = mas_start(mas);
	if (mas_is_ptr(mas) || mas_is_none(mas))
		goto set_new_tree;

	node = mt_alloc_one(gfp);
	if (!node) {
		new_mas->status = ma_none;
		mas_set_err(mas, -ENOMEM);
		return;
	}

	type = mte_node_type(mas->node);
	root = mt_mk_node(node, type);
	new_mas->node = root;
	new_mas->min = 0;
	new_mas->max = ULONG_MAX;
	root = mte_mk_root(root);
	while (1) {
		mas_copy_node(mas, new_mas, parent);
		if (!mte_is_leaf(mas->node)) {
			/* Only allocate child nodes for non-leaf nodes. */
			mas_dup_alloc(mas, new_mas, gfp);
			if (unlikely(mas_is_err(mas)))
				return;
		} else {
			/*
			 * This is the last leaf node and duplication is
			 * completed.
			 */
			if (mas->max == ULONG_MAX)
				goto done;

			/* This is not the last leaf node and needs to go up. */
			do {
				mas_ascend(mas);
				mas_ascend(new_mas);
			} while (mas->offset == mas_data_end(mas));

			/* Move to the next subtree. */
			mas->offset++;
			new_mas->offset = mas->offset;
		}

		mas_descend(mas);
		parent = ma_parent_ptr(mte_to_node(new_mas->node));
		mas_descend(new_mas);
		mas->offset = 0;
		new_mas->offset = 0;
	}
done:
	/* Specially handle the parent of the root node. */
	mte_to_node(root)->parent = ma_parent_ptr(mas_tree_parent(new_mas));
set_new_tree:
	/* Make them the same height */
	new_mas->tree->ma_flags = mas->tree->ma_flags;
	rcu_assign_pointer(new_mas->tree->ma_root, root);
}

/**
 * __mt_dup(): Duplicate an entire maple tree
 * @mt: The source maple tree
 * @new: The new maple tree
 * @gfp: The GFP_FLAGS to use for allocations
 *
 * This function duplicates a maple tree in Depth-First Search (DFS) pre-order
 * traversal.  It uses memcpy() to copy nodes in the source tree and allocates
 * new child nodes in non-leaf nodes.  The new node is exactly the same as the
 * source node except for all the addresses stored in it.  It will be faster
 * than traversing all elements in the source tree and inserting them one by
 * one into the new tree.
 * The user needs to ensure that the attributes of the source tree and the new
 * tree are the same, and the new tree needs to be an empty tree, otherwise
 * -EINVAL will be returned.
 * Note that the user needs to manually lock the source tree and the new tree.
 *
 * Return: 0 on success, -ENOMEM if memory could not be allocated, -EINVAL if
 * the attributes of the two trees are different or the new tree is not an
 * empty tree.
 */
int __mt_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp)
{
	int ret = 0;
	MA_STATE(mas, mt, 0, 0);
	MA_STATE(new_mas, new, 0, 0);

	mas_dup_build(&mas, &new_mas, gfp);
	if (unlikely(mas_is_err(&mas))) {
		ret = xa_err(mas.node);
		if (ret == -ENOMEM)
			mas_dup_free(&new_mas);
	}

	return ret;
}
EXPORT_SYMBOL(__mt_dup);

/**
 * mtree_dup(): Duplicate an entire maple tree
 * @mt: The source maple tree
 * @new: The new maple tree
 * @gfp: The GFP_FLAGS to use for allocations
 *
 * This function duplicates a maple tree in Depth-First Search (DFS) pre-order
 * traversal.  It uses memcpy() to copy nodes in the source tree and allocates
 * new child nodes in non-leaf nodes.  The new node is exactly the same as the
 * source node except for all the addresses stored in it.  It will be faster
 * than traversing all elements in the source tree and inserting them one by
 * one into the new tree.
 * The user needs to ensure that the attributes of the source tree and the new
 * tree are the same, and the new tree needs to be an empty tree, otherwise
 * -EINVAL will be returned.
 *
 * Return: 0 on success, -ENOMEM if memory could not be allocated, -EINVAL if
 * the attributes of the two trees are different or the new tree is not an
 * empty tree.
 */
int mtree_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp)
{
	int ret = 0;
	MA_STATE(mas, mt, 0, 0);
	MA_STATE(new_mas, new, 0, 0);

	mas_lock(&new_mas);
	mas_lock_nested(&mas, SINGLE_DEPTH_NESTING);
	mas_dup_build(&mas, &new_mas, gfp);
	mas_unlock(&mas);
	if (unlikely(mas_is_err(&mas))) {
		ret = xa_err(mas.node);
		if (ret == -ENOMEM)
			mas_dup_free(&new_mas);
	}

	mas_unlock(&new_mas);
	return ret;
}
EXPORT_SYMBOL(mtree_dup);

/**
 * __mt_destroy() - Walk and free all nodes of a locked maple tree.
 * @mt: The maple tree
 *
 * Note: Does not handle locking.
 */
void __mt_destroy(struct maple_tree *mt)
{
	void *root = mt_root_locked(mt);

	rcu_assign_pointer(mt->ma_root, NULL);
	if (xa_is_node(root))
		mte_destroy_walk(root, mt);

	mt->ma_flags = mt_attr(mt);
}
EXPORT_SYMBOL_GPL(__mt_destroy);

/**
 * mtree_destroy() - Destroy a maple tree
 * @mt: The maple tree
 *
 * Frees all resources used by the tree.  Handles locking.
 */
void mtree_destroy(struct maple_tree *mt)
{
	mtree_lock(mt);
	__mt_destroy(mt);
	mtree_unlock(mt);
}
EXPORT_SYMBOL(mtree_destroy);

/**
 * mt_find() - Search from the start up until an entry is found.
 * @mt: The maple tree
 * @index: Pointer which contains the start location of the search
 * @max: The maximum value of the search range
 *
 * Takes the RCU read lock internally to protect the search, which does not
 * protect the returned pointer after the RCU read lock is dropped.
 * See also: Documentation/core-api/maple_tree.rst
 *
 * If an entry is found, @index is updated to point to the next possible
 * entry, independent of whether the found entry occupies a single index
 * or a range of indices.
 *
 * Return: The entry at or after the @index or %NULL
 */
void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max)
{
	MA_STATE(mas, mt, *index, *index);
	void *entry;
#ifdef CONFIG_DEBUG_MAPLE_TREE
	unsigned long copy = *index;
#endif

	trace_ma_read(__func__, &mas);

	if ((*index) > max)
		return NULL;

	rcu_read_lock();
retry:
	entry = mas_state_walk(&mas);
	if (mas_is_start(&mas))
		goto retry;

	if (unlikely(xa_is_zero(entry)))
		entry = NULL;

	if (entry)
		goto unlock;

	while (mas_is_active(&mas) && (mas.last < max)) {
		entry = mas_next_entry(&mas, max);
		if (likely(entry && !xa_is_zero(entry)))
			break;
	}

	if (unlikely(xa_is_zero(entry)))
		entry = NULL;
unlock:
	rcu_read_unlock();
	if (likely(entry)) {
		*index = mas.last + 1;
#ifdef CONFIG_DEBUG_MAPLE_TREE
		if (MT_WARN_ON(mt, (*index) && ((*index) <= copy)))
			pr_err("index not increased! %lx <= %lx\n",
			       *index, copy);
#endif
	}

	return entry;
}
EXPORT_SYMBOL(mt_find);

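/*
 * A minimal usage sketch (illustrative only, not part of the upstream file):
 * a one-shot lookup with mt_find(). On success, @index has been advanced to
 * one past the end of the range the entry occupies. The function name is
 * hypothetical.
 */
static void __maybe_unused example_find_first(struct maple_tree *mt)
{
	unsigned long index = 0;
	void *entry;

	entry = mt_find(mt, &index, ULONG_MAX);
	if (entry)
		pr_info("first entry %p, next search starts at %lu\n",
			entry, index);
}
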
/**
 * mt_find_after() - Search from @index up until an entry is found.
 * @mt: The maple tree
 * @index: Pointer which contains the start location of the search
 * @max: The maximum value to check
 *
 * Same as mt_find() except that it checks @index for 0 before
 * searching. If @index == 0, the search is aborted. This covers a wrap
 * around of @index to 0 in an iterator loop.
 *
 * Return: The entry at or after the @index or %NULL
 */
void *mt_find_after(struct maple_tree *mt, unsigned long *index,
		    unsigned long max)
{
	if (!(*index))
		return NULL;

	return mt_find(mt, index, max);
}
EXPORT_SYMBOL(mt_find_after);

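/*
 * A minimal usage sketch (illustrative only, not part of the upstream file):
 * mt_find() and mt_find_after() are the two halves of the mt_for_each()
 * iterator in include/linux/maple_tree.h; mt_find_after()'s zero check stops
 * the loop once @index wraps past ULONG_MAX. The function name is
 * hypothetical.
 */
static void __maybe_unused example_walk_all(struct maple_tree *mt)
{
	unsigned long index = 0;
	void *entry;

	mt_for_each(mt, entry, index, ULONG_MAX)
		pr_info("entry %p covers up to index %lu\n", entry, index - 1);
}
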
#ifdef CONFIG_DEBUG_MAPLE_TREE
atomic_t maple_tree_tests_run;
EXPORT_SYMBOL_GPL(maple_tree_tests_run);
atomic_t maple_tree_tests_passed;
EXPORT_SYMBOL_GPL(maple_tree_tests_passed);

#ifndef __KERNEL__
extern void kmem_cache_set_non_kernel(struct kmem_cache *, unsigned int);
void mt_set_non_kernel(unsigned int val)
{
	kmem_cache_set_non_kernel(maple_node_cache, val);
}

extern unsigned long kmem_cache_get_alloc(struct kmem_cache *);
unsigned long mt_get_alloc_size(void)
{
	return kmem_cache_get_alloc(maple_node_cache);
}

extern void kmem_cache_zero_nr_tallocated(struct kmem_cache *);
void mt_zero_nr_tallocated(void)
{
	kmem_cache_zero_nr_tallocated(maple_node_cache);
}

extern unsigned int kmem_cache_nr_tallocated(struct kmem_cache *);
unsigned int mt_nr_tallocated(void)
{
	return kmem_cache_nr_tallocated(maple_node_cache);
}

extern unsigned int kmem_cache_nr_allocated(struct kmem_cache *);
unsigned int mt_nr_allocated(void)
{
	return kmem_cache_nr_allocated(maple_node_cache);
}

void mt_cache_shrink(void)
{
}
#else
/*
 * mt_cache_shrink() - For testing, don't use this.
 *
 * Certain testcases can trigger an OOM when combined with other memory
 * debugging configuration options. This function is used to reduce the
 * possibility of an out-of-memory event due to kmem_cache objects
 * remaining around for longer than usual.
 */
void mt_cache_shrink(void)
{
	kmem_cache_shrink(maple_node_cache);
}
EXPORT_SYMBOL_GPL(mt_cache_shrink);
#endif /* not defined __KERNEL__ */

/*
 * mas_get_slot() - Get the entry in the maple state node stored at @offset.
 * @mas: The maple state
 * @offset: The offset into the slot array to fetch.
 *
 * Return: The entry stored at @offset.
 */
static inline struct maple_enode *mas_get_slot(struct ma_state *mas,
		unsigned char offset)
{
	return mas_slot(mas, ma_slots(mas_mn(mas), mte_node_type(mas->node)),
			offset);
}

/* Depth first search, post-order */
static void mas_dfs_postorder(struct ma_state *mas, unsigned long max)
{
	struct maple_enode *p, *mn = mas->node;
	unsigned long p_min, p_max;

	mas_next_node(mas, mas_mn(mas), max);
	if (!mas_is_overflow(mas))
		return;

	if (mte_is_root(mn))
		return;

	mas->node = mn;
	mas_ascend(mas);
	do {
		p = mas->node;
		p_min = mas->min;
		p_max = mas->max;
		mas_prev_node(mas, 0);
	} while (!mas_is_underflow(mas));

	mas->node = p;
	mas->max = p_max;
	mas->min = p_min;
}

/* Tree validations */
static void mt_dump_node(const struct maple_tree *mt, void *entry,
		unsigned long min, unsigned long max, unsigned int depth,
		enum mt_dump_format format);
static void mt_dump_range(unsigned long min, unsigned long max,
		unsigned int depth, enum mt_dump_format format)
{
	static const char spaces[] = "                                ";

	switch(format) {
	case mt_dump_hex:
		if (min == max)
			pr_info("%.*s%lx: ", depth * 2, spaces, min);
		else
			pr_info("%.*s%lx-%lx: ", depth * 2, spaces, min, max);
		break;
	case mt_dump_dec:
		if (min == max)
			pr_info("%.*s%lu: ", depth * 2, spaces, min);
		else
			pr_info("%.*s%lu-%lu: ", depth * 2, spaces, min, max);
	}
}

static void mt_dump_entry(void *entry, unsigned long min, unsigned long max,
		unsigned int depth, enum mt_dump_format format)
{
	mt_dump_range(min, max, depth, format);

	if (xa_is_value(entry))
		pr_cont("value %ld (0x%lx) [%p]\n", xa_to_value(entry),
			xa_to_value(entry), entry);
	else if (xa_is_zero(entry))
		pr_cont("zero (%ld)\n", xa_to_internal(entry));
	else if (mt_is_reserved(entry))
		pr_cont("UNKNOWN ENTRY (%p)\n", entry);
	else
		pr_cont("%p\n", entry);
}

static void mt_dump_range64(const struct maple_tree *mt, void *entry,
		unsigned long min, unsigned long max, unsigned int depth,
		enum mt_dump_format format)
{
	struct maple_range_64 *node = &mte_to_node(entry)->mr64;
	bool leaf = mte_is_leaf(entry);
	unsigned long first = min;
	int i;

	pr_cont(" contents: ");
	for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++) {
		switch(format) {
		case mt_dump_hex:
			pr_cont("%p %lX ", node->slot[i], node->pivot[i]);
			break;
		case mt_dump_dec:
			pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
		}
	}
	pr_cont("%p\n", node->slot[i]);
	for (i = 0; i < MAPLE_RANGE64_SLOTS; i++) {
		unsigned long last = max;

		if (i < (MAPLE_RANGE64_SLOTS - 1))
			last = node->pivot[i];
		else if (!node->slot[i] && max != mt_node_max(entry))
			break;
		if (last == 0 && i > 0)
			break;
		if (leaf)
			mt_dump_entry(mt_slot(mt, node->slot, i),
					first, last, depth + 1, format);
		else if (node->slot[i])
			mt_dump_node(mt, mt_slot(mt, node->slot, i),
					first, last, depth + 1, format);

		if (last == max)
			break;
		if (last > max) {
			switch(format) {
			case mt_dump_hex:
				pr_err("node %p last (%lx) > max (%lx) at pivot %d!\n",
					node, last, max, i);
				break;
			case mt_dump_dec:
				pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
					node, last, max, i);
			}
		}
		first = last + 1;
	}
}

static void mt_dump_arange64(const struct maple_tree *mt, void *entry,
		unsigned long min, unsigned long max, unsigned int depth,
		enum mt_dump_format format)
{
	struct maple_arange_64 *node = &mte_to_node(entry)->ma64;
	bool leaf = mte_is_leaf(entry);
	unsigned long first = min;
	int i;

	pr_cont(" contents: ");
	for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) {
		switch (format) {
		case mt_dump_hex:
			pr_cont("%lx ", node->gap[i]);
			break;
		case mt_dump_dec:
			pr_cont("%lu ", node->gap[i]);
		}
	}
	pr_cont("| %02X %02X| ", node->meta.end, node->meta.gap);
	for (i = 0; i < MAPLE_ARANGE64_SLOTS - 1; i++) {
		switch (format) {
		case mt_dump_hex:
			pr_cont("%p %lX ", node->slot[i], node->pivot[i]);
			break;
		case mt_dump_dec:
			pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
		}
	}
	pr_cont("%p\n", node->slot[i]);
	for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) {
		unsigned long last = max;

		if (i < (MAPLE_ARANGE64_SLOTS - 1))
			last = node->pivot[i];
		else if (!node->slot[i])
			break;
		if (last == 0 && i > 0)
			break;
		if (leaf)
			mt_dump_entry(mt_slot(mt, node->slot, i),
					first, last, depth + 1, format);
		else if (node->slot[i])
			mt_dump_node(mt, mt_slot(mt, node->slot, i),
					first, last, depth + 1, format);

		if (last == max)
			break;
		if (last > max) {
			pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
					node, last, max, i);
			break;
		}
		first = last + 1;
	}
}

static void mt_dump_node(const struct maple_tree *mt, void *entry,
		unsigned long min, unsigned long max, unsigned int depth,
		enum mt_dump_format format)
{
	struct maple_node *node = mte_to_node(entry);
	unsigned int type = mte_node_type(entry);
	unsigned int i;

	mt_dump_range(min, max, depth, format);

	pr_cont("node %p depth %d type %d parent %p", node, depth, type,
			node ? node->parent : NULL);
	switch (type) {
	case maple_dense:
		pr_cont("\n");
		for (i = 0; i < MAPLE_NODE_SLOTS; i++) {
			if (min + i > max)
				pr_cont("OUT OF RANGE: ");
			mt_dump_entry(mt_slot(mt, node->slot, i),
					min + i, min + i, depth, format);
		}
		break;
	case maple_leaf_64:
	case maple_range_64:
		mt_dump_range64(mt, entry, min, max, depth, format);
		break;
	case maple_arange_64:
		mt_dump_arange64(mt, entry, min, max, depth, format);
		break;
	default:
		pr_cont(" UNKNOWN TYPE\n");
	}
}

void mt_dump(const struct maple_tree *mt, enum mt_dump_format format)
{
	void *entry = rcu_dereference_check(mt->ma_root, mt_locked(mt));

	pr_info("maple_tree(%p) flags %X, height %u root %p\n",
		 mt, mt->ma_flags, mt_height(mt), entry);
	if (!xa_is_node(entry))
		mt_dump_entry(entry, 0, 0, 0, format);
	else if (entry)
		mt_dump_node(mt, entry, 0, mt_node_max(entry), 0, format);
}
EXPORT_SYMBOL_GPL(mt_dump);

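/*
 * A minimal usage sketch (illustrative only, not part of the upstream file):
 * dumping a tree in both supported formats from debugging code. The function
 * name is hypothetical.
 */
static void __maybe_unused example_dump(struct maple_tree *mt)
{
	mt_dump(mt, mt_dump_dec);	/* ranges printed in decimal */
	mt_dump(mt, mt_dump_hex);	/* same layout, hexadecimal ranges */
}
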
/*
 * Calculate the maximum gap in a node and check if that's what is reported in
 * the parent (unless root).
 */
static void mas_validate_gaps(struct ma_state *mas)
{
	struct maple_enode *mte = mas->node;
	struct maple_node *p_mn, *node = mte_to_node(mte);
	enum maple_type mt = mte_node_type(mas->node);
	unsigned long gap = 0, max_gap = 0;
	unsigned long p_end, p_start = mas->min;
	unsigned char p_slot, offset;
	unsigned long *gaps = NULL;
	unsigned long *pivots = ma_pivots(node, mt);
	unsigned int i;

	if (ma_is_dense(mt)) {
		for (i = 0; i < mt_slot_count(mte); i++) {
			if (mas_get_slot(mas, i)) {
				if (gap > max_gap)
					max_gap = gap;
				gap = 0;
				continue;
			}
			gap++;
		}
		goto counted;
	}

	gaps = ma_gaps(node, mt);
	for (i = 0; i < mt_slot_count(mte); i++) {
		p_end = mas_safe_pivot(mas, pivots, i, mt);

		if (!gaps) {
			if (!mas_get_slot(mas, i))
				gap = p_end - p_start + 1;
		} else {
			void *entry = mas_get_slot(mas, i);

			gap = gaps[i];
			MT_BUG_ON(mas->tree, !entry);

			if (gap > p_end - p_start + 1) {
				pr_err("%p[%u] %lu >= %lu - %lu + 1 (%lu)\n",
				       mas_mn(mas), i, gap, p_end, p_start,
				       p_end - p_start + 1);
				MT_BUG_ON(mas->tree, gap > p_end - p_start + 1);
			}
		}

		if (gap > max_gap)
			max_gap = gap;

		p_start = p_end + 1;
		if (p_end >= mas->max)
			break;
	}

counted:
	if (mt == maple_arange_64) {
		MT_BUG_ON(mas->tree, !gaps);
		offset = ma_meta_gap(node);
		if (offset > i) {
			pr_err("gap offset %p[%u] is invalid\n", node, offset);
			MT_BUG_ON(mas->tree, 1);
		}

		if (gaps[offset] != max_gap) {
			pr_err("gap %p[%u] is not the largest gap %lu\n",
			       node, offset, max_gap);
			MT_BUG_ON(mas->tree, 1);
		}

		for (i++; i < mt_slot_count(mte); i++) {
			if (gaps[i] != 0) {
				pr_err("gap %p[%u] beyond node limit != 0\n",
				       node, i);
				MT_BUG_ON(mas->tree, 1);
			}
		}
	}

	if (mte_is_root(mte))
		return;

	p_slot = mte_parent_slot(mas->node);
	p_mn = mte_parent(mte);
	MT_BUG_ON(mas->tree, max_gap > mas->max);
	if (ma_gaps(p_mn, mas_parent_type(mas, mte))[p_slot] != max_gap) {
		pr_err("gap %p[%u] != %lu\n", p_mn, p_slot, max_gap);
		mt_dump(mas->tree, mt_dump_hex);
		MT_BUG_ON(mas->tree, 1);
	}
}

static void mas_validate_parent_slot(struct ma_state *mas)
{
	struct maple_node *parent;
	struct maple_enode *node;
	enum maple_type p_type;
	unsigned char p_slot;
	void __rcu **slots;
	int i;

	if (mte_is_root(mas->node))
		return;

	p_slot = mte_parent_slot(mas->node);
	p_type = mas_parent_type(mas, mas->node);
	parent = mte_parent(mas->node);
	slots = ma_slots(parent, p_type);
	MT_BUG_ON(mas->tree, mas_mn(mas) == parent);

	/* Check prev/next parent slot for duplicate node entry */

	for (i = 0; i < mt_slots[p_type]; i++) {
		node = mas_slot(mas, slots, i);
		if (i == p_slot) {
			if (node != mas->node)
				pr_err("parent %p[%u] does not have %p\n",
					parent, i, mas_mn(mas));
			MT_BUG_ON(mas->tree, node != mas->node);
		} else if (node == mas->node) {
			pr_err("Invalid child %p at parent %p[%u] p_slot %u\n",
			       mas_mn(mas), parent, i, p_slot);
			MT_BUG_ON(mas->tree, node == mas->node);
		}
	}
}

static void mas_validate_child_slot(struct ma_state *mas)
{
	enum maple_type type = mte_node_type(mas->node);
	void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
	unsigned long *pivots = ma_pivots(mte_to_node(mas->node), type);
	struct maple_enode *child;
	int i;

	if (mte_is_leaf(mas->node))
		return;

	for (i = 0; i < mt_slots[type]; i++) {
		child = mas_slot(mas, slots, i);

		if (!child) {
			pr_err("Non-leaf node lacks child at %p[%u]\n",
			       mas_mn(mas), i);
			MT_BUG_ON(mas->tree, 1);
		}

		if (mte_parent_slot(child) != i) {
			pr_err("Slot error at %p[%u]: child %p has pslot %u\n",
			       mas_mn(mas), i, mte_to_node(child),
			       mte_parent_slot(child));
			MT_BUG_ON(mas->tree, 1);
		}

		if (mte_parent(child) != mte_to_node(mas->node)) {
			pr_err("child %p has parent %p not %p\n",
			       mte_to_node(child), mte_parent(child),
			       mte_to_node(mas->node));
			MT_BUG_ON(mas->tree, 1);
		}

		if (i < mt_pivots[type] && pivots[i] == mas->max)
			break;
	}
}

/*
 * Validate that all pivots are within mas->min and mas->max, check that the
 * metadata ends where the maximum ends, and ensure there are no slots or
 * pivots set outside of the end of the data.
 */
static void mas_validate_limits(struct ma_state *mas)
{
	int i;
	unsigned long prev_piv = 0;
	enum maple_type type = mte_node_type(mas->node);
	void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
	unsigned long *pivots = ma_pivots(mas_mn(mas), type);

	for (i = 0; i < mt_slots[type]; i++) {
		unsigned long piv;

		piv = mas_safe_pivot(mas, pivots, i, type);

		if (!piv && (i != 0)) {
			pr_err("Missing node limit pivot at %p[%u]",
			       mas_mn(mas), i);
			MAS_WARN_ON(mas, 1);
		}

		if (prev_piv > piv) {
			pr_err("%p[%u] piv %lu < prev_piv %lu\n",
				mas_mn(mas), i, piv, prev_piv);
			MAS_WARN_ON(mas, piv < prev_piv);
		}

		if (piv < mas->min) {
			pr_err("%p[%u] %lu < %lu\n", mas_mn(mas), i,
				piv, mas->min);
			MAS_WARN_ON(mas, piv < mas->min);
		}
		if (piv > mas->max) {
			pr_err("%p[%u] %lu > %lu\n", mas_mn(mas), i,
				piv, mas->max);
			MAS_WARN_ON(mas, piv > mas->max);
		}
		prev_piv = piv;
		if (piv == mas->max)
			break;
	}

	if (mas_data_end(mas) != i) {
		pr_err("node%p: data_end %u != the last slot offset %u\n",
		       mas_mn(mas), mas_data_end(mas), i);
		MT_BUG_ON(mas->tree, 1);
	}

	for (i += 1; i < mt_slots[type]; i++) {
		void *entry = mas_slot(mas, slots, i);

		if (entry && (i != mt_slots[type] - 1)) {
			pr_err("%p[%u] should not have entry %p\n",
			       mas_mn(mas), i, entry);
			MT_BUG_ON(mas->tree, entry != NULL);
		}

		if (i < mt_pivots[type]) {
			unsigned long piv = pivots[i];

			if (!piv)
				continue;

			pr_err("%p[%u] should not have piv %lu\n",
			       mas_mn(mas), i, piv);
			MAS_WARN_ON(mas, i < mt_pivots[type] - 1);
		}
	}
}

static void mt_validate_nulls(struct maple_tree *mt)
{
	void *entry, *last = (void *)1;
	unsigned char offset = 0;
	void __rcu **slots = NULL;
	MA_STATE(mas, mt, 0, 0);

	mas_start(&mas);
	if (mas_is_none(&mas) || (mas_is_ptr(&mas)))
		return;

	while (!mte_is_leaf(mas.node))
		mas_descend(&mas);

	slots = ma_slots(mte_to_node(mas.node), mte_node_type(mas.node));
	do {
		entry = mas_slot(&mas, slots, offset);
		if (!last && !entry) {
			pr_err("Sequential nulls end at %p[%u]\n",
				mas_mn(&mas), offset);
		}
		MT_BUG_ON(mt, !last && !entry);
		last = entry;
		if (offset == mas_data_end(&mas)) {
			mas_next_node(&mas, mas_mn(&mas), ULONG_MAX);
			if (mas_is_overflow(&mas))
				return;
			offset = 0;
			slots = ma_slots(mte_to_node(mas.node),
					 mte_node_type(mas.node));
		} else {
			offset++;
		}

	} while (!mas_is_overflow(&mas));
}

/*
 * validate a maple tree by checking:
 * 1. The limits (pivots are within mas->min to mas->max)
 * 2. The gap is correctly set in the parents
 */
void mt_validate(struct maple_tree *mt)
	__must_hold(mas->tree->ma_lock)
{
	unsigned char end;

	MA_STATE(mas, mt, 0, 0);
	mas_start(&mas);
	if (!mas_is_active(&mas))
		return;

	while (!mte_is_leaf(mas.node))
		mas_descend(&mas);

	while (!mas_is_overflow(&mas)) {
		MAS_WARN_ON(&mas, mte_dead_node(mas.node));
		end = mas_data_end(&mas);
		if (MAS_WARN_ON(&mas, (end < mt_min_slot_count(mas.node)) &&
				(mas.max != ULONG_MAX))) {
			pr_err("Invalid size %u of %p\n", end, mas_mn(&mas));
		}

		mas_validate_parent_slot(&mas);
		mas_validate_limits(&mas);
		mas_validate_child_slot(&mas);
		if (mt_is_alloc(mt))
			mas_validate_gaps(&mas);
		mas_dfs_postorder(&mas, ULONG_MAX);
	}
	mt_validate_nulls(mt);
}
EXPORT_SYMBOL_GPL(mt_validate);

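/*
 * A minimal usage sketch (illustrative only, not part of the upstream file):
 * test code typically validates the tree after a batch of stores while
 * holding the tree lock. The function name and the stored value are
 * hypothetical.
 */
static void __maybe_unused example_validate(struct maple_tree *mt)
{
	mtree_store_range(mt, 10, 20, xa_mk_value(1), GFP_KERNEL);

	mtree_lock(mt);
	mt_validate(mt);	/* trips MT_BUG_ON()/MAS_WARN_ON() on corruption */
	mtree_unlock(mt);
}
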
void mas_dump(const struct ma_state *mas)
{
	pr_err("MAS: tree=%p enode=%p ", mas->tree, mas->node);
	switch (mas->status) {
	case ma_active:
		pr_err("(ma_active)");
		break;
	case ma_none:
		pr_err("(ma_none)");
		break;
	case ma_root:
		pr_err("(ma_root)");
		break;
	case ma_start:
		pr_err("(ma_start) ");
		break;
	case ma_pause:
		pr_err("(ma_pause) ");
		break;
	case ma_overflow:
		pr_err("(ma_overflow) ");
		break;
	case ma_underflow:
		pr_err("(ma_underflow) ");
		break;
	case ma_error:
		pr_err("(ma_error) ");
		break;
	}

	pr_err("[%u/%u] index=%lx last=%lx\n", mas->offset, mas->end,
	       mas->index, mas->last);
	pr_err("     min=%lx max=%lx alloc=%p, depth=%u, flags=%x\n",
	       mas->min, mas->max, mas->alloc, mas->depth, mas->mas_flags);
	if (mas->index > mas->last)
		pr_err("Check index & last\n");
}
EXPORT_SYMBOL_GPL(mas_dump);

void mas_wr_dump(const struct ma_wr_state *wr_mas)
{
	pr_err("WR_MAS: node=%p r_min=%lx r_max=%lx\n",
	       wr_mas->node, wr_mas->r_min, wr_mas->r_max);
	pr_err("        type=%u off_end=%u, node_end=%u, end_piv=%lx\n",
	       wr_mas->type, wr_mas->offset_end, wr_mas->mas->end,
	       wr_mas->end_piv);
}
EXPORT_SYMBOL_GPL(mas_wr_dump);

#endif /* CONFIG_DEBUG_MAPLE_TREE */