/*
 * Copyright (C) 2001 Momchil Velikov
 * Portions Copyright (C) 2001 Christoph Hellwig
 * Copyright (C) 2005 SGI, Christoph Lameter
 * Copyright (C) 2006 Nick Piggin
 * Copyright (C) 2012 Konstantin Khlebnikov
 * Copyright (C) 2016 Intel, Matthew Wilcox
 * Copyright (C) 2016 Intel, Ross Zwisler
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/radix-tree.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kmemleak.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/rcupdate.h>
#include <linux/preempt.h>              /* in_interrupt() */


/*
 * Radix tree node cache.
 */
static struct kmem_cache *radix_tree_node_cachep;

/*
 * The radix tree is variable-height, so an insert operation not only has
 * to build the branch to its corresponding item, it also has to build the
 * branch to existing items if the size has to be increased (by
 * radix_tree_extend).
 *
 * The worst case is a zero height tree with just a single item at index 0,
 * and then inserting an item at index ULONG_MAX. This requires 2 new branches
 * of RADIX_TREE_MAX_PATH size to be created, with only the root node shared.
 * Hence:
 */
#define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAP_SIZE ? RADIX_TREE_MAX_PATH * 2 - 1 : RADIX_TREE_MAX_PATH * 2 - 1)
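/*
 * An arithmetic sketch of the above (assuming the usual RADIX_TREE_MAP_SHIFT
 * of 6 on a 64-bit machine): RADIX_TREE_MAX_PATH = DIV_ROUND_UP(64, 6) = 11,
 * so each CPU preloads at most 11 * 2 - 1 = 21 nodes.
 */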

/*
 * Per-cpu pool of preloaded nodes
 */
struct radix_tree_preload {
        unsigned nr;
        /* nodes->private_data points to next preallocated node */
        struct radix_tree_node *nodes;
};
static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };

static inline void *node_to_entry(void *ptr)
{
        return (void *)((unsigned long)ptr | RADIX_TREE_INTERNAL_NODE);
}

#define RADIX_TREE_RETRY        node_to_entry(NULL)
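/*
 * A pointer to a radix_tree_node is tagged by setting its low bits (see
 * node_to_entry() above), which is how radix_tree_is_internal_node() tells
 * a pointer to a child node apart from a user item stored in the same slot.
 * RADIX_TREE_RETRY is the tagged NULL built from that scheme: lookups that
 * race with tree shrinkage find it in a stale slot and restart from the root.
 */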

#ifdef CONFIG_RADIX_TREE_MULTIORDER
/* Sibling slots point directly to another slot in the same node */
static inline bool is_sibling_entry(struct radix_tree_node *parent, void *node)
{
        void **ptr = node;
        return (parent->slots <= ptr) &&
                        (ptr < parent->slots + RADIX_TREE_MAP_SIZE);
}
#else
static inline bool is_sibling_entry(struct radix_tree_node *parent, void *node)
{
        return false;
}
#endif

static inline unsigned long get_slot_offset(struct radix_tree_node *parent,
                                                 void **slot)
{
        return slot - parent->slots;
}

static unsigned int radix_tree_descend(struct radix_tree_node *parent,
                        struct radix_tree_node **nodep, unsigned long index)
{
        unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK;
        void **entry = rcu_dereference_raw(parent->slots[offset]);

#ifdef CONFIG_RADIX_TREE_MULTIORDER
        if (radix_tree_is_internal_node(entry)) {
                unsigned long siboff = get_slot_offset(parent, entry);
                if (siboff < RADIX_TREE_MAP_SIZE) {
                        offset = siboff;
                        entry = rcu_dereference_raw(parent->slots[offset]);
                }
        }
#endif

        *nodep = (void *)entry;
        return offset;
}

static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
{
        return root->gfp_mask & __GFP_BITS_MASK;
}

static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
                int offset)
{
        __set_bit(offset, node->tags[tag]);
}

static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
                int offset)
{
        __clear_bit(offset, node->tags[tag]);
}

static inline int tag_get(struct radix_tree_node *node, unsigned int tag,
                int offset)
{
        return test_bit(offset, node->tags[tag]);
}

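/*
 * The root's tags are kept in the otherwise-unused high bits of
 * root->gfp_mask, above __GFP_BITS_SHIFT: bit (tag + __GFP_BITS_SHIFT) is
 * the aggregate "some entry carries this tag" state for the whole tree.
 * root_gfp_mask() above masks these bits off when the field is used as an
 * allocation mask.
 */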
static inline void root_tag_set(struct radix_tree_root *root, unsigned int tag)
{
        root->gfp_mask |= (__force gfp_t)(1 << (tag + __GFP_BITS_SHIFT));
}

static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag)
{
        root->gfp_mask &= (__force gfp_t)~(1 << (tag + __GFP_BITS_SHIFT));
}

static inline void root_tag_clear_all(struct radix_tree_root *root)
{
        root->gfp_mask &= __GFP_BITS_MASK;
}

static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag)
{
        return (__force int)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT));
}

static inline unsigned root_tags_get(struct radix_tree_root *root)
{
        return (__force unsigned)root->gfp_mask >> __GFP_BITS_SHIFT;
}

/*
 * Returns 1 if any slot in the node has this tag set.
 * Otherwise returns 0.
 */
static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag)
{
        unsigned idx;
        for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
                if (node->tags[tag][idx])
                        return 1;
        }
        return 0;
}

/**
 * radix_tree_find_next_bit - find the next set bit in a memory region
 *
 * @addr: The address to base the search on
 * @size: The bitmap size in bits
 * @offset: The bitnumber to start searching at
 *
 * Unrollable variant of find_next_bit() for constant size arrays.
 * Tail bits starting from size to roundup(size, BITS_PER_LONG) must be zero.
 * Returns next bit offset, or size if nothing found.
 */
static __always_inline unsigned long
radix_tree_find_next_bit(const unsigned long *addr,
                         unsigned long size, unsigned long offset)
{
        if (!__builtin_constant_p(size))
                return find_next_bit(addr, size, offset);

        if (offset < size) {
                unsigned long tmp;

                addr += offset / BITS_PER_LONG;
                tmp = *addr >> (offset % BITS_PER_LONG);
                if (tmp)
                        return __ffs(tmp) + offset;
                offset = (offset + BITS_PER_LONG) & ~(BITS_PER_LONG - 1);
                while (offset < size) {
                        tmp = *++addr;
                        if (tmp)
                                return __ffs(tmp) + offset;
                        offset += BITS_PER_LONG;
                }
        }
        return size;
}

#ifndef __KERNEL__
static void dump_node(struct radix_tree_node *node, unsigned long index)
{
        unsigned long i;

        pr_debug("radix node: %p offset %d tags %lx %lx %lx shift %d count %d parent %p\n",
                node, node->offset,
                node->tags[0][0], node->tags[1][0], node->tags[2][0],
                node->shift, node->count, node->parent);

        for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
                unsigned long first = index | (i << node->shift);
                unsigned long last = first | ((1UL << node->shift) - 1);
                void *entry = node->slots[i];
                if (!entry)
                        continue;
                if (is_sibling_entry(node, entry)) {
                        pr_debug("radix sblng %p offset %ld val %p indices %ld-%ld\n",
                                        entry, i,
                                        *(void **)entry_to_node(entry),
                                        first, last);
                } else if (!radix_tree_is_internal_node(entry)) {
                        pr_debug("radix entry %p offset %ld indices %ld-%ld\n",
                                        entry, i, first, last);
                } else {
                        dump_node(entry_to_node(entry), first);
                }
        }
}

/* For debug */
static void radix_tree_dump(struct radix_tree_root *root)
{
        pr_debug("radix root: %p rnode %p tags %x\n",
                        root, root->rnode,
                        root->gfp_mask >> __GFP_BITS_SHIFT);
        if (!radix_tree_is_internal_node(root->rnode))
                return;
        dump_node(entry_to_node(root->rnode), 0);
}
#endif

/*
 * This assumes that the caller has performed appropriate preallocation, and
 * that the caller has pinned this thread of control to the current CPU.
 */
static struct radix_tree_node *
radix_tree_node_alloc(struct radix_tree_root *root)
{
        struct radix_tree_node *ret = NULL;
        gfp_t gfp_mask = root_gfp_mask(root);

        /*
         * Preload code isn't irq safe and it doesn't make sense to use
         * preloading during an interrupt anyway as all the allocations have
         * to be atomic. So just do normal allocation when in interrupt.
         */
        if (!gfpflags_allow_blocking(gfp_mask) && !in_interrupt()) {
                struct radix_tree_preload *rtp;

                /*
                 * Even if the caller has preloaded, try to allocate from the
                 * cache first for the new node to get accounted.
                 */
                ret = kmem_cache_alloc(radix_tree_node_cachep,
                                       gfp_mask | __GFP_ACCOUNT | __GFP_NOWARN);
                if (ret)
                        goto out;

                /*
                 * Provided the caller has preloaded here, we will always
                 * succeed in getting a node here (and never reach
                 * kmem_cache_alloc)
                 */
                rtp = this_cpu_ptr(&radix_tree_preloads);
                if (rtp->nr) {
                        ret = rtp->nodes;
                        rtp->nodes = ret->private_data;
                        ret->private_data = NULL;
                        rtp->nr--;
                }
                /*
                 * Update the allocation stack trace as this is more useful
                 * for debugging.
                 */
                kmemleak_update_trace(ret);
                goto out;
        }
        ret = kmem_cache_alloc(radix_tree_node_cachep,
                               gfp_mask | __GFP_ACCOUNT);
out:
        BUG_ON(radix_tree_is_internal_node(ret));
        return ret;
}

static void radix_tree_node_rcu_free(struct rcu_head *head)
{
        struct radix_tree_node *node =
                        container_of(head, struct radix_tree_node, rcu_head);
        int i;

        /*
         * must only free zeroed nodes into the slab. radix_tree_shrink
         * can leave us with a non-NULL entry in the first slot, so clear
         * that here to make sure.
         */
        for (i = 0; i < RADIX_TREE_MAX_TAGS; i++)
                tag_clear(node, i, 0);

        node->slots[0] = NULL;
        node->count = 0;

        kmem_cache_free(radix_tree_node_cachep, node);
}

static inline void
radix_tree_node_free(struct radix_tree_node *node)
{
        call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}

/*
 * Load up this CPU's radix_tree_node buffer with sufficient objects to
 * ensure that the addition of a single element in the tree cannot fail. On
 * success, return zero, with preemption disabled. On error, return -ENOMEM
 * with preemption not disabled.
 *
 * To make use of this facility, the radix tree must be initialised without
 * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
 */
static int __radix_tree_preload(gfp_t gfp_mask)
{
        struct radix_tree_preload *rtp;
        struct radix_tree_node *node;
        int ret = -ENOMEM;

        preempt_disable();
        rtp = this_cpu_ptr(&radix_tree_preloads);
        while (rtp->nr < RADIX_TREE_PRELOAD_SIZE) {
                preempt_enable();
                node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
                if (node == NULL)
                        goto out;
                preempt_disable();
                rtp = this_cpu_ptr(&radix_tree_preloads);
                if (rtp->nr < RADIX_TREE_PRELOAD_SIZE) {
                        node->private_data = rtp->nodes;
                        rtp->nodes = node;
                        rtp->nr++;
                } else {
                        kmem_cache_free(radix_tree_node_cachep, node);
                }
        }
        ret = 0;
out:
        return ret;
}

/*
 * Load up this CPU's radix_tree_node buffer with sufficient objects to
 * ensure that the addition of a single element in the tree cannot fail. On
 * success, return zero, with preemption disabled. On error, return -ENOMEM
 * with preemption not disabled.
 *
 * To make use of this facility, the radix tree must be initialised without
 * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
 */
int radix_tree_preload(gfp_t gfp_mask)
{
        /* Warn on nonsensical use... */
        WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask));
        return __radix_tree_preload(gfp_mask);
}
EXPORT_SYMBOL(radix_tree_preload);
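
/*
 * A minimal sketch of the preload protocol, assuming a caller-provided
 * spinlock "mylock" serialising updates of a tree "mytree": preloading may
 * sleep, so it is done before taking the lock, and it returns with
 * preemption disabled, so radix_tree_preload_end() must follow the unlock:
 *
 *      if (radix_tree_preload(GFP_KERNEL))
 *              return -ENOMEM;
 *      spin_lock(&mylock);
 *      error = radix_tree_insert(&mytree, index, item);
 *      spin_unlock(&mylock);
 *      radix_tree_preload_end();
 */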

/*
 * The same as the function above, except that preloading is not guaranteed
 * to happen: we preload only if we decide it helps. On success, return zero
 * with preemption disabled. On error, return -ENOMEM with preemption not
 * disabled.
 */
int radix_tree_maybe_preload(gfp_t gfp_mask)
{
        if (gfpflags_allow_blocking(gfp_mask))
                return __radix_tree_preload(gfp_mask);
        /* Preloading doesn't help anything with this gfp mask, skip it */
        preempt_disable();
        return 0;
}
EXPORT_SYMBOL(radix_tree_maybe_preload);

/*
 * The maximum index which can be stored in a radix tree
 */
static inline unsigned long shift_maxindex(unsigned int shift)
{
        return (RADIX_TREE_MAP_SIZE << shift) - 1;
}

static inline unsigned long node_maxindex(struct radix_tree_node *node)
{
        return shift_maxindex(node->shift);
}

static unsigned radix_tree_load_root(struct radix_tree_root *root,
                struct radix_tree_node **nodep, unsigned long *maxindex)
{
        struct radix_tree_node *node = rcu_dereference_raw(root->rnode);

        *nodep = node;

        if (likely(radix_tree_is_internal_node(node))) {
                node = entry_to_node(node);
                *maxindex = node_maxindex(node);
                return node->shift + RADIX_TREE_MAP_SHIFT;
        }

        *maxindex = 0;
        return 0;
}

/*
 * Extend a radix tree so it can store key @index.
 */
static int radix_tree_extend(struct radix_tree_root *root,
                                unsigned long index, unsigned int shift)
{
        struct radix_tree_node *slot;
        unsigned int maxshift;
        int tag;

        /* Figure out what the shift should be. */
        maxshift = shift;
        while (index > shift_maxindex(maxshift))
                maxshift += RADIX_TREE_MAP_SHIFT;

        slot = root->rnode;
        if (!slot)
                goto out;

        do {
                struct radix_tree_node *node = radix_tree_node_alloc(root);

                if (!node)
                        return -ENOMEM;

                /* Propagate the aggregated tag info into the new root */
                for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
                        if (root_tag_get(root, tag))
                                tag_set(node, tag, 0);
                }

                BUG_ON(shift > BITS_PER_LONG);
                node->shift = shift;
                node->offset = 0;
                node->count = 1;
                node->parent = NULL;
                if (radix_tree_is_internal_node(slot))
                        entry_to_node(slot)->parent = node;
                node->slots[0] = slot;
                slot = node_to_entry(node);
                rcu_assign_pointer(root->rnode, slot);
                shift += RADIX_TREE_MAP_SHIFT;
        } while (shift <= maxshift);
out:
        return maxshift + RADIX_TREE_MAP_SHIFT;
}

/**
 * __radix_tree_create - create a slot in a radix tree
 * @root: radix tree root
 * @index: index key
 * @order: index occupies 2^order aligned slots
 * @nodep: returns node
 * @slotp: returns slot
 *
 * Create, if necessary, and return the node and slot for an item
 * at position @index in the radix tree @root.
 *
 * Until there is more than one item in the tree, no nodes are
 * allocated and @root->rnode is used as a direct slot instead of
 * pointing to a node, in which case *@nodep will be NULL.
 *
 * Returns -ENOMEM, or 0 for success.
 */
int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
                        unsigned order, struct radix_tree_node **nodep,
                        void ***slotp)
{
        struct radix_tree_node *node = NULL, *child;
        void **slot = (void **)&root->rnode;
        unsigned long maxindex;
        unsigned int shift, offset = 0;
        unsigned long max = index | ((1UL << order) - 1);

        shift = radix_tree_load_root(root, &child, &maxindex);

        /* Make sure the tree is high enough. */
        if (max > maxindex) {
                int error = radix_tree_extend(root, max, shift);
                if (error < 0)
                        return error;
                shift = error;
                child = root->rnode;
                if (order == shift)
                        shift += RADIX_TREE_MAP_SHIFT;
        }

        while (shift > order) {
                shift -= RADIX_TREE_MAP_SHIFT;
                if (child == NULL) {
                        /* Have to add a child node. */
                        child = radix_tree_node_alloc(root);
                        if (!child)
                                return -ENOMEM;
                        child->shift = shift;
                        child->offset = offset;
                        child->parent = node;
                        rcu_assign_pointer(*slot, node_to_entry(child));
                        if (node)
                                node->count++;
                } else if (!radix_tree_is_internal_node(child))
                        break;

                /* Go a level down */
                node = entry_to_node(child);
                offset = radix_tree_descend(node, &child, index);
                slot = &node->slots[offset];
        }

#ifdef CONFIG_RADIX_TREE_MULTIORDER
        /* Insert pointers to the canonical entry */
        if (order > shift) {
                unsigned i, n = 1 << (order - shift);
                offset = offset & ~(n - 1);
                slot = &node->slots[offset];
                child = node_to_entry(slot);
                for (i = 0; i < n; i++) {
                        if (slot[i])
                                return -EEXIST;
                }

                for (i = 1; i < n; i++) {
                        rcu_assign_pointer(slot[i], child);
                        node->count++;
                }
        }
#endif

        if (nodep)
                *nodep = node;
        if (slotp)
                *slotp = slot;
        return 0;
}

/**
 * __radix_tree_insert - insert into a radix tree
 * @root: radix tree root
 * @index: index key
 * @order: key covers the 2^order indices around index
 * @item: item to insert
 *
 * Insert an item into the radix tree at position @index.
 */
int __radix_tree_insert(struct radix_tree_root *root, unsigned long index,
                        unsigned order, void *item)
{
        struct radix_tree_node *node;
        void **slot;
        int error;

        BUG_ON(radix_tree_is_internal_node(item));

        error = __radix_tree_create(root, index, order, &node, &slot);
        if (error)
                return error;
        if (*slot != NULL)
                return -EEXIST;
        rcu_assign_pointer(*slot, item);

        if (node) {
                unsigned offset = get_slot_offset(node, slot);
                node->count++;
                BUG_ON(tag_get(node, 0, offset));
                BUG_ON(tag_get(node, 1, offset));
                BUG_ON(tag_get(node, 2, offset));
        } else {
                BUG_ON(root_tags_get(root));
        }

        return 0;
}
EXPORT_SYMBOL(__radix_tree_insert);

/**
 * __radix_tree_lookup - lookup an item in a radix tree
 * @root: radix tree root
 * @index: index key
 * @nodep: returns node
 * @slotp: returns slot
 *
 * Lookup and return the item at position @index in the radix
 * tree @root.
 *
 * Until there is more than one item in the tree, no nodes are
 * allocated and @root->rnode is used as a direct slot instead of
 * pointing to a node, in which case *@nodep will be NULL.
 */
void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index,
                          struct radix_tree_node **nodep, void ***slotp)
{
        struct radix_tree_node *node, *parent;
        unsigned long maxindex;
        void **slot;

restart:
        parent = NULL;
        slot = (void **)&root->rnode;
        radix_tree_load_root(root, &node, &maxindex);
        if (index > maxindex)
                return NULL;

        while (radix_tree_is_internal_node(node)) {
                unsigned offset;

                if (node == RADIX_TREE_RETRY)
                        goto restart;
                parent = entry_to_node(node);
                offset = radix_tree_descend(parent, &node, index);
                slot = parent->slots + offset;
        }

        if (nodep)
                *nodep = parent;
        if (slotp)
                *slotp = slot;
        return node;
}

/**
 * radix_tree_lookup_slot - lookup a slot in a radix tree
 * @root: radix tree root
 * @index: index key
 *
 * Returns: the slot corresponding to the position @index in the
 * radix tree @root. This is useful for update-if-exists operations.
 *
 * This function can be called under rcu_read_lock iff the slot is not
 * modified by radix_tree_replace_slot, otherwise it must be called
 * exclusive from other writers. Any dereference of the slot must be done
 * using radix_tree_deref_slot.
 */
void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
{
        void **slot;

        if (!__radix_tree_lookup(root, index, NULL, &slot))
                return NULL;
        return slot;
}
EXPORT_SYMBOL(radix_tree_lookup_slot);
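
/*
 * A minimal update-if-exists sketch, assuming the caller holds whatever
 * lock excludes other modifiers of "mytree":
 *
 *      void **slot = radix_tree_lookup_slot(&mytree, index);
 *      if (slot)
 *              radix_tree_replace_slot(slot, new_item);
 */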

/**
 * radix_tree_lookup - perform lookup operation on a radix tree
 * @root: radix tree root
 * @index: index key
 *
 * Lookup the item at the position @index in the radix tree @root.
 *
 * This function can be called under rcu_read_lock, however the caller
 * must manage lifetimes of leaf nodes (eg. RCU may also be used to free
 * them safely). No RCU barriers are required to access or modify the
 * returned item, however.
 */
void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index)
{
        return __radix_tree_lookup(root, index, NULL, NULL);
}
EXPORT_SYMBOL(radix_tree_lookup);
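
/*
 * For example, a lockless reader (a sketch, assuming the items themselves
 * are freed via RCU so the returned pointer stays valid inside the read
 * section; "use" is illustrative):
 *
 *      rcu_read_lock();
 *      item = radix_tree_lookup(&mytree, index);
 *      if (item)
 *              use(item);
 *      rcu_read_unlock();
 */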

/**
 * radix_tree_tag_set - set a tag on a radix tree node
 * @root: radix tree root
 * @index: index key
 * @tag: tag index
 *
 * Set the search tag (which must be < RADIX_TREE_MAX_TAGS)
 * corresponding to @index in the radix tree. From
 * the root all the way down to the leaf node.
 *
 * Returns the address of the tagged item. Setting a tag on a not-present
 * item is a bug.
 */
void *radix_tree_tag_set(struct radix_tree_root *root,
                        unsigned long index, unsigned int tag)
{
        struct radix_tree_node *node, *parent;
        unsigned long maxindex;

        radix_tree_load_root(root, &node, &maxindex);
        BUG_ON(index > maxindex);

        while (radix_tree_is_internal_node(node)) {
                unsigned offset;

                parent = entry_to_node(node);
                offset = radix_tree_descend(parent, &node, index);
                BUG_ON(!node);

                if (!tag_get(parent, tag, offset))
                        tag_set(parent, tag, offset);
        }

        /* set the root's tag bit */
        if (!root_tag_get(root, tag))
                root_tag_set(root, tag);

        return node;
}
EXPORT_SYMBOL(radix_tree_tag_set);

static void node_tag_clear(struct radix_tree_root *root,
                                struct radix_tree_node *node,
                                unsigned int tag, unsigned int offset)
{
        while (node) {
                if (!tag_get(node, tag, offset))
                        return;
                tag_clear(node, tag, offset);
                if (any_tag_set(node, tag))
                        return;

                offset = node->offset;
                node = node->parent;
        }

        /* clear the root's tag bit */
        if (root_tag_get(root, tag))
                root_tag_clear(root, tag);
}

/**
 * radix_tree_tag_clear - clear a tag on a radix tree node
 * @root: radix tree root
 * @index: index key
 * @tag: tag index
 *
 * Clear the search tag (which must be < RADIX_TREE_MAX_TAGS)
 * corresponding to @index in the radix tree. If this causes
 * the leaf node to have no tags set then clear the tag in the
 * next-to-leaf node, etc.
 *
 * Returns the address of the tagged item on success, else NULL, i.e. it
 * has the same return value and semantics as radix_tree_lookup().
 */
void *radix_tree_tag_clear(struct radix_tree_root *root,
                        unsigned long index, unsigned int tag)
{
        struct radix_tree_node *node, *parent;
        unsigned long maxindex;
        int uninitialized_var(offset);

        radix_tree_load_root(root, &node, &maxindex);
        if (index > maxindex)
                return NULL;

        parent = NULL;

        while (radix_tree_is_internal_node(node)) {
                parent = entry_to_node(node);
                offset = radix_tree_descend(parent, &node, index);
        }

        if (node)
                node_tag_clear(root, parent, tag, offset);

        return node;
}
EXPORT_SYMBOL(radix_tree_tag_clear);

/**
 * radix_tree_tag_get - get a tag on a radix tree node
 * @root: radix tree root
 * @index: index key
 * @tag: tag index (< RADIX_TREE_MAX_TAGS)
 *
 * Return values:
 *
 *  0: tag not present or not set
 *  1: tag set
 *
 * Note that the return value of this function may not be relied on, even if
 * the RCU lock is held, unless tag modification and node deletion are excluded
 * from concurrency.
 */
int radix_tree_tag_get(struct radix_tree_root *root,
                        unsigned long index, unsigned int tag)
{
        struct radix_tree_node *node, *parent;
        unsigned long maxindex;

        if (!root_tag_get(root, tag))
                return 0;

        radix_tree_load_root(root, &node, &maxindex);
        if (index > maxindex)
                return 0;
        if (node == NULL)
                return 0;

        while (radix_tree_is_internal_node(node)) {
                unsigned offset;

                parent = entry_to_node(node);
                offset = radix_tree_descend(parent, &node, index);

                if (!node)
                        return 0;
                if (!tag_get(parent, tag, offset))
                        return 0;
                if (node == RADIX_TREE_RETRY)
                        break;
        }

        return 1;
}
EXPORT_SYMBOL(radix_tree_tag_get);
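
/*
 * A minimal tagging sketch, assuming the caller serialises modifiers of
 * "mytree" (e.g. with a spinlock) and uses tag 0 to mean "dirty":
 *
 *      radix_tree_tag_set(&mytree, index, 0);
 *      ...
 *      if (radix_tree_tag_get(&mytree, index, 0))
 *              radix_tree_tag_clear(&mytree, index, 0);
 */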

static inline void __set_iter_shift(struct radix_tree_iter *iter,
                                        unsigned int shift)
{
#ifdef CONFIG_RADIX_TREE_MULTIORDER
        iter->shift = shift;
#endif
}

/**
 * radix_tree_next_chunk - find next chunk of slots for iteration
 *
 * @root: radix tree root
 * @iter: iterator state
 * @flags: RADIX_TREE_ITER_* flags and tag index
 * Returns: pointer to chunk first slot, or NULL if iteration is over
 */
void **radix_tree_next_chunk(struct radix_tree_root *root,
                             struct radix_tree_iter *iter, unsigned flags)
{
        unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
        struct radix_tree_node *node, *child;
        unsigned long index, offset, maxindex;

        if ((flags & RADIX_TREE_ITER_TAGGED) && !root_tag_get(root, tag))
                return NULL;

        /*
         * Catch next_index overflow after ~0UL. iter->index never overflows
         * during iteration; it can be zero only at the beginning.
         * And we cannot overflow iter->next_index in a single step,
         * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG.
         *
         * This condition is also used by radix_tree_next_slot() to stop
         * contiguous iteration, and to forbid switching to the next chunk.
         */
        index = iter->next_index;
        if (!index && iter->index)
                return NULL;

restart:
        radix_tree_load_root(root, &child, &maxindex);
        if (index > maxindex)
                return NULL;
        if (!child)
                return NULL;

        if (!radix_tree_is_internal_node(child)) {
                /* Single-slot tree */
                iter->index = index;
                iter->next_index = maxindex + 1;
                iter->tags = 1;
                __set_iter_shift(iter, 0);
                return (void **)&root->rnode;
        }

        do {
                node = entry_to_node(child);
                offset = radix_tree_descend(node, &child, index);

                if ((flags & RADIX_TREE_ITER_TAGGED) ?
                                !tag_get(node, tag, offset) : !child) {
                        /* Hole detected */
                        if (flags & RADIX_TREE_ITER_CONTIG)
                                return NULL;

                        if (flags & RADIX_TREE_ITER_TAGGED)
                                offset = radix_tree_find_next_bit(
                                                node->tags[tag],
                                                RADIX_TREE_MAP_SIZE,
                                                offset + 1);
                        else
                                while (++offset < RADIX_TREE_MAP_SIZE) {
                                        void *slot = node->slots[offset];
                                        if (is_sibling_entry(node, slot))
                                                continue;
                                        if (slot)
                                                break;
                                }
                        index &= ~node_maxindex(node);
                        index += offset << node->shift;
                        /* Overflow after ~0UL */
                        if (!index)
                                return NULL;
                        if (offset == RADIX_TREE_MAP_SIZE)
                                goto restart;
                        child = rcu_dereference_raw(node->slots[offset]);
                }

                if ((child == NULL) || (child == RADIX_TREE_RETRY))
                        goto restart;
        } while (radix_tree_is_internal_node(child));

        /* Update the iterator state */
        iter->index = (index & ~node_maxindex(node)) | (offset << node->shift);
        iter->next_index = (index | node_maxindex(node)) + 1;
        __set_iter_shift(iter, node->shift);

        /* Construct iter->tags bit-mask from node->tags[tag] array */
        if (flags & RADIX_TREE_ITER_TAGGED) {
                unsigned tag_long, tag_bit;

                tag_long = offset / BITS_PER_LONG;
                tag_bit  = offset % BITS_PER_LONG;
                iter->tags = node->tags[tag][tag_long] >> tag_bit;
                /* This never happens if RADIX_TREE_TAG_LONGS == 1 */
                if (tag_long < RADIX_TREE_TAG_LONGS - 1) {
                        /* Pick tags from next element */
                        if (tag_bit)
                                iter->tags |= node->tags[tag][tag_long + 1] <<
                                                (BITS_PER_LONG - tag_bit);
                        /* Clip chunk size, here only BITS_PER_LONG tags */
                        iter->next_index = index + BITS_PER_LONG;
                }
        }

        return node->slots + offset;
}
EXPORT_SYMBOL(radix_tree_next_chunk);
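
/*
 * Callers normally reach radix_tree_next_chunk() through the iterator
 * macros built on top of it. A minimal sketch, assuming the usual RCU
 * read-side protection for lookups on "mytree" ("process" is illustrative):
 *
 *      struct radix_tree_iter iter;
 *      void **slot;
 *
 *      rcu_read_lock();
 *      radix_tree_for_each_slot(slot, &mytree, &iter, 0) {
 *              void *item = radix_tree_deref_slot(slot);
 *              if (radix_tree_deref_retry(item)) {
 *                      slot = radix_tree_iter_retry(&iter);
 *                      continue;
 *              }
 *              process(item, iter.index);
 *      }
 *      rcu_read_unlock();
 */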

/**
 * radix_tree_range_tag_if_tagged - for each item in given range set given
 *                                  tag if item has another tag set
 * @root: radix tree root
 * @first_indexp: pointer to a starting index of a range to scan
 * @last_index: last index of a range to scan
 * @nr_to_tag: maximum number of items to tag
 * @iftag: tag index to test
 * @settag: tag index to set if tested tag is set
 *
 * This function scans the range of the radix tree from first_index to
 * last_index (inclusive). For each item in the range, if iftag is set,
 * the function also sets settag. The function stops either after tagging
 * nr_to_tag items or after reaching last_index.
 *
 * The tags must be set from the leaf level only and propagated back up the
 * path to the root. We must do this so that we resolve the full path before
 * setting any tags on intermediate nodes. If we set tags as we descend, then
 * we can get to the leaf node and find that the index that has the iftag
 * set is outside the range we are scanning. This results in dangling tags and
 * can lead to problems with later tag operations (e.g. livelocks on lookups).
 *
 * The function returns the number of leaves where the tag was set and sets
 * *first_indexp to the first unscanned index.
 * WARNING! *first_indexp can wrap if last_index is ULONG_MAX. Caller must
 * be prepared to handle that.
 */
unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root,
                unsigned long *first_indexp, unsigned long last_index,
                unsigned long nr_to_tag,
                unsigned int iftag, unsigned int settag)
{
        struct radix_tree_node *parent, *node, *child;
        unsigned long maxindex;
        unsigned long tagged = 0;
        unsigned long index = *first_indexp;

        radix_tree_load_root(root, &child, &maxindex);
        last_index = min(last_index, maxindex);
        if (index > last_index)
                return 0;
        if (!nr_to_tag)
                return 0;
        if (!root_tag_get(root, iftag)) {
                *first_indexp = last_index + 1;
                return 0;
        }
        if (!radix_tree_is_internal_node(child)) {
                *first_indexp = last_index + 1;
                root_tag_set(root, settag);
                return 1;
        }

        node = entry_to_node(child);

        for (;;) {
                unsigned offset = radix_tree_descend(node, &child, index);
                if (!child)
                        goto next;
                if (!tag_get(node, iftag, offset))
                        goto next;
                /* Sibling slots never have tags set on them */
                if (radix_tree_is_internal_node(child)) {
                        node = entry_to_node(child);
                        continue;
                }

                /* tag the leaf */
                tagged++;
                tag_set(node, settag, offset);

                /* walk back up the path tagging interior nodes */
                parent = node;
                for (;;) {
                        offset = parent->offset;
                        parent = parent->parent;
                        if (!parent)
                                break;
                        /* stop if we find a node with the tag already set */
                        if (tag_get(parent, settag, offset))
                                break;
                        tag_set(parent, settag, offset);
                }
next:
                /* Go to next entry in node */
                index = ((index >> node->shift) + 1) << node->shift;
                /* Overflow can happen when last_index is ~0UL... */
                if (index > last_index || !index)
                        break;
                offset = (index >> node->shift) & RADIX_TREE_MAP_MASK;
                while (offset == 0) {
                        /*
                         * We've fully scanned this node. Go up. Because
                         * last_index is guaranteed to be in the tree, what
                         * we do below cannot wander astray.
                         */
                        node = node->parent;
                        offset = (index >> node->shift) & RADIX_TREE_MAP_MASK;
                }
                if (is_sibling_entry(node, node->slots[offset]))
                        goto next;
                if (tagged >= nr_to_tag)
                        break;
        }
        /*
         * We need not set the root tag if no tag was set with @settag
         * within the range from *first_indexp to last_index.
         */
        if (tagged > 0)
                root_tag_set(root, settag);
        *first_indexp = index;

        return tagged;
}
EXPORT_SYMBOL(radix_tree_range_tag_if_tagged);
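
/*
 * The best-known user of this function is writeback:
 * tag_pages_for_writeback() calls it under the mapping's tree_lock to
 * convert PAGECACHE_TAG_DIRTY into PAGECACHE_TAG_TOWRITE over a range of
 * the page-cache tree, roughly (a sketch with locking and batching elided):
 *
 *      unsigned long start = 0;
 *
 *      radix_tree_range_tag_if_tagged(&mapping->page_tree, &start, end,
 *                      ULONG_MAX, PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE);
 */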

/**
 * radix_tree_gang_lookup - perform multiple lookup on a radix tree
 * @root: radix tree root
 * @results: where the results of the lookup are placed
 * @first_index: start the lookup from this key
 * @max_items: place up to this many items at *results
 *
 * Performs an index-ascending scan of the tree for present items. Places
 * them at *@results and returns the number of items which were placed at
 * *@results.
 *
 * The implementation is naive.
 *
 * Like radix_tree_lookup, radix_tree_gang_lookup may be called under
 * rcu_read_lock. In this case, rather than the returned results being
 * an atomic snapshot of the tree at a single point in time, the
 * semantics of an RCU protected gang lookup are as though multiple
 * radix_tree_lookups have been issued in individual locks, and results
 * stored in 'results'.
 */
unsigned int
radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
                        unsigned long first_index, unsigned int max_items)
{
        struct radix_tree_iter iter;
        void **slot;
        unsigned int ret = 0;

        if (unlikely(!max_items))
                return 0;

        radix_tree_for_each_slot(slot, root, &iter, first_index) {
                results[ret] = rcu_dereference_raw(*slot);
                if (!results[ret])
                        continue;
                if (radix_tree_is_internal_node(results[ret])) {
                        slot = radix_tree_iter_retry(&iter);
                        continue;
                }
                if (++ret == max_items)
                        break;
        }

        return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup);
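
/*
 * A minimal gang-lookup sketch, with the same RCU rules as
 * radix_tree_lookup() ("batch" and "process" are illustrative):
 *
 *      void *batch[16];
 *      unsigned int i, n;
 *
 *      rcu_read_lock();
 *      n = radix_tree_gang_lookup(&mytree, batch, 0, ARRAY_SIZE(batch));
 *      for (i = 0; i < n; i++)
 *              process(batch[i]);
 *      rcu_read_unlock();
 */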

/**
 * radix_tree_gang_lookup_slot - perform multiple slot lookup on radix tree
 * @root: radix tree root
 * @results: where the results of the lookup are placed
 * @indices: where their indices should be placed (but usually NULL)
 * @first_index: start the lookup from this key
 * @max_items: place up to this many items at *results
 *
 * Performs an index-ascending scan of the tree for present items. Places
 * their slots at *@results and returns the number of items which were
 * placed at *@results.
 *
 * The implementation is naive.
 *
 * Like radix_tree_gang_lookup as far as RCU and locking goes. Slots must
 * be dereferenced with radix_tree_deref_slot, and if using only RCU
 * protection, radix_tree_deref_slot may fail requiring a retry.
 */
unsigned int
radix_tree_gang_lookup_slot(struct radix_tree_root *root,
                        void ***results, unsigned long *indices,
                        unsigned long first_index, unsigned int max_items)
{
        struct radix_tree_iter iter;
        void **slot;
        unsigned int ret = 0;

        if (unlikely(!max_items))
                return 0;

        radix_tree_for_each_slot(slot, root, &iter, first_index) {
                results[ret] = slot;
                if (indices)
                        indices[ret] = iter.index;
                if (++ret == max_items)
                        break;
        }

        return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup_slot);

/**
 * radix_tree_gang_lookup_tag - perform multiple lookup on a radix tree
 *                              based on a tag
 * @root: radix tree root
 * @results: where the results of the lookup are placed
 * @first_index: start the lookup from this key
 * @max_items: place up to this many items at *results
 * @tag: the tag index (< RADIX_TREE_MAX_TAGS)
 *
 * Performs an index-ascending scan of the tree for present items which
 * have the tag indexed by @tag set. Places the items at *@results and
 * returns the number of items which were placed at *@results.
 */
unsigned int
radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
                unsigned long first_index, unsigned int max_items,
                unsigned int tag)
{
        struct radix_tree_iter iter;
        void **slot;
        unsigned int ret = 0;

        if (unlikely(!max_items))
                return 0;

        radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
                results[ret] = rcu_dereference_raw(*slot);
                if (!results[ret])
                        continue;
                if (radix_tree_is_internal_node(results[ret])) {
                        slot = radix_tree_iter_retry(&iter);
                        continue;
                }
                if (++ret == max_items)
                        break;
        }

        return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup_tag);

/**
 * radix_tree_gang_lookup_tag_slot - perform multiple slot lookup on a
 *                                   radix tree based on a tag
 * @root: radix tree root
 * @results: where the results of the lookup are placed
 * @first_index: start the lookup from this key
 * @max_items: place up to this many items at *results
 * @tag: the tag index (< RADIX_TREE_MAX_TAGS)
 *
 * Performs an index-ascending scan of the tree for present items which
 * have the tag indexed by @tag set. Places the slots at *@results and
 * returns the number of slots which were placed at *@results.
 */
unsigned int
radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
                unsigned long first_index, unsigned int max_items,
                unsigned int tag)
{
        struct radix_tree_iter iter;
        void **slot;
        unsigned int ret = 0;

        if (unlikely(!max_items))
                return 0;

        radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
                results[ret] = slot;
                if (++ret == max_items)
                        break;
        }

        return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot);

#if defined(CONFIG_SHMEM) && defined(CONFIG_SWAP)
#include <linux/sched.h> /* for cond_resched() */

struct locate_info {
        unsigned long found_index;
        bool stop;
};

/*
 * This linear search is at present only useful to shmem_unuse_inode().
 */
static unsigned long __locate(struct radix_tree_node *slot, void *item,
                              unsigned long index, struct locate_info *info)
{
        unsigned long i;

        do {
                unsigned int shift = slot->shift;

                for (i = (index >> shift) & RADIX_TREE_MAP_MASK;
                     i < RADIX_TREE_MAP_SIZE;
                     i++, index += (1UL << shift)) {
                        struct radix_tree_node *node =
                                        rcu_dereference_raw(slot->slots[i]);
                        if (node == RADIX_TREE_RETRY)
                                goto out;
                        if (!radix_tree_is_internal_node(node)) {
                                if (node == item) {
                                        info->found_index = index;
                                        info->stop = true;
                                        goto out;
                                }
                                continue;
                        }
                        node = entry_to_node(node);
                        if (is_sibling_entry(slot, node))
                                continue;
                        slot = node;
                        break;
                }
        } while (i < RADIX_TREE_MAP_SIZE);

out:
        if ((index == 0) && (i == RADIX_TREE_MAP_SIZE))
                info->stop = true;
        return index;
}

/**
 * radix_tree_locate_item - search through radix tree for item
 * @root: radix tree root
 * @item: item to be found
 *
 * Returns index where item was found, or -1 if not found.
 * Caller must hold no lock (since this time-consuming function needs
 * to be preemptible), and must check afterwards if item is still there.
 */
unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item)
{
        struct radix_tree_node *node;
        unsigned long max_index;
        unsigned long cur_index = 0;
        struct locate_info info = {
                .found_index = -1,
                .stop = false,
        };

        do {
                rcu_read_lock();
                node = rcu_dereference_raw(root->rnode);
                if (!radix_tree_is_internal_node(node)) {
                        rcu_read_unlock();
                        if (node == item)
                                info.found_index = 0;
                        break;
                }

                node = entry_to_node(node);

                max_index = node_maxindex(node);
                if (cur_index > max_index) {
                        rcu_read_unlock();
                        break;
                }

                cur_index = __locate(node, item, cur_index, &info);
                rcu_read_unlock();
                cond_resched();
        } while (!info.stop && cur_index <= max_index);

        return info.found_index;
}
#else
unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item)
{
        return -1;
}
#endif /* CONFIG_SHMEM && CONFIG_SWAP */

/**
 * radix_tree_shrink - shrink radix tree to minimum height
 * @root: radix tree root
 */
static inline bool radix_tree_shrink(struct radix_tree_root *root)
{
        bool shrunk = false;

        for (;;) {
                struct radix_tree_node *node = root->rnode;
                struct radix_tree_node *child;

                if (!radix_tree_is_internal_node(node))
                        break;
                node = entry_to_node(node);

                /*
                 * If the candidate node has more than one child, or its
                 * child is not at the leftmost slot, or the child is a
                 * multiorder entry, we cannot shrink.
                 */
                if (node->count != 1)
                        break;
                child = node->slots[0];
                if (!child)
                        break;
                if (!radix_tree_is_internal_node(child) && node->shift)
                        break;

                if (radix_tree_is_internal_node(child))
                        entry_to_node(child)->parent = NULL;

                /*
                 * We don't need rcu_assign_pointer(), since we are simply
                 * moving the node from one part of the tree to another: if it
                 * was safe to dereference the old pointer to it
                 * (node->slots[0]), it will be safe to dereference the new
                 * one (root->rnode) as far as dependent read barriers go.
                 */
                root->rnode = child;

                /*
                 * We have a dilemma here. The node's slot[0] must not be
                 * NULLed in case there are concurrent lookups expecting to
                 * find the item. However if this was a bottom-level node,
                 * then it may be subject to the slot pointer being visible
                 * to callers dereferencing it. If item corresponding to
                 * slot[0] is subsequently deleted, these callers would expect
                 * their slot to become empty sooner or later.
                 *
                 * For example, lockless pagecache will look up a slot, deref
                 * the page pointer, and if the page has 0 refcount it means it
                 * was concurrently deleted from pagecache so try the deref
                 * again. Fortunately there is already a requirement for logic
                 * to retry the entire slot lookup -- the indirect pointer
                 * problem (replacing direct root node with an indirect pointer
                 * also results in a stale slot). So tag the slot as indirect
                 * to force callers to retry.
                 */
                if (!radix_tree_is_internal_node(child))
                        node->slots[0] = RADIX_TREE_RETRY;

                radix_tree_node_free(node);
                shrunk = true;
        }

        return shrunk;
}

/**
 * __radix_tree_delete_node - try to free node after clearing a slot
 * @root: radix tree root
 * @node: node containing @index
 *
 * After clearing the slot at @index in @node from radix tree
 * rooted at @root, call this function to attempt freeing the
 * node and shrinking the tree.
 *
 * Returns %true if @node was freed, %false otherwise.
 */
bool __radix_tree_delete_node(struct radix_tree_root *root,
                              struct radix_tree_node *node)
{
        bool deleted = false;

        do {
                struct radix_tree_node *parent;

                if (node->count) {
                        if (node == entry_to_node(root->rnode))
                                deleted |= radix_tree_shrink(root);
                        return deleted;
                }

                parent = node->parent;
                if (parent) {
                        parent->slots[node->offset] = NULL;
                        parent->count--;
                } else {
                        root_tag_clear_all(root);
                        root->rnode = NULL;
                }

                radix_tree_node_free(node);
                deleted = true;

                node = parent;
        } while (node);

        return deleted;
}

static inline void delete_sibling_entries(struct radix_tree_node *node,
                                        void *ptr, unsigned offset)
{
#ifdef CONFIG_RADIX_TREE_MULTIORDER
        int i;
        for (i = 1; offset + i < RADIX_TREE_MAP_SIZE; i++) {
                if (node->slots[offset + i] != ptr)
                        break;
                node->slots[offset + i] = NULL;
                node->count--;
        }
#endif
}

/**
 * radix_tree_delete_item - delete an item from a radix tree
 * @root: radix tree root
 * @index: index key
 * @item: expected item
 *
 * Remove @item at @index from the radix tree rooted at @root.
 *
 * Returns the address of the deleted item, or NULL if it was not present
 * or the entry at the given @index was not @item.
 */
void *radix_tree_delete_item(struct radix_tree_root *root,
                             unsigned long index, void *item)
{
        struct radix_tree_node *node;
        unsigned int offset;
        void **slot;
        void *entry;
        int tag;

        entry = __radix_tree_lookup(root, index, &node, &slot);
        if (!entry)
                return NULL;

        if (item && entry != item)
                return NULL;

        if (!node) {
                root_tag_clear_all(root);
                root->rnode = NULL;
                return entry;
        }

        offset = get_slot_offset(node, slot);

        /* Clear all tags associated with the item to be deleted. */
        for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
                node_tag_clear(root, node, tag, offset);

        delete_sibling_entries(node, node_to_entry(slot), offset);
        node->slots[offset] = NULL;
        node->count--;

        __radix_tree_delete_node(root, node);

        return entry;
}
EXPORT_SYMBOL(radix_tree_delete_item);

/**
 * radix_tree_delete - delete an item from a radix tree
 * @root: radix tree root
 * @index: index key
 *
 * Remove the item at @index from the radix tree rooted at @root.
 *
 * Returns the address of the deleted item, or NULL if it was not present.
 */
void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
{
        return radix_tree_delete_item(root, index, NULL);
}
EXPORT_SYMBOL(radix_tree_delete);

struct radix_tree_node *radix_tree_replace_clear_tags(
                        struct radix_tree_root *root,
                        unsigned long index, void *entry)
{
        struct radix_tree_node *node;
        void **slot;

        __radix_tree_lookup(root, index, &node, &slot);

        if (node) {
                unsigned int tag, offset = get_slot_offset(node, slot);
                for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
                        node_tag_clear(root, node, tag, offset);
        } else {
                /* Clear root node tags */
                root->gfp_mask &= __GFP_BITS_MASK;
        }

        radix_tree_replace_slot(slot, entry);
        return node;
}

/**
 * radix_tree_tagged - test whether any items in the tree are tagged
 * @root: radix tree root
 * @tag: tag to test
 */
int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag)
{
        return root_tag_get(root, tag);
}
EXPORT_SYMBOL(radix_tree_tagged);

static void
radix_tree_node_ctor(void *arg)
{
        struct radix_tree_node *node = arg;

        memset(node, 0, sizeof(*node));
        INIT_LIST_HEAD(&node->private_list);
}

static int radix_tree_callback(struct notifier_block *nfb,
                               unsigned long action, void *hcpu)
{
        int cpu = (long)hcpu;
        struct radix_tree_preload *rtp;
        struct radix_tree_node *node;

        /* Free per-cpu pool of preloaded nodes */
        if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
                rtp = &per_cpu(radix_tree_preloads, cpu);
                while (rtp->nr) {
                        node = rtp->nodes;
                        rtp->nodes = node->private_data;
                        kmem_cache_free(radix_tree_node_cachep, node);
                        rtp->nr--;
                }
        }
        return NOTIFY_OK;
}

void __init radix_tree_init(void)
{
        radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
                        sizeof(struct radix_tree_node), 0,
                        SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
                        radix_tree_node_ctor);
        hotcpu_notifier(radix_tree_callback, 0);
}
1601 }