// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#include <linux/sched.h>
#include <linux/random.h>
#include <linux/sbitmap.h>
#include <linux/seq_file.h>
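/*
 * Allocate the per-CPU allocation hint array and, unless the map is in
 * round-robin mode, seed each CPU's hint with a random offset so that
 * CPUs start their searches in different parts of the bitmap.
 */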
static int init_alloc_hint(struct sbitmap *sb, gfp_t flags)
{
	unsigned depth = sb->depth;

	sb->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
	if (!sb->alloc_hint)
		return -ENOMEM;

	if (depth && !sb->round_robin) {
		int i;

		for_each_possible_cpu(i)
			*per_cpu_ptr(sb->alloc_hint, i) = get_random_u32_below(depth);
	}
	return 0;
}
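/*
 * The two helpers below read and update the per-CPU hint around an
 * allocation attempt: the hint picks a starting bit for the search, and a
 * successful allocation moves the hint past the bit that was just taken.
 */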
static inline unsigned update_alloc_hint_before_get(struct sbitmap *sb,
						    unsigned int depth)
{
	unsigned hint;

	hint = this_cpu_read(*sb->alloc_hint);
	if (unlikely(hint >= depth)) {
		hint = depth ? get_random_u32_below(depth) : 0;
		this_cpu_write(*sb->alloc_hint, hint);
	}

	return hint;
}
static inline void update_alloc_hint_after_get(struct sbitmap *sb,
					       unsigned int depth,
					       unsigned int hint,
					       unsigned int nr)
{
	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sb->alloc_hint, 0);
	} else if (nr == hint || unlikely(sb->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sb->alloc_hint, hint);
	}
}
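/*
 * Bits freed with sbitmap_deferred_clear_bit() accumulate in ->cleared
 * rather than being cleared in ->word right away; the helper below folds
 * them back into ->word in one atomic step when a word appears full or
 * the map is resized.
 */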
/*
 * See if we have deferred clears that we can batch move
 */
static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
{
	unsigned long mask;

	if (!READ_ONCE(map->cleared))
		return false;

	/*
	 * First get a stable cleared mask, setting the old mask to 0.
	 */
	mask = xchg(&map->cleared, 0);

	/*
	 * Now clear the masked bits in our free word
	 */
	atomic_long_andnot(mask, (atomic_long_t *)&map->word);
	BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(map->word));
	return true;
}
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node, bool round_robin,
		      bool alloc_hint)
{
	unsigned int bits_per_word;

	if (shift < 0)
		shift = sbitmap_calculate_shift(depth);

	bits_per_word = 1U << shift;
	if (bits_per_word > BITS_PER_LONG)
		return -EINVAL;

	sb->shift = shift;
	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
	sb->round_robin = round_robin;

	if (depth == 0) {
		sb->map = NULL;
		return 0;
	}

	if (alloc_hint) {
		if (init_alloc_hint(sb, flags))
			return -ENOMEM;
	} else {
		sb->alloc_hint = NULL;
	}

	sb->map = kvzalloc_node(sb->map_nr * sizeof(*sb->map), flags, node);
	if (!sb->map) {
		free_percpu(sb->alloc_hint);
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_init_node);
void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
{
	unsigned int bits_per_word = 1U << sb->shift;
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++)
		sbitmap_deferred_clear(&sb->map[i]);

	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
}
EXPORT_SYMBOL_GPL(sbitmap_resize);
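/*
 * Find and claim a zero bit in a single word, starting the search at
 * @hint. If @wrap is set, a failed search retries from bit 0 before
 * giving up. Returns the bit number, or -1 if no free bit was found.
 */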
static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
			      unsigned int hint, bool wrap)
{
	int nr;

	/* don't wrap if starting from 0 */
	wrap = wrap && hint;

	while (1) {
		nr = find_next_zero_bit(word, depth, hint);
		if (unlikely(nr >= depth)) {
			/*
			 * We started with an offset, and we didn't reset the
			 * offset to 0 in a failure case, so start from 0 to
			 * exhaust the map.
			 */
			if (hint && wrap) {
				hint = 0;
				continue;
			}
			return -1;
		}

		if (!test_and_set_bit_lock(nr, word))
			break;

		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
	}

	return nr;
}
static int sbitmap_find_bit_in_word(struct sbitmap_word *map,
				    unsigned int depth,
				    unsigned int alloc_hint,
				    bool wrap)
{
	int nr;

	do {
		nr = __sbitmap_get_word(&map->word, depth,
					alloc_hint, wrap);
		if (nr != -1)
			break;
		if (!sbitmap_deferred_clear(map))
			break;
	} while (1);

	return nr;
}
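/*
 * Walk up to map_nr words starting at @index, trying each word with
 * sbitmap_find_bit_in_word() and wrapping around at the end of the map.
 * On success the word-relative bit is converted to a map-wide bit number.
 */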
static int sbitmap_find_bit(struct sbitmap *sb,
			    unsigned int depth,
			    unsigned int index,
			    unsigned int alloc_hint,
			    bool wrap)
{
	unsigned int i;
	int nr = -1;

	for (i = 0; i < sb->map_nr; i++) {
		nr = sbitmap_find_bit_in_word(&sb->map[index],
					      min_t(unsigned int,
						    __map_depth(sb, index),
						    depth),
					      alloc_hint, wrap);

		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		/* Jump to next index. */
		alloc_hint = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}

	return nr;
}
static int __sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint)
{
	unsigned int index;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	/*
	 * Unless we're doing round robin tag allocation, just use the
	 * alloc_hint to find the right word index. No point in looping
	 * twice in find_next_zero_bit() for that case.
	 */
	if (sb->round_robin)
		alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
	else
		alloc_hint = 0;

	return sbitmap_find_bit(sb, UINT_MAX, index, alloc_hint,
				!sb->round_robin);
}
int sbitmap_get(struct sbitmap *sb)
{
	int nr;
	unsigned int hint, depth;

	if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
		return -1;

	depth = READ_ONCE(sb->depth);
	hint = update_alloc_hint_before_get(sb, depth);
	nr = __sbitmap_get(sb, hint);
	update_alloc_hint_after_get(sb, depth, hint, nr);

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get);
static int __sbitmap_get_shallow(struct sbitmap *sb,
				 unsigned int alloc_hint,
				 unsigned long shallow_depth)
{
	unsigned int index;

	index = SB_NR_TO_INDEX(sb, alloc_hint);
	alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);

	return sbitmap_find_bit(sb, shallow_depth, index, alloc_hint, true);
}
int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth)
{
	int nr;
	unsigned int hint, depth;

	if (WARN_ON_ONCE(unlikely(!sb->alloc_hint)))
		return -1;

	depth = READ_ONCE(sb->depth);
	hint = update_alloc_hint_before_get(sb, depth);
	nr = __sbitmap_get_shallow(sb, hint, shallow_depth);
	update_alloc_hint_after_get(sb, depth, hint, nr);

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get_shallow);
bool sbitmap_any_bit_set(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		if (sb->map[i].word & ~sb->map[i].cleared)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);
static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
{
	unsigned int i, weight = 0;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];
		unsigned int word_depth = __map_depth(sb, i);

		if (set)
			weight += bitmap_weight(&word->word, word_depth);
		else
			weight += bitmap_weight(&word->cleared, word_depth);
	}
	return weight;
}

static unsigned int sbitmap_cleared(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, false);
}

unsigned int sbitmap_weight(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, true) - sbitmap_cleared(sb);
}
EXPORT_SYMBOL_GPL(sbitmap_weight);
void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	seq_printf(m, "depth=%u\n", sb->depth);
	seq_printf(m, "busy=%u\n", sbitmap_weight(sb));
	seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb));
	seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
	seq_printf(m, "map_nr=%u\n", sb->map_nr);
}
EXPORT_SYMBOL_GPL(sbitmap_show);
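/*
 * The two helpers below dump the bitmap as a hex dump for debugfs:
 * emit_byte() prints one byte with a 16-byte-per-line layout, and
 * sbitmap_bitmap_show() packs the live (word & ~cleared) bits into bytes.
 */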
static inline void emit_byte(struct seq_file *m, unsigned int offset, u8 byte)
{
	if ((offset & 0xf) == 0) {
		if (offset != 0)
			seq_putc(m, '\n');
		seq_printf(m, "%08x:", offset);
	}
	if ((offset & 0x1) == 0)
		seq_putc(m, ' ');
	seq_printf(m, "%02x", byte);
}
void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	u8 byte = 0;
	unsigned int byte_bits = 0;
	unsigned int offset = 0;
	int i;

	for (i = 0; i < sb->map_nr; i++) {
		unsigned long word = READ_ONCE(sb->map[i].word);
		unsigned long cleared = READ_ONCE(sb->map[i].cleared);
		unsigned int word_bits = __map_depth(sb, i);

		word &= ~cleared;

		while (word_bits > 0) {
			unsigned int bits = min(8 - byte_bits, word_bits);

			byte |= (word & (BIT(bits) - 1)) << byte_bits;
			byte_bits += bits;
			if (byte_bits == 8) {
				emit_byte(m, offset, byte);
				byte = 0;
				byte_bits = 0;
				offset++;
			}
			word >>= bits;
			word_bits -= bits;
		}
	}
	if (byte_bits) {
		emit_byte(m, offset, byte);
		offset++;
	}
	if (offset)
		seq_putc(m, '\n');
}
EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);
static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
					unsigned int depth)
{
	unsigned int wake_batch;
	unsigned int shallow_depth;

	/*
	 * For each batch, we wake up one queue. We need to make sure that our
	 * batch size is small enough that the full depth of the bitmap,
	 * potentially limited by a shallow depth, is enough to wake up all of
	 * the queues.
	 *
	 * Each full word of the bitmap has bits_per_word bits, and there might
	 * be a partial word. There are depth / bits_per_word full words and
	 * depth % bits_per_word bits left over. In bitwise arithmetic:
	 *
	 * bits_per_word = 1 << shift
	 * depth / bits_per_word = depth >> shift
	 * depth % bits_per_word = depth & ((1 << shift) - 1)
	 *
	 * Each word can be limited to sbq->min_shallow_depth bits.
	 */
	shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
	depth = ((depth >> sbq->sb.shift) * shallow_depth +
		 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
	wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
			     SBQ_WAKE_BATCH);

	return wake_batch;
}
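/*
 * For instance, with shift = 6 (64 bits per word), depth = 128 and
 * min_shallow_depth left at UINT_MAX, shallow_depth is 64, the effective
 * depth stays 128, and the batch is clamp(128 / SBQ_WAIT_QUEUES, 1,
 * SBQ_WAKE_BATCH).
 */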
int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node)
{
	int ret;
	int i;

	ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node,
				round_robin, true);
	if (ret)
		return ret;

	sbq->min_shallow_depth = UINT_MAX;
	sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
	atomic_set(&sbq->wake_index, 0);
	atomic_set(&sbq->ws_active, 0);
	atomic_set(&sbq->completion_cnt, 0);
	atomic_set(&sbq->wakeup_cnt, 0);

	sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
	if (!sbq->ws) {
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	for (i = 0; i < SBQ_WAIT_QUEUES; i++)
		init_waitqueue_head(&sbq->ws[i].wait);

	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);
static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
					    unsigned int depth)
{
	unsigned int wake_batch;

	wake_batch = sbq_calc_wake_batch(sbq, depth);
	if (sbq->wake_batch != wake_batch)
		WRITE_ONCE(sbq->wake_batch, wake_batch);
}
void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq,
					    unsigned int users)
{
	unsigned int wake_batch;
	unsigned int depth = (sbq->sb.depth + users - 1) / users;

	wake_batch = clamp_val(depth / SBQ_WAIT_QUEUES,
			1, SBQ_WAKE_BATCH);

	WRITE_ONCE(sbq->wake_batch, wake_batch);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_recalculate_wake_batch);
void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
{
	sbitmap_queue_update_wake_batch(sbq, depth);
	sbitmap_resize(&sbq->sb, depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);
int __sbitmap_queue_get(struct sbitmap_queue *sbq)
{
	return sbitmap_get(&sbq->sb);
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);
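/*
 * Batched allocation: try to grab up to @nr_tags consecutive free bits
 * from a single word with one cmpxchg. Returns a mask of the bits that
 * were acquired (with *offset set to the first bit), or 0 if nothing was
 * taken or the queue is in round-robin mode.
 */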
unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
					unsigned int *offset)
{
	struct sbitmap *sb = &sbq->sb;
	unsigned int hint, depth;
	unsigned long index, nr;
	int i;

	if (unlikely(sb->round_robin))
		return 0;

	depth = READ_ONCE(sb->depth);
	hint = update_alloc_hint_before_get(sb, depth);

	index = SB_NR_TO_INDEX(sb, hint);

	for (i = 0; i < sb->map_nr; i++) {
		struct sbitmap_word *map = &sb->map[index];
		unsigned long get_mask;
		unsigned int map_depth = __map_depth(sb, index);

		sbitmap_deferred_clear(map);
		if (map->word == (1UL << (map_depth - 1)) - 1)
			goto next;

		nr = find_first_zero_bit(&map->word, map_depth);
		if (nr + nr_tags <= map_depth) {
			atomic_long_t *ptr = (atomic_long_t *) &map->word;
			unsigned long val;

			get_mask = ((1UL << nr_tags) - 1) << nr;
			val = READ_ONCE(map->word);
			while (!atomic_long_try_cmpxchg(ptr, &val,
							get_mask | val))
				;
			get_mask = (get_mask & ~val) >> nr;
			if (get_mask) {
				*offset = nr + (index << sb->shift);
				update_alloc_hint_after_get(sb, depth, hint,
							*offset + nr_tags - 1);
				return get_mask;
			}
		}
next:
		/* Jump to next index. */
		if (++index >= sb->map_nr)
			index = 0;
	}

	return 0;
}
int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
			      unsigned int shallow_depth)
{
	WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);

	return sbitmap_get_shallow(&sbq->sb, shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_get_shallow);
void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
				     unsigned int min_shallow_depth)
{
	sbq->min_shallow_depth = min_shallow_depth;
	sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);
static void __sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
{
	int i, wake_index;

	if (!atomic_read(&sbq->ws_active))
		return;

	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		/*
		 * Advance the index before checking the current queue.
		 * It improves fairness, by ensuring the queue doesn't
		 * need to be fully emptied before trying to wake up
		 * from the next one.
		 */
		wake_index = sbq_index_inc(wake_index);

		/*
		 * It is sufficient to wake up at least one waiter to
		 * guarantee forward progress.
		 */
		if (waitqueue_active(&ws->wait) &&
		    wake_up_nr(&ws->wait, nr))
			break;
	}

	if (wake_index != atomic_read(&sbq->wake_index))
		atomic_set(&sbq->wake_index, wake_index);
}
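/*
 * Completions are counted in completion_cnt; once they run a full
 * wake_batch ahead of wakeup_cnt, the counter is advanced with a cmpxchg
 * and one wait queue is woken via __sbitmap_queue_wake_up().
 */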
void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
{
	unsigned int wake_batch = READ_ONCE(sbq->wake_batch);
	unsigned int wakeups;

	if (!atomic_read(&sbq->ws_active))
		return;

	atomic_add(nr, &sbq->completion_cnt);
	wakeups = atomic_read(&sbq->wakeup_cnt);

	do {
		if (atomic_read(&sbq->completion_cnt) - wakeups < wake_batch)
			return;
	} while (!atomic_try_cmpxchg(&sbq->wakeup_cnt,
				     &wakeups, wakeups + wake_batch));

	__sbitmap_queue_wake_up(sbq, wake_batch);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
static inline void sbitmap_update_cpu_hint(struct sbitmap *sb, int cpu, int tag)
{
	if (likely(!sb->round_robin && tag < sb->depth))
		data_race(*per_cpu_ptr(sb->alloc_hint, cpu) = tag);
}
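/*
 * Clear a batch of tags in one go. Bits that land in the same word are
 * coalesced into a single atomic andnot, and the deferred-clear mask is
 * bypassed entirely since the words are updated directly.
 */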
void sbitmap_queue_clear_batch(struct sbitmap_queue *sbq, int offset,
				int *tags, int nr_tags)
{
	struct sbitmap *sb = &sbq->sb;
	unsigned long *addr = NULL;
	unsigned long mask = 0;
	int i;

	smp_mb__before_atomic();
	for (i = 0; i < nr_tags; i++) {
		const int tag = tags[i] - offset;
		unsigned long *this_addr;

		/* since we're clearing a batch, skip the deferred map */
		this_addr = &sb->map[SB_NR_TO_INDEX(sb, tag)].word;
		if (!addr) {
			addr = this_addr;
		} else if (addr != this_addr) {
			atomic_long_andnot(mask, (atomic_long_t *) addr);
			mask = 0;
			addr = this_addr;
		}
		mask |= (1UL << SB_NR_TO_BIT(sb, tag));
	}

	if (mask)
		atomic_long_andnot(mask, (atomic_long_t *) addr);

	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq, nr_tags);
	sbitmap_update_cpu_hint(&sbq->sb, raw_smp_processor_id(),
					tags[nr_tags - 1] - offset);
}
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu)
{
	/*
	 * Once the clear bit is set, the bit may be allocated out.
	 *
	 * Orders READ/WRITE on the associated instance (such as request
	 * of blk_mq) by this bit for avoiding race with re-allocation,
	 * and its pair is the memory barrier implied in __sbitmap_get_word.
	 *
	 * One invariant is that the clear bit has to be zero when the bit
	 * is in use.
	 */
	smp_mb__before_atomic();
	sbitmap_deferred_clear_bit(&sbq->sb, nr);

	/*
	 * Pairs with the memory barrier in set_current_state() to ensure the
	 * proper ordering of clear_bit_unlock()/waitqueue_active() in the waker
	 * and test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the
	 * waiter. See the comment on waitqueue_active().
	 */
	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq, 1);
	sbitmap_update_cpu_hint(&sbq->sb, cpu, nr);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);
void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	/*
	 * Pairs with the memory barrier in set_current_state() like in
	 * sbitmap_queue_wake_up().
	 */
	smp_mb();
	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait))
			wake_up(&ws->wait);

		wake_index = sbq_index_inc(wake_index);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all);
void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
{
	bool first;
	int i;

	sbitmap_show(&sbq->sb, m);

	seq_puts(m, "alloc_hint={");
	first = true;
	for_each_possible_cpu(i) {
		if (!first)
			seq_puts(m, ", ");
		first = false;
		seq_printf(m, "%u", *per_cpu_ptr(sbq->sb.alloc_hint, i));
	}
	seq_puts(m, "}\n");

	seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
	seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
	seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active));

	seq_puts(m, "ws={\n");
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[i];

		seq_printf(m, "\t{.wait=%s},\n",
			   waitqueue_active(&ws->wait) ? "active" : "inactive");
	}
	seq_puts(m, "}\n");

	seq_printf(m, "round_robin=%d\n", sbq->sb.round_robin);
	seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_show);
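/*
 * The helpers below manage externally allocated sbq_wait entries. They
 * keep ws_active in sync so that wakeups can be skipped when nobody is
 * waiting, using sbq_wait->sbq to remember which queue a waiter has been
 * accounted against.
 */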
void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
			    struct sbq_wait_state *ws,
			    struct sbq_wait *sbq_wait)
{
	if (!sbq_wait->sbq) {
		sbq_wait->sbq = sbq;
		atomic_inc(&sbq->ws_active);
		add_wait_queue(&ws->wait, &sbq_wait->wait);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue);
void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait)
{
	list_del_init(&sbq_wait->wait.entry);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq_wait->sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_del_wait_queue);
void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
			     struct sbq_wait_state *ws,
			     struct sbq_wait *sbq_wait, int state)
{
	if (!sbq_wait->sbq) {
		atomic_inc(&sbq->ws_active);
		sbq_wait->sbq = sbq;
	}
	prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state);
}
EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait);
void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
			 struct sbq_wait *sbq_wait)
{
	finish_wait(&ws->wait, &sbq_wait->wait);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_finish_wait);