// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Uses a block device as cache for other block devices; optimized for SSDs.
 * All allocation is done in buckets, which should match the erase block size
 * of the device.
 *
 * Buckets containing cached data are kept on a heap sorted by priority;
 * bucket priority is increased on cache hit, and periodically all the buckets
 * on the heap have their priority scaled down. This currently is just used as
 * an LRU but in the future should allow for more intelligent heuristics.
 *
 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
 * counter. Garbage collection is used to remove stale pointers.
 *
 * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
 * as keys are inserted we only sort the pages that have not yet been written.
 * When garbage collection is run, we resort the entire node.
 *
 * All configuration is done via sysfs; see Documentation/admin-guide/bcache.rst.
 */
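/*
 * Illustrative sketch (not part of the driver): the 8 bit counter mentioned
 * above is the bucket generation. A pointer records the generation of the
 * bucket it points into when it is created, so "freeing" a bucket is just
 * bumping bucket->gen; any pointer whose recorded generation no longer
 * matches is stale and skipped, conceptually:
 *
 *	bool is_stale(uint8_t bucket_gen, uint8_t ptr_gen)	// hypothetical
 *	{
 *		return bucket_gen != ptr_gen;
 *	}
 *
 * The real check is ptr_stale(), which also copes with the counter wrapping.
 */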
#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"

#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/sched/clock.h>
#include <linux/rculist.h>
#include <linux/delay.h>
#include <trace/events/bcache.h>
/*
 * Todo:
 * register_bcache: Return errors out to userspace correctly
 *
 * Writeback: don't undirty key until after a cache flush
 *
 * Create an iterator for key pointers
 *
 * On btree write error, mark bucket such that it won't be freed from the cache
 *
 * Journalling:
 *   Check for bad keys in replay
 *   Propagate barriers
 *   Refcount journal entries in journal_replay
 *
 * Garbage collection:
 *   Finish incremental gc
 *   Gc should free old UUIDs, data for invalid UUIDs
 *
 * Provide a way to list backing device UUIDs we have data cached for, and
 * probably how long it's been since we've seen them, and a way to invalidate
 * dirty data for devices that will never be attached again
 *
 * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
 * that based on that and how much dirty data we have we can keep writeback
 * from being starved
 *
 * Add a tracepoint or somesuch to watch for writeback starvation
 *
 * When btree depth > 1 and splitting an interior node, we have to make sure
 * alloc_bucket() cannot fail. This should be true but is not completely
 * obvious.
 *
 * Plugging?
 *
 * If data write is less than hard sector size of ssd, round up offset in open
 * bucket to the next whole sector
 *
 * Superblock needs to be fleshed out for multiple cache devices
 *
 * Add a sysfs tunable for the number of writeback IOs in flight
 *
 * Add a sysfs tunable for the number of open data buckets
 *
 * IO tracking: Can we track when one process is doing io on behalf of another?
 * IO tracking: Don't use just an average, weigh more recent stuff higher
 *
 * Test module load/unload
 */
#define MAX_NEED_GC		64
#define MAX_SAVE_PRIO		72
#define MAX_GC_TIMES		100
#define MIN_GC_NODES		100
#define GC_SLEEP_MS		100

#define PTR_DIRTY_BIT		(((uint64_t) 1 << 36))

#define PTR_HASH(c, k)							\
	(((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
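/*
 * Illustrative note (not used by the code): PTR_HASH() folds the bucket
 * number and the pointer's generation into one value; e.g. with
 * bucket_bits == 13, a pointer at offset 0x6000 with gen 3 hashes from
 * (0x6000 >> 13) | 3. A reused bucket with a bumped generation therefore
 * typically hashes differently, so a stale cached node won't be matched.
 */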
static struct workqueue_struct *btree_io_wq;

#define insert_lock(s, b)	((b)->level <= (s)->lock)
static inline struct bset *write_block(struct btree *b)
{
	return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c->cache);
}

static void bch_btree_init_next(struct btree *b)
{
	/* If not a leaf node, always sort */
	if (b->level && b->keys.nsets)
		bch_btree_sort(&b->keys, &b->c->sort);
	else
		bch_btree_sort_lazy(&b->keys, &b->c->sort);

	if (b->written < btree_blocks(b))
		bch_bset_init_next(&b->keys, write_block(b),
				   bset_magic(&b->c->cache->sb));
}
/* Btree key manipulation */

void bkey_put(struct cache_set *c, struct bkey *k)
{
	unsigned int i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i))
			atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
}
/* Btree IO */

static uint64_t btree_csum_set(struct btree *b, struct bset *i)
{
	uint64_t crc = b->key.ptr[0];
	void *data = (void *) i + 8, *end = bset_bkey_last(i);

	crc = crc64_be(crc, data, end - data);
	return crc ^ 0xffffffffffffffffULL;
}
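/*
 * Illustrative note (not part of the driver): the checksum is seeded with
 * the node's first pointer and skips the first 8 bytes of the bset, which
 * hold the csum field itself, conceptually:
 *
 *	csum = crc64_be(seed, (char *) i + 8, len - 8) ^ ~0ULL;
 *
 * (len here is a hypothetical name for the bset's byte length.) Seeding
 * with the pointer means a bset replayed into the wrong bucket fails
 * verification even if its contents are intact.
 */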
void bch_btree_node_read_done(struct btree *b)
{
	const char *err = "bad btree header";
	struct bset *i = btree_bset_first(b);
	struct btree_iter *iter;

	/*
	 * c->fill_iter can allocate an iterator with more memory space
	 * than static MAX_BSETS.
	 * See the comment around cache_set->fill_iter.
	 */
	iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
	iter->size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size;
	iter->used = 0;

#ifdef CONFIG_BCACHE_DEBUG
	iter->b = &b->keys;
#endif

	if (!i->seq)
		goto err;

	for (;
	     b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
	     i = write_block(b)) {
		err = "unsupported bset version";
		if (i->version > BCACHE_BSET_VERSION)
			goto err;

		err = "bad btree header";
		if (b->written + set_blocks(i, block_bytes(b->c->cache)) >
		    btree_blocks(b))
			goto err;

		err = "bad magic";
		if (i->magic != bset_magic(&b->c->cache->sb))
			goto err;

		err = "bad checksum";
		switch (i->version) {
		case 0:
			if (i->csum != csum_set(i))
				goto err;
			break;
		case BCACHE_BSET_VERSION:
			if (i->csum != btree_csum_set(b, i))
				goto err;
			break;
		}

		err = "empty set";
		if (i != b->keys.set[0].data && !i->keys)
			goto err;

		bch_btree_iter_push(iter, i->start, bset_bkey_last(i));

		b->written += set_blocks(i, block_bytes(b->c->cache));
	}

	err = "corrupted btree";
	for (i = write_block(b);
	     bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
	     i = ((void *) i) + block_bytes(b->c->cache))
		if (i->seq == b->keys.set[0].data->seq)
			goto err;

	bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);

	i = b->keys.set[0].data;
	err = "short btree key";
	if (b->keys.set[0].size &&
	    bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
		goto err;

	if (b->written < btree_blocks(b))
		bch_bset_init_next(&b->keys, write_block(b),
				   bset_magic(&b->c->cache->sb));
out:
	mempool_free(iter, &b->c->fill_iter);
	return;
err:
	set_btree_node_io_error(b);
	bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
			    err, PTR_BUCKET_NR(b->c, &b->key, 0),
			    bset_block_offset(b, i), i->keys);
	goto out;
}
static void btree_node_read_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	closure_put(cl);
}

static void bch_btree_node_read(struct btree *b)
{
	uint64_t start_time = local_clock();
	struct closure cl;
	struct bio *bio;

	trace_bcache_btree_read(b);

	closure_init_stack(&cl);

	bio = bch_bbio_alloc(b->c);
	bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
	bio->bi_end_io	= btree_node_read_endio;
	bio->bi_private	= &cl;
	bio->bi_opf = REQ_OP_READ | REQ_META;

	bch_bio_map(bio, b->keys.set[0].data);

	bch_submit_bbio(bio, b->c, &b->key, 0);
	closure_sync(&cl);

	if (bio->bi_status)
		set_btree_node_io_error(b);

	bch_bbio_free(bio, b->c);

	if (btree_node_io_error(b))
		goto err;

	bch_btree_node_read_done(b);
	bch_time_stats_update(&b->c->btree_read_time, start_time);

	return;
err:
	bch_cache_set_error(b->c, "io error reading bucket %zu",
			    PTR_BUCKET_NR(b->c, &b->key, 0));
}
static void btree_complete_write(struct btree *b, struct btree_write *w)
{
	if (w->prio_blocked &&
	    !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
		wake_up_allocators(b->c);

	if (w->journal) {
		atomic_dec_bug(w->journal);
		__closure_wake_up(&b->c->journal.wait);
	}

	w->prio_blocked	= 0;
	w->journal	= NULL;
}

static void btree_node_write_unlock(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io);

	up(&b->io_mutex);
}

static void __btree_node_write_done(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io);
	struct btree_write *w = btree_prev_write(b);

	bch_bbio_free(b->bio, b->c);
	b->bio = NULL;
	btree_complete_write(b, w);

	if (btree_node_dirty(b))
		queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);

	closure_return_with_destructor(cl, btree_node_write_unlock);
}

static void btree_node_write_done(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io);

	bio_free_pages(b->bio);
	__btree_node_write_done(cl);
}

static void btree_node_write_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	struct btree *b = container_of(cl, struct btree, io);

	if (bio->bi_status)
		set_btree_node_io_error(b);

	bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree");
	closure_put(cl);
}
static void do_btree_node_write(struct btree *b)
{
	struct closure *cl = &b->io;
	struct bset *i = btree_bset_last(b);
	BKEY_PADDED(key) k;

	i->version	= BCACHE_BSET_VERSION;
	i->csum		= btree_csum_set(b, i);

	BUG_ON(b->bio);
	b->bio = bch_bbio_alloc(b->c);

	b->bio->bi_end_io	= btree_node_write_endio;
	b->bio->bi_private	= cl;
	b->bio->bi_iter.bi_size	= roundup(set_bytes(i), block_bytes(b->c->cache));
	b->bio->bi_opf		= REQ_OP_WRITE | REQ_META | REQ_FUA;
	bch_bio_map(b->bio, i);

	/*
	 * If we're appending to a leaf node, we don't technically need FUA -
	 * this write just needs to be persisted before the next journal write,
	 * which will be marked FLUSH|FUA.
	 *
	 * Similarly if we're writing a new btree root - the pointer is going to
	 * be in the next journal entry.
	 *
	 * But if we're writing a new btree node (that isn't a root) or
	 * appending to a non leaf btree node, we need either FUA or a flush
	 * when we write the parent with the new pointer. FUA is cheaper than a
	 * flush, and writes appending to leaf nodes aren't blocking anything so
	 * just make all btree node writes FUA to keep things sane.
	 */

	bkey_copy(&k.key, &b->key);
	SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
		       bset_sector_offset(&b->keys, i));

	if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
		struct bio_vec *bv;
		void *addr = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
		struct bvec_iter_all iter_all;

		bio_for_each_segment_all(bv, b->bio, iter_all) {
			memcpy(page_address(bv->bv_page), addr, PAGE_SIZE);
			addr += PAGE_SIZE;
		}

		bch_submit_bbio(b->bio, b->c, &k.key, 0);

		continue_at(cl, btree_node_write_done, NULL);
	} else {
		/*
		 * No problem for multipage bvec since the bio is
		 * just allocated
		 */
		b->bio->bi_vcnt = 0;
		bch_bio_map(b->bio, i);

		bch_submit_bbio(b->bio, b->c, &k.key, 0);

		closure_sync(cl);
		continue_at_nobarrier(cl, __btree_node_write_done, NULL);
	}
}
void __bch_btree_node_write(struct btree *b, struct closure *parent)
{
	struct bset *i = btree_bset_last(b);

	lockdep_assert_held(&b->write_lock);

	trace_bcache_btree_write(b);

	BUG_ON(current->bio_list);
	BUG_ON(b->written >= btree_blocks(b));
	BUG_ON(b->written && !i->keys);
	BUG_ON(btree_bset_first(b)->seq != i->seq);
	bch_check_keys(&b->keys, "writing");

	cancel_delayed_work(&b->work);

	/* If caller isn't waiting for write, parent refcount is cache set */
	down(&b->io_mutex);
	closure_init(&b->io, parent ?: &b->c->cl);

	clear_bit(BTREE_NODE_dirty,	 &b->flags);
	change_bit(BTREE_NODE_write_idx, &b->flags);

	do_btree_node_write(b);

	atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->cache->sb.block_size,
			&b->c->cache->btree_sectors_written);

	b->written += set_blocks(i, block_bytes(b->c->cache));
}

void bch_btree_node_write(struct btree *b, struct closure *parent)
{
	unsigned int nsets = b->keys.nsets;

	lockdep_assert_held(&b->lock);

	__bch_btree_node_write(b, parent);

	/*
	 * do verify if there was more than one set initially (i.e. we did a
	 * sort) and we sorted down to a single set:
	 */
	if (nsets && !b->keys.nsets)
		bch_btree_verify(b);

	bch_btree_init_next(b);
}

static void bch_btree_node_write_sync(struct btree *b)
{
	struct closure cl;

	closure_init_stack(&cl);

	mutex_lock(&b->write_lock);
	bch_btree_node_write(b, &cl);
	mutex_unlock(&b->write_lock);

	closure_sync(&cl);
}

static void btree_node_write_work(struct work_struct *w)
{
	struct btree *b = container_of(to_delayed_work(w), struct btree, work);

	mutex_lock(&b->write_lock);
	if (btree_node_dirty(b))
		__bch_btree_node_write(b, NULL);
	mutex_unlock(&b->write_lock);
}

static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
{
	struct bset *i = btree_bset_last(b);
	struct btree_write *w = btree_current_write(b);

	lockdep_assert_held(&b->write_lock);

	BUG_ON(!b->written);
	BUG_ON(!i->keys);

	if (!btree_node_dirty(b))
		queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);

	set_btree_node_dirty(b);

	/*
	 * w->journal is always the oldest journal pin of all bkeys
	 * in the leaf node, to make sure the oldest jset seq won't
	 * be increased before this btree node is flushed.
	 */
	if (journal_ref) {
		if (w->journal &&
		    journal_pin_cmp(b->c, w->journal, journal_ref)) {
			atomic_dec_bug(w->journal);
			w->journal = NULL;
		}

		if (!w->journal) {
			w->journal = journal_ref;
			atomic_inc(w->journal);
		}
	}

	/* Force write if set is too big */
	if (set_bytes(i) > PAGE_SIZE - 48 &&
	    !current->bio_list)
		bch_btree_node_write(b, NULL);
}
/*
 * Btree in memory cache - allocation/freeing
 * mca -> memory cache
 */

#define mca_reserve(c)	(((!IS_ERR_OR_NULL(c->root) && c->root->level)	\
			  ? c->root->level : 1) * 8 + 16)
#define mca_can_free(c)						\
	max_t(int, 0, c->btree_cache_used - mca_reserve(c))
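/*
 * Worked example (illustrative): for a cache set whose root is at level 3,
 * mca_reserve() keeps 3 * 8 + 16 = 40 nodes in reserve; a set with a level
 * 0 root (or no root yet) reserves 1 * 8 + 16 = 24. Only cached nodes
 * beyond the reserve are visible to the shrinker via mca_can_free().
 */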
static void mca_data_free(struct btree *b)
{
	BUG_ON(b->io_mutex.count != 1);

	bch_btree_keys_free(&b->keys);

	b->c->btree_cache_used--;
	list_move(&b->list, &b->c->btree_cache_freed);
}

static void mca_bucket_free(struct btree *b)
{
	BUG_ON(btree_node_dirty(b));

	b->key.ptr[0] = 0;
	hlist_del_init_rcu(&b->hash);
	list_move(&b->list, &b->c->btree_cache_freeable);
}

static unsigned int btree_order(struct bkey *k)
{
	return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
}

static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
{
	if (!bch_btree_keys_alloc(&b->keys,
				  max_t(unsigned int,
					ilog2(b->c->btree_pages),
					btree_order(k)),
				  gfp)) {
		b->c->btree_cache_used++;
		list_move(&b->list, &b->c->btree_cache);
	} else {
		list_move(&b->list, &b->c->btree_cache_freed);
	}
}

#define cmp_int(l, r)		((l > r) - (l < r))

#ifdef CONFIG_PROVE_LOCKING
static int btree_lock_cmp_fn(const struct lockdep_map *_a,
			     const struct lockdep_map *_b)
{
	const struct btree *a = container_of(_a, struct btree, lock.dep_map);
	const struct btree *b = container_of(_b, struct btree, lock.dep_map);

	return -cmp_int(a->level, b->level) ?: bkey_cmp(&a->key, &b->key);
}

static void btree_lock_print_fn(const struct lockdep_map *map)
{
	const struct btree *b = container_of(map, struct btree, lock.dep_map);

	printk(KERN_CONT " l=%u %llu:%llu", b->level,
	       KEY_INODE(&b->key), KEY_OFFSET(&b->key));
}
#endif

static struct btree *mca_bucket_alloc(struct cache_set *c,
				      struct bkey *k, gfp_t gfp)
{
	/*
	 * kzalloc() is necessary here for initialization,
	 * see code comments in bch_btree_keys_init().
	 */
	struct btree *b = kzalloc(sizeof(struct btree), gfp);

	if (!b)
		return NULL;

	init_rwsem(&b->lock);
	lock_set_cmp_fn(&b->lock, btree_lock_cmp_fn, btree_lock_print_fn);
	mutex_init(&b->write_lock);
	lockdep_set_novalidate_class(&b->write_lock);
	INIT_LIST_HEAD(&b->list);
	INIT_DELAYED_WORK(&b->work, btree_node_write_work);
	b->c = c;
	sema_init(&b->io_mutex, 1);

	mca_data_alloc(b, k, gfp);
	return b;
}
static int mca_reap(struct btree *b, unsigned int min_order, bool flush)
{
	struct closure cl;

	closure_init_stack(&cl);
	lockdep_assert_held(&b->c->bucket_lock);

	if (!down_write_trylock(&b->lock))
		return -ENOMEM;

	BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);

	if (b->keys.page_order < min_order)
		goto out_unlock;

	if (!flush) {
		if (btree_node_dirty(b))
			goto out_unlock;

		if (down_trylock(&b->io_mutex))
			goto out_unlock;
		up(&b->io_mutex);
	}

retry:
	/*
	 * BTREE_NODE_dirty might be cleared in btree_flush_btree() by
	 * __bch_btree_node_write(). To avoid an extra flush, acquire
	 * b->write_lock before checking BTREE_NODE_dirty bit.
	 */
	mutex_lock(&b->write_lock);
	/*
	 * If this btree node is selected in btree_flush_write() by journal
	 * code, delay and retry until the node is flushed by journal code
	 * and BTREE_NODE_journal_flush bit cleared by btree_flush_write().
	 */
	if (btree_node_journal_flush(b)) {
		pr_debug("bnode %p is flushing by journal, retry\n", b);
		mutex_unlock(&b->write_lock);
		udelay(1);
		goto retry;
	}

	if (btree_node_dirty(b))
		__bch_btree_node_write(b, &cl);
	mutex_unlock(&b->write_lock);

	closure_sync(&cl);

	/* wait for any in flight btree write */
	down(&b->io_mutex);
	up(&b->io_mutex);

	return 0;
out_unlock:
	rw_unlock(true, b);
	return -ENOMEM;
}
static unsigned long bch_mca_scan(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	struct cache_set *c = shrink->private_data;
	struct btree *b, *t;
	unsigned long i, nr = sc->nr_to_scan;
	unsigned long freed = 0;
	unsigned int btree_cache_used;

	if (c->shrinker_disabled)
		return SHRINK_STOP;

	if (c->btree_cache_alloc_lock)
		return SHRINK_STOP;

	/* Return -1 if we can't do anything right now */
	if (sc->gfp_mask & __GFP_IO)
		mutex_lock(&c->bucket_lock);
	else if (!mutex_trylock(&c->bucket_lock))
		return -1;

	/*
	 * It's _really_ critical that we don't free too many btree nodes - we
	 * have to always leave ourselves a reserve. The reserve is how we
	 * guarantee that allocating memory for a new btree node can always
	 * succeed, so that inserting keys into the btree can always succeed and
	 * IO can always make forward progress:
	 */
	nr /= c->btree_pages;
	if (nr == 0)
		nr = 1;
	nr = min_t(unsigned long, nr, mca_can_free(c));

	i = 0;
	btree_cache_used = c->btree_cache_used;
	list_for_each_entry_safe_reverse(b, t, &c->btree_cache_freeable, list) {
		if (nr <= 0)
			goto out;

		if (!mca_reap(b, 0, false)) {
			mca_data_free(b);
			rw_unlock(true, b);
			freed++;
		}
		nr--;
		i++;
	}

	list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
		if (nr <= 0 || i >= btree_cache_used)
			goto out;

		if (!mca_reap(b, 0, false)) {
			mca_bucket_free(b);
			mca_data_free(b);
			rw_unlock(true, b);
			freed++;
		}

		nr--;
		i++;
	}
out:
	mutex_unlock(&c->bucket_lock);
	return freed * c->btree_pages;
}
static unsigned long bch_mca_count(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	struct cache_set *c = shrink->private_data;

	if (c->shrinker_disabled)
		return 0;

	if (c->btree_cache_alloc_lock)
		return 0;

	return mca_can_free(c) * c->btree_pages;
}
void bch_btree_cache_free(struct cache_set *c)
{
	struct btree *b;
	struct closure cl;

	closure_init_stack(&cl);

	if (c->shrink)
		shrinker_free(c->shrink);

	mutex_lock(&c->bucket_lock);

#ifdef CONFIG_BCACHE_DEBUG
	if (c->verify_data)
		list_move(&c->verify_data->list, &c->btree_cache);

	free_pages((unsigned long) c->verify_ondisk, ilog2(meta_bucket_pages(&c->cache->sb)));
#endif

	list_splice(&c->btree_cache_freeable,
		    &c->btree_cache);

	while (!list_empty(&c->btree_cache)) {
		b = list_first_entry(&c->btree_cache, struct btree, list);

		/*
		 * This function is called by cache_set_free(), no I/O
		 * request on cache now, it is unnecessary to acquire
		 * b->write_lock before clearing BTREE_NODE_dirty anymore.
		 */
		if (btree_node_dirty(b)) {
			btree_complete_write(b, btree_current_write(b));
			clear_bit(BTREE_NODE_dirty, &b->flags);
		}
		mca_data_free(b);
	}

	while (!list_empty(&c->btree_cache_freed)) {
		b = list_first_entry(&c->btree_cache_freed,
				     struct btree, list);
		cancel_delayed_work_sync(&b->work);
		list_del(&b->list);
		kfree(b);
	}

	mutex_unlock(&c->bucket_lock);
}
int bch_btree_cache_alloc(struct cache_set *c)
{
	unsigned int i;

	for (i = 0; i < mca_reserve(c); i++)
		if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
			return -ENOMEM;

	list_splice_init(&c->btree_cache,
			 &c->btree_cache_freeable);

#ifdef CONFIG_BCACHE_DEBUG
	mutex_init(&c->verify_lock);

	c->verify_ondisk = (void *)
		__get_free_pages(GFP_KERNEL|__GFP_COMP,
				 ilog2(meta_bucket_pages(&c->cache->sb)));
	if (!c->verify_ondisk) {
		/*
		 * Don't worry about the mca_rereserve buckets
		 * allocated in previous for-loop, they will be
		 * handled properly in bch_cache_set_unregister().
		 */
		return -ENOMEM;
	}

	c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);

	if (c->verify_data &&
	    c->verify_data->keys.set->data)
		list_del_init(&c->verify_data->list);
	else
		c->verify_data = NULL;
#endif

	c->shrink = shrinker_alloc(0, "md-bcache:%pU", c->set_uuid);
	if (!c->shrink) {
		pr_warn("bcache: %s: could not allocate shrinker\n", __func__);
		return 0;
	}

	c->shrink->count_objects = bch_mca_count;
	c->shrink->scan_objects = bch_mca_scan;
	c->shrink->seeks = 4;
	c->shrink->batch = c->btree_pages * 2;
	c->shrink->private_data = c;

	shrinker_register(c->shrink);

	return 0;
}
/* Btree in memory cache - hash table */

static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
{
	return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
}

static struct btree *mca_find(struct cache_set *c, struct bkey *k)
{
	struct btree *b;

	rcu_read_lock();
	hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
		if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
			goto out;
	b = NULL;
out:
	rcu_read_unlock();
	return b;
}
static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
{
	spin_lock(&c->btree_cannibalize_lock);
	if (likely(c->btree_cache_alloc_lock == NULL)) {
		c->btree_cache_alloc_lock = current;
	} else if (c->btree_cache_alloc_lock != current) {
		if (op)
			prepare_to_wait(&c->btree_cache_wait, &op->wait,
					TASK_UNINTERRUPTIBLE);
		spin_unlock(&c->btree_cannibalize_lock);
		return -EINTR;
	}
	spin_unlock(&c->btree_cannibalize_lock);

	return 0;
}
static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
				     struct bkey *k)
{
	struct btree *b;

	trace_bcache_btree_cache_cannibalize(c);

	if (mca_cannibalize_lock(c, op))
		return ERR_PTR(-EINTR);

	list_for_each_entry_reverse(b, &c->btree_cache, list)
		if (!mca_reap(b, btree_order(k), false))
			return b;

	list_for_each_entry_reverse(b, &c->btree_cache, list)
		if (!mca_reap(b, btree_order(k), true))
			return b;

	WARN(1, "btree cache cannibalize failed\n");
	return ERR_PTR(-ENOMEM);
}
/*
 * We can only have one thread cannibalizing other cached btree nodes at a time,
 * or we'll deadlock. We use an open coded mutex to ensure that, which a
 * cannibalize_bucket() will take. This means every time we unlock the root of
 * the btree, we need to release this lock if we have it held.
 */
void bch_cannibalize_unlock(struct cache_set *c)
{
	spin_lock(&c->btree_cannibalize_lock);
	if (c->btree_cache_alloc_lock == current) {
		c->btree_cache_alloc_lock = NULL;
		wake_up(&c->btree_cache_wait);
	}
	spin_unlock(&c->btree_cannibalize_lock);
}
static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
			       struct bkey *k, int level)
{
	struct btree *b;

	BUG_ON(current->bio_list);

	lockdep_assert_held(&c->bucket_lock);

	if (mca_find(c, k))
		return NULL;

	/* btree_free() doesn't free memory; it sticks the node on the end of
	 * the list. Check if there's any freed nodes there:
	 */
	list_for_each_entry(b, &c->btree_cache_freeable, list)
		if (!mca_reap(b, btree_order(k), false))
			goto out;

	/* We never free struct btree itself, just the memory that holds the on
	 * disk node. Check the freed list before allocating a new one:
	 */
	list_for_each_entry(b, &c->btree_cache_freed, list)
		if (!mca_reap(b, 0, false)) {
			mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
			if (!b->keys.set[0].data)
				goto err;
			else
				goto out;
		}

	b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
	if (!b)
		goto err;

	BUG_ON(!down_write_trylock(&b->lock));
	if (!b->keys.set->data)
		goto err;
out:
	BUG_ON(b->io_mutex.count != 1);

	bkey_copy(&b->key, k);
	list_move(&b->list, &c->btree_cache);
	hlist_del_init_rcu(&b->hash);
	hlist_add_head_rcu(&b->hash, mca_hash(c, k));

	lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
	b->parent	= (void *) ~0UL;
	b->flags	= 0;
	b->written	= 0;
	b->level	= level;

	if (!b->level)
		bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
				    &b->c->expensive_debug_checks);
	else
		bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
				    &b->c->expensive_debug_checks);

	return b;
err:
	if (b)
		rw_unlock(true, b);

	b = mca_cannibalize(c, op, k);
	if (!IS_ERR(b))
		goto out;

	return b;
}
/*
 * bch_btree_node_get - find a btree node in the cache and lock it, reading it
 * in from disk if necessary.
 *
 * If IO is necessary and running under submit_bio_noacct, returns -EAGAIN.
 *
 * The btree node will have either a read or a write lock held, depending on
 * level and op->lock.
 */
struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
				 struct bkey *k, int level, bool write,
				 struct btree *parent)
{
	int i = 0;
	struct btree *b;

	BUG_ON(level < 0);
retry:
	b = mca_find(c, k);

	if (!b) {
		if (current->bio_list)
			return ERR_PTR(-EAGAIN);

		mutex_lock(&c->bucket_lock);
		b = mca_alloc(c, op, k, level);
		mutex_unlock(&c->bucket_lock);

		if (!b)
			goto retry;
		if (IS_ERR(b))
			return b;

		bch_btree_node_read(b);

		if (!write)
			downgrade_write(&b->lock);
	} else {
		rw_lock(write, b, level);
		if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
			rw_unlock(write, b);
			goto retry;
		}
		BUG_ON(b->level != level);
	}

	if (btree_node_io_error(b)) {
		rw_unlock(write, b);
		return ERR_PTR(-EIO);
	}

	BUG_ON(!b->written);

	b->parent = parent;

	for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
		prefetch(b->keys.set[i].tree);
		prefetch(b->keys.set[i].data);
	}

	for (; i <= b->keys.nsets; i++)
		prefetch(b->keys.set[i].data);

	return b;
}
static void btree_node_prefetch(struct btree *parent, struct bkey *k)
{
	struct btree *b;

	mutex_lock(&parent->c->bucket_lock);
	b = mca_alloc(parent->c, NULL, k, parent->level - 1);
	mutex_unlock(&parent->c->bucket_lock);

	if (!IS_ERR_OR_NULL(b)) {
		b->parent = parent;
		bch_btree_node_read(b);
		rw_unlock(true, b);
	}
}
/* Btree alloc */

static void btree_node_free(struct btree *b)
{
	trace_bcache_btree_node_free(b);

	BUG_ON(b == b->c->root);

retry:
	mutex_lock(&b->write_lock);
	/*
	 * If the btree node is selected and flushing in btree_flush_write(),
	 * delay and retry until the BTREE_NODE_journal_flush bit cleared,
	 * then it is safe to free the btree node here. Otherwise this btree
	 * node will be in race condition.
	 */
	if (btree_node_journal_flush(b)) {
		mutex_unlock(&b->write_lock);
		pr_debug("bnode %p journal_flush set, retry\n", b);
		udelay(1);
		goto retry;
	}

	if (btree_node_dirty(b)) {
		btree_complete_write(b, btree_current_write(b));
		clear_bit(BTREE_NODE_dirty, &b->flags);
	}

	mutex_unlock(&b->write_lock);

	cancel_delayed_work(&b->work);

	mutex_lock(&b->c->bucket_lock);
	bch_bucket_free(b->c, &b->key);
	mca_bucket_free(b);
	mutex_unlock(&b->c->bucket_lock);
}
struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
				     int level, bool wait,
				     struct btree *parent)
{
	BKEY_PADDED(key) k;
	struct btree *b;

	mutex_lock(&c->bucket_lock);
retry:
	/* return ERR_PTR(-EAGAIN) when it fails */
	b = ERR_PTR(-EAGAIN);
	if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, wait))
		goto err;

	bkey_put(c, &k.key);
	SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);

	b = mca_alloc(c, op, &k.key, level);
	if (IS_ERR(b))
		goto err_free;

	if (!b) {
		cache_bug(c,
			  "Tried to allocate bucket that was in btree cache");
		goto retry;
	}

	b->parent = parent;
	bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->cache->sb));

	mutex_unlock(&c->bucket_lock);

	trace_bcache_btree_node_alloc(b);
	return b;
err_free:
	bch_bucket_free(c, &k.key);
err:
	mutex_unlock(&c->bucket_lock);

	trace_bcache_btree_node_alloc_fail(c);
	return b;
}
static struct btree *bch_btree_node_alloc(struct cache_set *c,
					  struct btree_op *op, int level,
					  struct btree *parent)
{
	return __bch_btree_node_alloc(c, op, level, op != NULL, parent);
}
static struct btree *btree_node_alloc_replacement(struct btree *b,
						  struct btree_op *op)
{
	struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);

	if (!IS_ERR(n)) {
		mutex_lock(&n->write_lock);
		bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
		bkey_copy_key(&n->key, &b->key);
		mutex_unlock(&n->write_lock);
	}

	return n;
}
static void make_btree_freeing_key(struct btree *b, struct bkey *k)
{
	unsigned int i;

	mutex_lock(&b->c->bucket_lock);

	atomic_inc(&b->c->prio_blocked);

	bkey_copy(k, &b->key);
	bkey_copy_key(k, &ZERO_KEY);

	for (i = 0; i < KEY_PTRS(k); i++)
		SET_PTR_GEN(k, i,
			    bch_inc_gen(b->c->cache,
					PTR_BUCKET(b->c, &b->key, i)));

	mutex_unlock(&b->c->bucket_lock);
}
static int btree_check_reserve(struct btree *b, struct btree_op *op)
{
	struct cache_set *c = b->c;
	struct cache *ca = c->cache;
	unsigned int reserve = (c->root->level - b->level) * 2 + 1;

	mutex_lock(&c->bucket_lock);

	if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
		if (op)
			prepare_to_wait(&c->btree_cache_wait, &op->wait,
					TASK_UNINTERRUPTIBLE);
		mutex_unlock(&c->bucket_lock);
		return -EINTR;
	}

	mutex_unlock(&c->bucket_lock);

	return mca_cannibalize_lock(b->c, op);
}
/* Garbage collection */

static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
				    struct bkey *k)
{
	uint8_t stale = 0;
	unsigned int i;
	struct bucket *g;

	/*
	 * ptr_invalid() can't return true for the keys that mark btree nodes as
	 * freed, but since ptr_bad() returns true we'll never actually use them
	 * for anything and thus we don't want to mark their pointers here
	 */
	if (!bkey_cmp(k, &ZERO_KEY))
		return stale;

	for (i = 0; i < KEY_PTRS(k); i++) {
		if (!ptr_available(c, k, i))
			continue;

		g = PTR_BUCKET(c, k, i);

		if (gen_after(g->last_gc, PTR_GEN(k, i)))
			g->last_gc = PTR_GEN(k, i);

		if (ptr_stale(c, k, i)) {
			stale = max(stale, ptr_stale(c, k, i));
			continue;
		}

		cache_bug_on(GC_MARK(g) &&
			     (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
			     c, "inconsistent ptrs: mark = %llu, level = %i",
			     GC_MARK(g), level);

		if (level)
			SET_GC_MARK(g, GC_MARK_METADATA);
		else if (KEY_DIRTY(k))
			SET_GC_MARK(g, GC_MARK_DIRTY);
		else if (!GC_MARK(g))
			SET_GC_MARK(g, GC_MARK_RECLAIMABLE);

		/* guard against overflow */
		SET_GC_SECTORS_USED(g, min_t(unsigned int,
					     GC_SECTORS_USED(g) + KEY_SIZE(k),
					     MAX_GC_SECTORS_USED));

		BUG_ON(!GC_SECTORS_USED(g));
	}

	return stale;
}
#define btree_mark_key(b, k)	__bch_btree_mark_key(b->c, b->level, k)
void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
{
	unsigned int i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i) &&
		    !ptr_stale(c, k, i)) {
			struct bucket *b = PTR_BUCKET(c, k, i);

			b->gen = PTR_GEN(k, i);

			if (level && bkey_cmp(k, &ZERO_KEY))
				b->prio = BTREE_PRIO;
			else if (!level && b->prio == BTREE_PRIO)
				b->prio = INITIAL_PRIO;
		}

	__bch_btree_mark_key(c, level, k);
}
void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
{
	stats->in_use = (c->nbuckets - c->avail_nbuckets) * 100 / c->nbuckets;
}
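/*
 * Worked example (illustrative): with c->nbuckets == 131072 and
 * c->avail_nbuckets == 32768, in_use = (131072 - 32768) * 100 / 131072
 * = 75, i.e. the integer percentage of buckets holding live data.
 */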
static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
{
	uint8_t stale = 0;
	unsigned int keys = 0, good_keys = 0;
	struct bkey *k;
	struct btree_iter iter;
	struct bset_tree *t;

	gc->nodes++;

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
		stale = max(stale, btree_mark_key(b, k));
		keys++;

		if (bch_ptr_bad(&b->keys, k))
			continue;

		gc->key_bytes += bkey_u64s(k);
		gc->nkeys++;
		good_keys++;

		gc->data += KEY_SIZE(k);
	}

	for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
		btree_bug_on(t->size &&
			     bset_written(&b->keys, t) &&
			     bkey_cmp(&b->key, &t->end) < 0,
			     b, "found short btree key in gc");

	if (b->c->gc_always_rewrite)
		return true;

	if (stale > 10)
		return true;

	if ((keys - good_keys) * 2 > keys)
		return true;

	return false;
}
#define GC_MERGE_NODES	4U

struct gc_merge_info {
	struct btree	*b;
	unsigned int	keys;
};

static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
				 struct keylist *insert_keys,
				 atomic_t *journal_ref,
				 struct bkey *replace_key);
static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
			     struct gc_stat *gc, struct gc_merge_info *r)
{
	unsigned int i, nodes = 0, keys = 0, blocks;
	struct btree *new_nodes[GC_MERGE_NODES];
	struct keylist keylist;
	struct closure cl;
	struct bkey *k;

	bch_keylist_init(&keylist);

	if (btree_check_reserve(b, NULL))
		return 0;

	memset(new_nodes, 0, sizeof(new_nodes));
	closure_init_stack(&cl);

	while (nodes < GC_MERGE_NODES && !IS_ERR(r[nodes].b))
		keys += r[nodes++].keys;

	blocks = btree_default_blocks(b->c) * 2 / 3;

	if (nodes < 2 ||
	    __set_blocks(b->keys.set[0].data, keys,
			 block_bytes(b->c->cache)) > blocks * (nodes - 1))
		return 0;

	for (i = 0; i < nodes; i++) {
		new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
		if (IS_ERR(new_nodes[i]))
			goto out_nocoalesce;
	}

	/*
	 * We have to check the reserve here, after we've allocated our new
	 * nodes, to make sure the insert below will succeed - we also check
	 * before as an optimization to potentially avoid a bunch of expensive
	 * allocs/sorts
	 */
	if (btree_check_reserve(b, NULL))
		goto out_nocoalesce;

	for (i = 0; i < nodes; i++)
		mutex_lock(&new_nodes[i]->write_lock);

	for (i = nodes - 1; i > 0; --i) {
		struct bset *n1 = btree_bset_first(new_nodes[i]);
		struct bset *n2 = btree_bset_first(new_nodes[i - 1]);
		struct bkey *k, *last = NULL;

		keys = 0;

		if (i > 1) {
			for (k = n2->start;
			     k < bset_bkey_last(n2);
			     k = bkey_next(k)) {
				if (__set_blocks(n1, n1->keys + keys +
						 bkey_u64s(k),
						 block_bytes(b->c->cache)) > blocks)
					break;

				last = k;
				keys += bkey_u64s(k);
			}
		} else {
			/*
			 * Last node we're not getting rid of - we're getting
			 * rid of the node at r[0]. Have to try and fit all of
			 * the remaining keys into this node; we can't ensure
			 * they will always fit due to rounding and variable
			 * length keys (shouldn't be possible in practice,
			 * though)
			 */
			if (__set_blocks(n1, n1->keys + n2->keys,
					 block_bytes(b->c->cache)) >
			    btree_blocks(new_nodes[i]))
				goto out_unlock_nocoalesce;

			keys = n2->keys;
			/* Take the key of the node we're getting rid of */
			last = &r->b->key;
		}

		BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c->cache)) >
		       btree_blocks(new_nodes[i]));

		if (last)
			bkey_copy_key(&new_nodes[i]->key, last);

		memcpy(bset_bkey_last(n1),
		       n2->start,
		       (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);

		n1->keys += keys;
		r[i].keys = n1->keys;

		memmove(n2->start,
			bset_bkey_idx(n2, keys),
			(void *) bset_bkey_last(n2) -
			(void *) bset_bkey_idx(n2, keys));

		n2->keys -= keys;

		if (__bch_keylist_realloc(&keylist,
					  bkey_u64s(&new_nodes[i]->key)))
			goto out_unlock_nocoalesce;

		bch_btree_node_write(new_nodes[i], &cl);
		bch_keylist_add(&keylist, &new_nodes[i]->key);
	}

	for (i = 0; i < nodes; i++)
		mutex_unlock(&new_nodes[i]->write_lock);

	closure_sync(&cl);

	/* We emptied out this node */
	BUG_ON(btree_bset_first(new_nodes[0])->keys);
	btree_node_free(new_nodes[0]);
	rw_unlock(true, new_nodes[0]);
	new_nodes[0] = NULL;

	for (i = 0; i < nodes; i++) {
		if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key)))
			goto out_nocoalesce;

		make_btree_freeing_key(r[i].b, keylist.top);
		bch_keylist_push(&keylist);
	}

	bch_btree_insert_node(b, op, &keylist, NULL, NULL);
	BUG_ON(!bch_keylist_empty(&keylist));

	for (i = 0; i < nodes; i++) {
		btree_node_free(r[i].b);
		rw_unlock(true, r[i].b);

		r[i].b = new_nodes[i];
	}

	memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
	r[nodes - 1].b = ERR_PTR(-EINTR);

	trace_bcache_btree_gc_coalesce(nodes);
	gc->nodes--;

	bch_keylist_free(&keylist);

	/* Invalidated our iterator */
	return -EINTR;

out_unlock_nocoalesce:
	for (i = 0; i < nodes; i++)
		mutex_unlock(&new_nodes[i]->write_lock);

out_nocoalesce:
	closure_sync(&cl);

	while ((k = bch_keylist_pop(&keylist)))
		if (!bkey_cmp(k, &ZERO_KEY))
			atomic_dec(&b->c->prio_blocked);
	bch_keylist_free(&keylist);

	for (i = 0; i < nodes; i++)
		if (!IS_ERR(new_nodes[i])) {
			btree_node_free(new_nodes[i]);
			rw_unlock(true, new_nodes[i]);
		}
	return 0;
}
static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
				 struct btree *replace)
{
	struct keylist keys;
	struct btree *n;

	if (btree_check_reserve(b, NULL))
		return 0;

	n = btree_node_alloc_replacement(replace, NULL);
	if (IS_ERR(n))
		return 0;

	/* recheck reserve after allocating replacement node */
	if (btree_check_reserve(b, NULL)) {
		btree_node_free(n);
		rw_unlock(true, n);
		return 0;
	}

	bch_btree_node_write_sync(n);

	bch_keylist_init(&keys);
	bch_keylist_add(&keys, &n->key);

	make_btree_freeing_key(replace, keys.top);
	bch_keylist_push(&keys);

	bch_btree_insert_node(b, op, &keys, NULL, NULL);
	BUG_ON(!bch_keylist_empty(&keys));

	btree_node_free(replace);
	rw_unlock(true, n);

	/* Invalidated our iterator */
	return -EINTR;
}
static unsigned int btree_gc_count_keys(struct btree *b)
{
	struct bkey *k;
	struct btree_iter iter;
	unsigned int ret = 0;

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
		ret += bkey_u64s(k);

	return ret;
}
static size_t btree_gc_min_nodes(struct cache_set *c)
{
	size_t min_nodes;

	/*
	 * Incremental GC pauses for 100ms whenever front-end I/O arrives, so
	 * if GC only processed a constant number (100) of nodes per batch, a
	 * large btree would keep GC running for a long time and the front-end
	 * I/Os could run out of buckets (no new bucket can be allocated
	 * during GC) and be blocked again. Instead, scale the batch size with
	 * the tree: divide GC into a constant number (MAX_GC_TIMES) of
	 * batches, so a big tree gets bigger batches, while a small tree
	 * still processes at least MIN_GC_NODES per batch.
	 */
	min_nodes = c->gc_stats.nodes / MAX_GC_TIMES;
	if (min_nodes < MIN_GC_NODES)
		min_nodes = MIN_GC_NODES;

	return min_nodes;
}
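/*
 * Worked example (illustrative): with c->gc_stats.nodes == 80000 and
 * MAX_GC_TIMES == 100, each batch covers 80000 / 100 = 800 nodes; with
 * only 3000 nodes the quotient (30) is raised to MIN_GC_NODES (100).
 */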
static int btree_gc_recurse(struct btree *b, struct btree_op *op,
			    struct closure *writes, struct gc_stat *gc)
{
	int ret = 0;
	bool should_rewrite;
	struct bkey *k;
	struct btree_iter iter;
	struct gc_merge_info r[GC_MERGE_NODES];
	struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;

	bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);

	for (i = r; i < r + ARRAY_SIZE(r); i++)
		i->b = ERR_PTR(-EINTR);

	while (1) {
		k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
		if (k) {
			r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
						  true, b);
			if (IS_ERR(r->b)) {
				ret = PTR_ERR(r->b);
				break;
			}

			r->keys = btree_gc_count_keys(r->b);

			ret = btree_gc_coalesce(b, op, gc, r);
			if (ret)
				break;
		}

		if (!last->b)
			break;

		if (!IS_ERR(last->b)) {
			should_rewrite = btree_gc_mark_node(last->b, gc);
			if (should_rewrite) {
				ret = btree_gc_rewrite_node(b, op, last->b);
				if (ret)
					break;
			}

			if (last->b->level) {
				ret = btree_gc_recurse(last->b, op, writes, gc);
				if (ret)
					break;
			}

			bkey_copy_key(&b->c->gc_done, &last->b->key);

			/*
			 * Must flush leaf nodes before gc ends, since replace
			 * operations aren't journalled
			 */
			mutex_lock(&last->b->write_lock);
			if (btree_node_dirty(last->b))
				bch_btree_node_write(last->b, writes);
			mutex_unlock(&last->b->write_lock);
			rw_unlock(true, last->b);
		}

		memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
		r->b = NULL;

		if (atomic_read(&b->c->search_inflight) &&
		    gc->nodes >= gc->nodes_pre + btree_gc_min_nodes(b->c)) {
			gc->nodes_pre = gc->nodes;
			ret = -EAGAIN;
			break;
		}

		if (need_resched()) {
			ret = -EAGAIN;
			break;
		}
	}

	for (i = r; i < r + ARRAY_SIZE(r); i++)
		if (!IS_ERR_OR_NULL(i->b)) {
			mutex_lock(&i->b->write_lock);
			if (btree_node_dirty(i->b))
				bch_btree_node_write(i->b, writes);
			mutex_unlock(&i->b->write_lock);
			rw_unlock(true, i->b);
		}

	return ret;
}
static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
			     struct closure *writes, struct gc_stat *gc)
{
	struct btree *n = NULL;
	int ret = 0;
	bool should_rewrite;

	should_rewrite = btree_gc_mark_node(b, gc);
	if (should_rewrite) {
		n = btree_node_alloc_replacement(b, NULL);

		if (!IS_ERR(n)) {
			bch_btree_node_write_sync(n);

			bch_btree_set_root(n);
			btree_node_free(b);
			rw_unlock(true, n);

			return -EINTR;
		}
	}

	__bch_btree_mark_key(b->c, b->level + 1, &b->key);

	if (b->level) {
		ret = btree_gc_recurse(b, op, writes, gc);
		if (ret)
			return ret;
	}

	bkey_copy_key(&b->c->gc_done, &b->key);

	return ret;
}
static void btree_gc_start(struct cache_set *c)
{
	struct cache *ca;
	struct bucket *b;

	if (!c->gc_mark_valid)
		return;

	mutex_lock(&c->bucket_lock);

	c->gc_mark_valid = 0;
	c->gc_done = ZERO_KEY;

	ca = c->cache;
	for_each_bucket(b, ca) {
		b->last_gc = b->gen;
		if (!atomic_read(&b->pin)) {
			SET_GC_MARK(b, 0);
			SET_GC_SECTORS_USED(b, 0);
		}
	}

	mutex_unlock(&c->bucket_lock);
}
static void bch_btree_gc_finish(struct cache_set *c)
{
	struct bucket *b;
	struct cache *ca;
	unsigned int i, j;
	uint64_t *k;

	mutex_lock(&c->bucket_lock);

	set_gc_sectors(c);
	c->gc_mark_valid = 1;
	c->need_gc	= 0;

	for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
		SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
			    GC_MARK_METADATA);

	/* don't reclaim buckets to which writeback keys point */
	rcu_read_lock();
	for (i = 0; i < c->devices_max_used; i++) {
		struct bcache_device *d = c->devices[i];
		struct cached_dev *dc;
		struct keybuf_key *w, *n;

		if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
			continue;
		dc = container_of(d, struct cached_dev, disk);

		spin_lock(&dc->writeback_keys.lock);
		rbtree_postorder_for_each_entry_safe(w, n,
					&dc->writeback_keys.keys, node)
			for (j = 0; j < KEY_PTRS(&w->key); j++)
				SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
					    GC_MARK_DIRTY);
		spin_unlock(&dc->writeback_keys.lock);
	}
	rcu_read_unlock();

	c->avail_nbuckets = 0;

	ca = c->cache;
	ca->invalidate_needs_gc = 0;

	for (k = ca->sb.d; k < ca->sb.d + ca->sb.keys; k++)
		SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);

	for (k = ca->prio_buckets;
	     k < ca->prio_buckets + prio_buckets(ca) * 2; k++)
		SET_GC_MARK(ca->buckets + *k, GC_MARK_METADATA);

	for_each_bucket(b, ca) {
		c->need_gc	= max(c->need_gc, bucket_gc_gen(b));

		if (atomic_read(&b->pin))
			continue;

		BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));

		if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
			c->avail_nbuckets++;
	}

	mutex_unlock(&c->bucket_lock);
}
static void bch_btree_gc(struct cache_set *c)
{
	int ret;
	struct gc_stat stats;
	struct closure writes;
	struct btree_op op;
	uint64_t start_time = local_clock();

	trace_bcache_gc_start(c);

	memset(&stats, 0, sizeof(struct gc_stat));
	closure_init_stack(&writes);
	bch_btree_op_init(&op, SHRT_MAX);

	btree_gc_start(c);

	/* if CACHE_SET_IO_DISABLE set, gc thread should stop too */
	do {
		ret = bcache_btree_root(gc_root, c, &op, &writes, &stats);
		closure_sync(&writes);
		cond_resched();

		if (ret == -EAGAIN)
			schedule_timeout_interruptible(msecs_to_jiffies
						       (GC_SLEEP_MS));
		else if (ret)
			pr_warn("gc failed!\n");
	} while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags));

	bch_btree_gc_finish(c);
	wake_up_allocators(c);

	bch_time_stats_update(&c->btree_gc_time, start_time);

	stats.key_bytes *= sizeof(uint64_t);
	stats.data	<<= 9;
	bch_update_bucket_in_use(c, &stats);
	memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));

	trace_bcache_gc_end(c);

	bch_moving_gc(c);
}
static bool gc_should_run(struct cache_set *c)
{
	struct cache *ca = c->cache;

	if (ca->invalidate_needs_gc)
		return true;

	if (atomic_read(&c->sectors_to_gc) < 0)
		return true;

	return false;
}

static int bch_gc_thread(void *arg)
{
	struct cache_set *c = arg;

	while (1) {
		wait_event_interruptible(c->gc_wait,
			   kthread_should_stop() ||
			   test_bit(CACHE_SET_IO_DISABLE, &c->flags) ||
			   gc_should_run(c));

		if (kthread_should_stop() ||
		    test_bit(CACHE_SET_IO_DISABLE, &c->flags))
			break;

		set_gc_sectors(c);
		bch_btree_gc(c);
	}

	wait_for_kthread_stop();
	return 0;
}
int bch_gc_thread_start(struct cache_set *c)
{
	c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
	return PTR_ERR_OR_ZERO(c->gc_thread);
}
/* Initial partial gc */

static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
{
	int ret = 0;
	struct bkey *k, *p = NULL;
	struct btree_iter iter;

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
		bch_initial_mark_key(b->c, b->level, k);

	bch_initial_mark_key(b->c, b->level + 1, &b->key);

	if (b->level) {
		bch_btree_iter_init(&b->keys, &iter, NULL);

		do {
			k = bch_btree_iter_next_filter(&iter, &b->keys,
						       bch_ptr_bad);
			if (k) {
				btree_node_prefetch(b, k);
				/*
				 * initialize c->gc_stats.nodes
				 * for incremental GC
				 */
				b->c->gc_stats.nodes++;
			}

			if (p)
				ret = bcache_btree(check_recurse, p, b, op);

			p = k;
		} while (p && !ret);
	}

	return ret;
}
static int bch_btree_check_thread(void *arg)
{
	int ret;
	struct btree_check_info *info = arg;
	struct btree_check_state *check_state = info->state;
	struct cache_set *c = check_state->c;
	struct btree_iter iter;
	struct bkey *k, *p;
	int cur_idx, prev_idx, skip_nr;

	k = p = NULL;
	cur_idx = prev_idx = 0;
	ret = 0;

	/* root node keys are checked before thread created */
	bch_btree_iter_init(&c->root->keys, &iter, NULL);
	k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
	BUG_ON(!k);

	p = k;
	while (k) {
		/*
		 * Fetch a root node key index, skip the keys which
		 * should be fetched by other threads, then check the
		 * sub-tree indexed by the fetched key.
		 */
		spin_lock(&check_state->idx_lock);
		cur_idx = check_state->key_idx;
		check_state->key_idx++;
		spin_unlock(&check_state->idx_lock);

		skip_nr = cur_idx - prev_idx;

		while (skip_nr) {
			k = bch_btree_iter_next_filter(&iter,
						       &c->root->keys,
						       bch_ptr_bad);
			if (k)
				p = k;
			else {
				/*
				 * No more keys to check in root node,
				 * current checking threads are enough,
				 * stop creating more.
				 */
				atomic_set(&check_state->enough, 1);
				/* Update check_state->enough earlier */
				smp_mb__after_atomic();
				goto out;
			}
			skip_nr--;
			cond_resched();
		}

		if (p) {
			struct btree_op op;

			btree_node_prefetch(c->root, p);
			c->gc_stats.nodes++;
			bch_btree_op_init(&op, 0);
			ret = bcache_btree(check_recurse, p, c->root, &op);
			/*
			 * The op may be added to cache_set's btree_cache_wait
			 * in mca_cannibalize(), must ensure it is removed from
			 * the list and release btree_cache_alloc_lock before
			 * free op memory.
			 * Otherwise, the btree_cache_wait will be damaged.
			 */
			bch_cannibalize_unlock(c);
			finish_wait(&c->btree_cache_wait, &(&op)->wait);
			if (ret)
				goto out;
		}
		p = NULL;
		prev_idx = cur_idx;
		cond_resched();
	}

out:
	info->result = ret;
	/* update check_state->started among all CPUs */
	smp_mb__before_atomic();
	if (atomic_dec_and_test(&check_state->started))
		wake_up(&check_state->wait);

	return ret;
}
static int bch_btree_chkthread_nr(void)
{
	int n = num_online_cpus()/2;

	if (n == 0)
		n = 1;
	else if (n > BCH_BTR_CHKTHREAD_MAX)
		n = BCH_BTR_CHKTHREAD_MAX;

	return n;
}
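/*
 * Worked example (illustrative): a 12-CPU machine gets 12 / 2 = 6 checker
 * threads, a single-CPU machine is rounded up to 1, and anything beyond
 * 2 * BCH_BTR_CHKTHREAD_MAX CPUs is capped at BCH_BTR_CHKTHREAD_MAX.
 */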
int bch_btree_check(struct cache_set *c)
{
	int ret = 0;
	int i;
	struct bkey *k = NULL;
	struct btree_iter iter;
	struct btree_check_state check_state;

	/* check and mark root node keys */
	for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid)
		bch_initial_mark_key(c, c->root->level, k);

	bch_initial_mark_key(c, c->root->level + 1, &c->root->key);

	if (c->root->level == 0)
		return 0;

	memset(&check_state, 0, sizeof(struct btree_check_state));
	check_state.c = c;
	check_state.total_threads = bch_btree_chkthread_nr();
	check_state.key_idx = 0;
	spin_lock_init(&check_state.idx_lock);
	atomic_set(&check_state.started, 0);
	atomic_set(&check_state.enough, 0);
	init_waitqueue_head(&check_state.wait);

	rw_lock(0, c->root, c->root->level);
	/*
	 * Run multiple threads to check btree nodes in parallel,
	 * if check_state.enough is non-zero, it means current
	 * running check threads are enough, unnecessary to create
	 * more.
	 */
	for (i = 0; i < check_state.total_threads; i++) {
		/* fetch latest check_state.enough earlier */
		smp_mb__before_atomic();
		if (atomic_read(&check_state.enough))
			break;

		check_state.infos[i].result = 0;
		check_state.infos[i].state = &check_state;

		check_state.infos[i].thread =
			kthread_run(bch_btree_check_thread,
				    &check_state.infos[i],
				    "bch_btrchk[%d]", i);
		if (IS_ERR(check_state.infos[i].thread)) {
			pr_err("fails to run thread bch_btrchk[%d]\n", i);
			for (--i; i >= 0; i--)
				kthread_stop(check_state.infos[i].thread);
			ret = -ENOMEM;
			goto out;
		}
		atomic_inc(&check_state.started);
	}

	/*
	 * Must wait for all threads to stop.
	 */
	wait_event(check_state.wait, atomic_read(&check_state.started) == 0);

	for (i = 0; i < check_state.total_threads; i++) {
		if (check_state.infos[i].result) {
			ret = check_state.infos[i].result;
			goto out;
		}
	}

out:
	rw_unlock(0, c->root);
	return ret;
}
void bch_initial_gc_finish(struct cache_set *c)
{
	struct cache *ca = c->cache;
	struct bucket *b;

	bch_btree_gc_finish(c);

	mutex_lock(&c->bucket_lock);

	/*
	 * We need to put some unused buckets directly on the prio freelist in
	 * order to get the allocator thread started - it needs freed buckets in
	 * order to rewrite the prios and gens, and it needs to rewrite prios
	 * and gens in order to free buckets.
	 *
	 * This is only safe for buckets that have no live data in them, which
	 * there should always be some of.
	 */
	for_each_bucket(b, ca) {
		if (fifo_full(&ca->free[RESERVE_PRIO]) &&
		    fifo_full(&ca->free[RESERVE_BTREE]))
			break;

		if (bch_can_invalidate_bucket(ca, b) &&
		    !GC_MARK(b)) {
			__bch_invalidate_one_bucket(ca, b);
			if (!fifo_push(&ca->free[RESERVE_PRIO],
			   b - ca->buckets))
				fifo_push(&ca->free[RESERVE_BTREE],
					  b - ca->buckets);
		}
	}

	mutex_unlock(&c->bucket_lock);
}
/* Btree insertion */

static bool btree_insert_key(struct btree *b, struct bkey *k,
			     struct bkey *replace_key)
{
	unsigned int status;

	BUG_ON(bkey_cmp(k, &b->key) > 0);

	status = bch_btree_insert_key(&b->keys, k, replace_key);
	if (status != BTREE_INSERT_STATUS_NO_INSERT) {
		bch_check_keys(&b->keys, "%u for %s", status,
			       replace_key ? "replace" : "insert");

		trace_bcache_btree_insert_key(b, k, replace_key != NULL,
					      status);
		return true;
	} else
		return false;
}
static size_t insert_u64s_remaining(struct btree *b)
{
	long ret = bch_btree_keys_u64s_remaining(&b->keys);

	/*
	 * Might land in the middle of an existing extent and have to split it
	 */
	if (b->keys.ops->is_extents)
		ret -= KEY_MAX_U64S;

	return max(ret, 0L);
}
static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
				  struct keylist *insert_keys,
				  struct bkey *replace_key)
{
	bool ret = false;
	int oldsize = bch_count_data(&b->keys);

	while (!bch_keylist_empty(insert_keys)) {
		struct bkey *k = insert_keys->keys;

		if (bkey_u64s(k) > insert_u64s_remaining(b))
			break;

		if (bkey_cmp(k, &b->key) <= 0) {
			if (!b->level)
				bkey_put(b->c, k);

			ret |= btree_insert_key(b, k, replace_key);
			bch_keylist_pop_front(insert_keys);
		} else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
			BKEY_PADDED(key) temp;
			bkey_copy(&temp.key, insert_keys->keys);

			bch_cut_back(&b->key, &temp.key);
			bch_cut_front(&b->key, insert_keys->keys);

			ret |= btree_insert_key(b, &temp.key, replace_key);
			break;
		} else {
			break;
		}
	}

	if (!ret)
		op->insert_collision = true;

	BUG_ON(!bch_keylist_empty(insert_keys) && b->level);

	BUG_ON(bch_count_data(&b->keys) < oldsize);
	return ret;
}
static int btree_split(struct btree *b, struct btree_op *op,
		       struct keylist *insert_keys,
		       struct bkey *replace_key)
{
	bool split;
	struct btree *n1, *n2 = NULL, *n3 = NULL;
	uint64_t start_time = local_clock();
	struct closure cl;
	struct keylist parent_keys;

	closure_init_stack(&cl);
	bch_keylist_init(&parent_keys);

	if (btree_check_reserve(b, op)) {
		if (!b->level)
			return -EINTR;
		else
			WARN(1, "insufficient reserve for split\n");
	}

	n1 = btree_node_alloc_replacement(b, op);
	if (IS_ERR(n1))
		goto err;

	split = set_blocks(btree_bset_first(n1),
			   block_bytes(n1->c->cache)) > (btree_blocks(b) * 4) / 5;

	if (split) {
		unsigned int keys = 0;

		trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);

		n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent);
		if (IS_ERR(n2))
			goto err_free1;

		if (!b->parent) {
			n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL);
			if (IS_ERR(n3))
				goto err_free2;
		}

		mutex_lock(&n1->write_lock);
		mutex_lock(&n2->write_lock);

		bch_btree_insert_keys(n1, op, insert_keys, replace_key);

		/*
		 * Has to be a linear search because we don't have an auxiliary
		 * search tree yet
		 */

		while (keys < (btree_bset_first(n1)->keys * 3) / 5)
			keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
							keys));

		bkey_copy_key(&n1->key,
			      bset_bkey_idx(btree_bset_first(n1), keys));
		keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));

		btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
		btree_bset_first(n1)->keys = keys;

		memcpy(btree_bset_first(n2)->start,
		       bset_bkey_last(btree_bset_first(n1)),
		       btree_bset_first(n2)->keys * sizeof(uint64_t));

		bkey_copy_key(&n2->key, &b->key);

		bch_keylist_add(&parent_keys, &n2->key);
		bch_btree_node_write(n2, &cl);
		mutex_unlock(&n2->write_lock);
		rw_unlock(true, n2);
	} else {
		trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);

		mutex_lock(&n1->write_lock);
		bch_btree_insert_keys(n1, op, insert_keys, replace_key);
	}

	bch_keylist_add(&parent_keys, &n1->key);
	bch_btree_node_write(n1, &cl);
	mutex_unlock(&n1->write_lock);

	if (n3) {
		/* Depth increases, make a new root */
		mutex_lock(&n3->write_lock);
		bkey_copy_key(&n3->key, &MAX_KEY);
		bch_btree_insert_keys(n3, op, &parent_keys, NULL);
		bch_btree_node_write(n3, &cl);
		mutex_unlock(&n3->write_lock);

		closure_sync(&cl);
		bch_btree_set_root(n3);
		rw_unlock(true, n3);
	} else if (!b->parent) {
		/* Root filled up but didn't need to be split */
		closure_sync(&cl);
		bch_btree_set_root(n1);
	} else {
		/* Split a non root node */
		closure_sync(&cl);
		make_btree_freeing_key(b, parent_keys.top);
		bch_keylist_push(&parent_keys);

		bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
		BUG_ON(!bch_keylist_empty(&parent_keys));
	}

	btree_node_free(b);
	rw_unlock(true, n1);

	bch_time_stats_update(&b->c->btree_split_time, start_time);

	return 0;
err_free2:
	bkey_put(b->c, &n2->key);
	btree_node_free(n2);
	rw_unlock(true, n2);
err_free1:
	bkey_put(b->c, &n1->key);
	btree_node_free(n1);
	rw_unlock(true, n1);
err:
	WARN(1, "bcache: btree split failed (level %u)", b->level);

	if (n3 == ERR_PTR(-EAGAIN) ||
	    n2 == ERR_PTR(-EAGAIN) ||
	    n1 == ERR_PTR(-EAGAIN))
		return -EAGAIN;

	return -ENOMEM;
}
static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
				 struct keylist *insert_keys,
				 atomic_t *journal_ref,
				 struct bkey *replace_key)
{
	struct closure cl;

	BUG_ON(b->level && replace_key);

	closure_init_stack(&cl);

	mutex_lock(&b->write_lock);

	if (write_block(b) != btree_bset_last(b) &&
	    b->keys.last_set_unwritten)
		bch_btree_init_next(b); /* just wrote a set */

	if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) {
		mutex_unlock(&b->write_lock);
		goto split;
	}

	BUG_ON(write_block(b) != btree_bset_last(b));

	if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
		if (!b->level)
			bch_btree_leaf_dirty(b, journal_ref);
		else
			bch_btree_node_write(b, &cl);
	}

	mutex_unlock(&b->write_lock);

	/* wait for btree node write if necessary, after unlock */
	closure_sync(&cl);

	return 0;
split:
	if (current->bio_list) {
		op->lock = b->c->root->level + 1;
		return -EAGAIN;
	} else if (op->lock <= b->c->root->level) {
		op->lock = b->c->root->level + 1;
		return -EINTR;
	} else {
		/* Invalidated all iterators */
		int ret = btree_split(b, op, insert_keys, replace_key);

		if (bch_keylist_empty(insert_keys))
			return 0;
		else if (!ret)
			return -EINTR;
		return ret;
	}
}
int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
			       struct bkey *check_key)
{
	int ret = -EINTR;
	uint64_t btree_ptr = b->key.ptr[0];
	unsigned long seq = b->seq;
	struct keylist insert;
	bool upgrade = op->lock == -1;

	bch_keylist_init(&insert);

	if (upgrade) {
		rw_unlock(false, b);
		rw_lock(true, b, b->level);

		if (b->key.ptr[0] != btree_ptr ||
		    b->seq != seq + 1) {
			op->lock = b->level;
			goto out;
		}
	}

	SET_KEY_PTRS(check_key, 1);
	get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));

	SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);

	bch_keylist_add(&insert, check_key);

	ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);

	BUG_ON(!ret && !bch_keylist_empty(&insert));
out:
	if (upgrade)
		downgrade_write(&b->lock);
	return ret;
}
struct btree_insert_op {
	struct btree_op	op;
	struct keylist	*keys;
	atomic_t	*journal_ref;
	struct bkey	*replace_key;
};

static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
{
	struct btree_insert_op *op = container_of(b_op,
					struct btree_insert_op, op);

	int ret = bch_btree_insert_node(b, &op->op, op->keys,
					op->journal_ref, op->replace_key);
	if (ret && !bch_keylist_empty(op->keys))
		return ret;
	else
		return MAP_DONE;
}
int bch_btree_insert(struct cache_set *c, struct keylist *keys,
		     atomic_t *journal_ref, struct bkey *replace_key)
{
	struct btree_insert_op op;
	int ret = 0;

	BUG_ON(current->bio_list);
	BUG_ON(bch_keylist_empty(keys));

	bch_btree_op_init(&op.op, 0);
	op.keys		= keys;
	op.journal_ref	= journal_ref;
	op.replace_key	= replace_key;

	while (!ret && !bch_keylist_empty(keys)) {
		op.op.lock = 0;
		ret = bch_btree_map_leaf_nodes(&op.op, c,
					       &START_KEY(keys->keys),
					       btree_insert_fn);
	}

	if (ret) {
		struct bkey *k;

		pr_err("error %i\n", ret);

		while ((k = bch_keylist_pop(keys)))
			bkey_put(c, k);
	} else if (op.op.insert_collision)
		ret = -ESRCH;

	return ret;
}
void bch_btree_set_root(struct btree *b)
{
	unsigned int i;
	struct closure cl;

	closure_init_stack(&cl);

	trace_bcache_btree_set_root(b);

	BUG_ON(!b->written);

	for (i = 0; i < KEY_PTRS(&b->key); i++)
		BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);

	mutex_lock(&b->c->bucket_lock);
	list_del_init(&b->list);
	mutex_unlock(&b->c->bucket_lock);

	b->c->root = b;

	bch_journal_meta(b->c, &cl);
	closure_sync(&cl);
}
/* Map across nodes or keys */

static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
				       struct bkey *from,
				       btree_map_nodes_fn *fn, int flags)
{
	int ret = MAP_CONTINUE;

	if (b->level) {
		struct bkey *k;
		struct btree_iter iter;

		bch_btree_iter_init(&b->keys, &iter, from);

		while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
						       bch_ptr_bad))) {
			ret = bcache_btree(map_nodes_recurse, k, b,
				    op, from, fn, flags);
			from = NULL;

			if (ret != MAP_CONTINUE)
				return ret;
		}
	}

	if (!b->level || flags == MAP_ALL_NODES)
		ret = fn(op, b);

	return ret;
}

int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
			  struct bkey *from, btree_map_nodes_fn *fn, int flags)
{
	return bcache_btree_root(map_nodes_recurse, c, op, from, fn, flags);
}
int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
			       struct bkey *from, btree_map_keys_fn *fn,
			       int flags)
{
	int ret = MAP_CONTINUE;
	struct bkey *k;
	struct btree_iter iter;

	bch_btree_iter_init(&b->keys, &iter, from);

	while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
		ret = !b->level
			? fn(op, b, k)
			: bcache_btree(map_keys_recurse, k,
				       b, op, from, fn, flags);
		from = NULL;

		if (ret != MAP_CONTINUE)
			return ret;
	}

	if (!b->level && (flags & MAP_END_KEY))
		ret = fn(op, b, &KEY(KEY_INODE(&b->key),
				     KEY_OFFSET(&b->key), 0));

	return ret;
}

int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
		       struct bkey *from, btree_map_keys_fn *fn, int flags)
{
	return bcache_btree_root(map_keys_recurse, c, op, from, fn, flags);
}
/* Keybuf code */

static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
{
	/* Overlapping keys compare equal */
	if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
		return -1;
	if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
		return 1;
	return 0;
}

static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
					    struct keybuf_key *r)
{
	return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
}
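/*
 * Illustrative note: bcache keys name the *end* of an extent, so a key
 * covering sectors [8, 16) has START_KEY() 8 and compares as 16. Under
 * keybuf_cmp(), extents [8, 16) and [12, 20) compare equal because they
 * overlap, which makes an rb-tree lookup find any buffered key that
 * intersects the one being inserted.
 */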
struct refill {
	struct btree_op	op;
	unsigned int	nr_found;
	struct keybuf	*buf;
	struct bkey	*end;
	keybuf_pred_fn	*pred;
};

static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
			    struct bkey *k)
{
	struct refill *refill = container_of(op, struct refill, op);
	struct keybuf *buf = refill->buf;
	int ret = MAP_CONTINUE;

	if (bkey_cmp(k, refill->end) > 0) {
		ret = MAP_DONE;
		goto out;
	}

	if (!KEY_SIZE(k)) /* end key */
		goto out;

	if (refill->pred(buf, k)) {
		struct keybuf_key *w;

		spin_lock(&buf->lock);

		w = array_alloc(&buf->freelist);
		if (!w) {
			spin_unlock(&buf->lock);
			return MAP_DONE;
		}

		w->private = NULL;
		bkey_copy(&w->key, k);

		if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
			array_free(&buf->freelist, w);
		else
			refill->nr_found++;

		if (array_freelist_empty(&buf->freelist))
			ret = MAP_DONE;

		spin_unlock(&buf->lock);
	}
out:
	buf->last_scanned = *k;
	return ret;
}
void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
		       struct bkey *end, keybuf_pred_fn *pred)
{
	struct bkey start = buf->last_scanned;
	struct refill refill;

	cond_resched();

	bch_btree_op_init(&refill.op, -1);
	refill.nr_found	= 0;
	refill.buf	= buf;
	refill.end	= end;
	refill.pred	= pred;

	bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
			   refill_keybuf_fn, MAP_END_KEY);

	trace_bcache_keyscan(refill.nr_found,
			     KEY_INODE(&start), KEY_OFFSET(&start),
			     KEY_INODE(&buf->last_scanned),
			     KEY_OFFSET(&buf->last_scanned));

	spin_lock(&buf->lock);

	if (!RB_EMPTY_ROOT(&buf->keys)) {
		struct keybuf_key *w;

		w = RB_FIRST(&buf->keys, struct keybuf_key, node);
		buf->start	= START_KEY(&w->key);

		w = RB_LAST(&buf->keys, struct keybuf_key, node);
		buf->end	= w->key;
	} else {
		buf->start	= MAX_KEY;
		buf->end	= MAX_KEY;
	}

	spin_unlock(&buf->lock);
}
static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
{
	rb_erase(&w->node, &buf->keys);
	array_free(&buf->freelist, w);
}

void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
{
	spin_lock(&buf->lock);
	__bch_keybuf_del(buf, w);
	spin_unlock(&buf->lock);
}
bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
				  struct bkey *end)
{
	bool ret = false;
	struct keybuf_key *p, *w, s;

	s.key = *start;

	if (bkey_cmp(end, &buf->start) <= 0 ||
	    bkey_cmp(start, &buf->end) >= 0)
		return false;

	spin_lock(&buf->lock);
	w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);

	while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
		p = w;
		w = RB_NEXT(w, node);

		if (p->private)
			ret = true;
		else
			__bch_keybuf_del(buf, p);
	}

	spin_unlock(&buf->lock);
	return ret;
}
struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
{
	struct keybuf_key *w;

	spin_lock(&buf->lock);

	w = RB_FIRST(&buf->keys, struct keybuf_key, node);

	while (w && w->private)
		w = RB_NEXT(w, node);

	if (w)
		w->private = ERR_PTR(-EINTR);

	spin_unlock(&buf->lock);
	return w;
}
struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
					  struct keybuf *buf,
					  struct bkey *end,
					  keybuf_pred_fn *pred)
{
	struct keybuf_key *ret;

	while (1) {
		ret = bch_keybuf_next(buf);
		if (ret)
			break;

		if (bkey_cmp(&buf->last_scanned, end) >= 0) {
			pr_debug("scan finished\n");
			break;
		}

		bch_refill_keybuf(c, buf, end, pred);
	}

	return ret;
}
void bch_keybuf_init(struct keybuf *buf)
{
	buf->last_scanned	= MAX_KEY;
	buf->keys		= RB_ROOT;

	spin_lock_init(&buf->lock);
	array_allocator_init(&buf->freelist);
}
void bch_btree_exit(void)
{
	if (btree_io_wq)
		destroy_workqueue(btree_io_wq);
}

int __init bch_btree_init(void)
{
	btree_io_wq = alloc_workqueue("bch_btree_io", WQ_MEM_RECLAIM, 0);
	if (!btree_io_wq)
		return -ENOMEM;

	return 0;
}