// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 *
 * Uses a block device as cache for other block devices; optimized for SSDs.
 * All allocation is done in buckets, which should match the erase block size
 * of the device.
 *
 * Buckets containing cached data are kept on a heap sorted by priority;
 * bucket priority is increased on cache hit, and periodically all the buckets
 * on the heap have their priority scaled down. This currently is just used as
 * an LRU but in the future should allow for more intelligent heuristics.
 *
 * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
 * counter. Garbage collection is used to remove stale pointers.
 *
 * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
 * as keys are inserted we only sort the pages that have not yet been written.
 * When garbage collection is run, we resort the entire node.
 *
 * All configuration is done via sysfs; see Documentation/bcache.txt.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"

#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/prefetch.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/sched/clock.h>
#include <linux/rculist.h>

#include <trace/events/bcache.h>

/*
 * Todo:
 * register_bcache: Return errors out to userspace correctly
 *
 * Writeback: don't undirty key until after a cache flush
 *
 * Create an iterator for key pointers
 *
 * On btree write error, mark bucket such that it won't be freed from the cache
 *
 * Journalling:
 *	Check for bad keys in replay
 *	Propagate barriers
 *	Refcount journal entries in journal_replay
 *
 * Garbage collection:
 *	Finish incremental gc
 *	Gc should free old UUIDs, data for invalid UUIDs
 *
 * Provide a way to list backing device UUIDs we have data cached for, and
 * probably how long it's been since we've seen them, and a way to invalidate
 * dirty data for devices that will never be attached again
 *
 * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
 * that based on that and how much dirty data we have we can keep writeback
 * from being starved
 *
 * Add a tracepoint or somesuch to watch for writeback starvation
 *
 * When btree depth > 1 and splitting an interior node, we have to make sure
 * alloc_bucket() cannot fail. This should be true but is not completely
 * obvious.
 *
 * Plugging?
 *
 * If data write is less than hard sector size of ssd, round up offset in open
 * bucket to the next whole sector
 *
 * Superblock needs to be fleshed out for multiple cache devices
 *
 * Add a sysfs tunable for the number of writeback IOs in flight
 *
 * Add a sysfs tunable for the number of open data buckets
 *
 * IO tracking: Can we track when one process is doing io on behalf of another?
 * IO tracking: Don't use just an average, weigh more recent stuff higher
 *
 * Test module load/unload
 */

#define MAX_NEED_GC		64
#define MAX_SAVE_PRIO		72

#define PTR_DIRTY_BIT		(((uint64_t) 1 << 36))

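/*
 * PTR_HASH() identifies a btree node by the bucket its first pointer points
 * into (the pointer offset shifted down to bucket granularity) combined with
 * that pointer's generation; mca_hash() and mca_find() below use it to look
 * nodes up in the in-memory btree node cache.
 */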
#define PTR_HASH(c, k)							\
	(((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))

#define insert_lock(s, b)	((b)->level <= (s)->lock)

/*
 * These macros are for recursing down the btree - they handle the details of
 * locking and looking up nodes in the cache for you. They're best treated as
 * mere syntax when reading code that uses them.
 *
 * op->lock determines whether we take a read or a write lock at a given depth.
 * If you've got a read lock and find that you need a write lock (i.e. you're
 * going to have to split), set op->lock and return -EINTR; btree_root() will
 * call you again and you'll have the correct lock.
 */

/**
 * btree - recurse down the btree on a specified key
 * @fn:		function to call, which will be passed the child node
 * @key:	key to recurse on
 * @b:		parent btree node
 * @op:		pointer to struct btree_op
 */
#define btree(fn, key, b, op, ...)					\
({									\
	int _r, l = (b)->level - 1;					\
	bool _w = l <= (op)->lock;					\
	struct btree *_child = bch_btree_node_get((b)->c, op, key, l,	\
						  _w, b);		\
	if (!IS_ERR(_child)) {						\
		_r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__);	\
		rw_unlock(_w, _child);					\
	} else								\
		_r = PTR_ERR(_child);					\
	_r;								\
})

/**
 * btree_root - call a function on the root of the btree
 * @fn:		function to call, which will be passed the child node
 * @c:		cache set
 * @op:		pointer to struct btree_op
 */
#define btree_root(fn, c, op, ...)					\
({									\
	int _r = -EINTR;						\
	do {								\
		struct btree *_b = (c)->root;				\
		bool _w = insert_lock(op, _b);				\
		rw_lock(_w, _b, _b->level);				\
		if (_b == (c)->root &&					\
		    _w == insert_lock(op, _b)) {			\
			_r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);	\
		}							\
		rw_unlock(_w, _b);					\
		bch_cannibalize_unlock(c);				\
		if (_r == -EINTR)					\
			schedule();					\
	} while (_r == -EINTR);						\
									\
	finish_wait(&(c)->btree_cache_wait, &(op)->wait);		\
	_r;								\
})

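/*
 * Example usage (see bch_btree_check() and bch_btree_check_recurse() below):
 * btree_root(check_recurse, c, &op) locks the current root and calls
 * bch_btree_check_recurse() on it, retrying while it returns -EINTR, and
 * btree(check_recurse, k, b, op) does the same for the child node that key k
 * points to, choosing a read or write lock based on op->lock.
 */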
static inline struct bset *write_block(struct btree *b)
{
	return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c);
}

static void bch_btree_init_next(struct btree *b)
{
	/* If not a leaf node, always sort */
	if (b->level && b->keys.nsets)
		bch_btree_sort(&b->keys, &b->c->sort);
	else
		bch_btree_sort_lazy(&b->keys, &b->c->sort);

	if (b->written < btree_blocks(b))
		bch_bset_init_next(&b->keys, write_block(b),
				   bset_magic(&b->c->sb));

}

/* Btree key manipulation */

void bkey_put(struct cache_set *c, struct bkey *k)
{
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i))
			atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
}

/* Btree IO */

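/*
 * Checksum for btree node writes: the crc is seeded with the node's first
 * pointer (tying it to the bucket the node was written to) and covers the
 * bset contents after the leading 8 byte csum field.
 */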
static uint64_t btree_csum_set(struct btree *b, struct bset *i)
{
	uint64_t crc = b->key.ptr[0];
	void *data = (void *) i + 8, *end = bset_bkey_last(i);

	crc = bch_crc64_update(crc, data, end - data);
	return crc ^ 0xffffffffffffffffULL;
}

void bch_btree_node_read_done(struct btree *b)
{
	const char *err = "bad btree header";
	struct bset *i = btree_bset_first(b);
	struct btree_iter *iter;

	iter = mempool_alloc(b->c->fill_iter, GFP_NOIO);
	iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
	iter->used = 0;

#ifdef CONFIG_BCACHE_DEBUG
	iter->b = &b->keys;
#endif

	if (!i->seq)
		goto err;

	for (;
	     b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
	     i = write_block(b)) {
		err = "unsupported bset version";
		if (i->version > BCACHE_BSET_VERSION)
			goto err;

		err = "bad btree header";
		if (b->written + set_blocks(i, block_bytes(b->c)) >
		    btree_blocks(b))
			goto err;

		err = "bad magic";
		if (i->magic != bset_magic(&b->c->sb))
			goto err;

		err = "bad checksum";
		switch (i->version) {
		case 0:
			if (i->csum != csum_set(i))
				goto err;
			break;
		case BCACHE_BSET_VERSION:
			if (i->csum != btree_csum_set(b, i))
				goto err;
			break;
		}

		err = "empty set";
		if (i != b->keys.set[0].data && !i->keys)
			goto err;

		bch_btree_iter_push(iter, i->start, bset_bkey_last(i));

		b->written += set_blocks(i, block_bytes(b->c));
	}

	err = "corrupted btree";
	for (i = write_block(b);
	     bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
	     i = ((void *) i) + block_bytes(b->c))
		if (i->seq == b->keys.set[0].data->seq)
			goto err;

	bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);

	i = b->keys.set[0].data;
	err = "short btree key";
	if (b->keys.set[0].size &&
	    bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
		goto err;

	if (b->written < btree_blocks(b))
		bch_bset_init_next(&b->keys, write_block(b),
				   bset_magic(&b->c->sb));
out:
	mempool_free(iter, b->c->fill_iter);
	return;
err:
	set_btree_node_io_error(b);
	bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
			    err, PTR_BUCKET_NR(b->c, &b->key, 0),
			    bset_block_offset(b, i), i->keys);
	goto out;
}

static void btree_node_read_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	closure_put(cl);
}

static void bch_btree_node_read(struct btree *b)
{
	uint64_t start_time = local_clock();
	struct closure cl;
	struct bio *bio;

	trace_bcache_btree_read(b);

	closure_init_stack(&cl);

	bio = bch_bbio_alloc(b->c);
	bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
	bio->bi_end_io	= btree_node_read_endio;
	bio->bi_private	= &cl;
	bio->bi_opf = REQ_OP_READ | REQ_META;

	bch_bio_map(bio, b->keys.set[0].data);

	bch_submit_bbio(bio, b->c, &b->key, 0);
	closure_sync(&cl);

	if (bio->bi_status)
		set_btree_node_io_error(b);

	bch_bbio_free(bio, b->c);

	if (btree_node_io_error(b))
		goto err;

	bch_btree_node_read_done(b);
	bch_time_stats_update(&b->c->btree_read_time, start_time);

	return;
err:
	bch_cache_set_error(b->c, "io error reading bucket %zu",
			    PTR_BUCKET_NR(b->c, &b->key, 0));
}

static void btree_complete_write(struct btree *b, struct btree_write *w)
{
	if (w->prio_blocked &&
	    !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
		wake_up_allocators(b->c);

	if (w->journal) {
		atomic_dec_bug(w->journal);
		__closure_wake_up(&b->c->journal.wait);
	}

	w->prio_blocked	= 0;
	w->journal	= NULL;
}

static void btree_node_write_unlock(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io);

	up(&b->io_mutex);
}

static void __btree_node_write_done(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io);
	struct btree_write *w = btree_prev_write(b);

	bch_bbio_free(b->bio, b->c);
	b->bio = NULL;
	btree_complete_write(b, w);

	if (btree_node_dirty(b))
		schedule_delayed_work(&b->work, 30 * HZ);

	closure_return_with_destructor(cl, btree_node_write_unlock);
}

static void btree_node_write_done(struct closure *cl)
{
	struct btree *b = container_of(cl, struct btree, io);

	bio_free_pages(b->bio);
	__btree_node_write_done(cl);
}

static void btree_node_write_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	struct btree *b = container_of(cl, struct btree, io);

	if (bio->bi_status)
		set_btree_node_io_error(b);

	bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree");
	closure_put(cl);
}

static void do_btree_node_write(struct btree *b)
{
	struct closure *cl = &b->io;
	struct bset *i = btree_bset_last(b);
	BKEY_PADDED(key) k;

	i->version	= BCACHE_BSET_VERSION;
	i->csum		= btree_csum_set(b, i);

	BUG_ON(b->bio);
	b->bio = bch_bbio_alloc(b->c);

	b->bio->bi_end_io	= btree_node_write_endio;
	b->bio->bi_private	= cl;
	b->bio->bi_iter.bi_size	= roundup(set_bytes(i), block_bytes(b->c));
	b->bio->bi_opf		= REQ_OP_WRITE | REQ_META | REQ_FUA;
	bch_bio_map(b->bio, i);

	/*
	 * If we're appending to a leaf node, we don't technically need FUA -
	 * this write just needs to be persisted before the next journal write,
	 * which will be marked FLUSH|FUA.
	 *
	 * Similarly if we're writing a new btree root - the pointer is going to
	 * be in the next journal entry.
	 *
	 * But if we're writing a new btree node (that isn't a root) or
	 * appending to a non leaf btree node, we need either FUA or a flush
	 * when we write the parent with the new pointer. FUA is cheaper than a
	 * flush, and writes appending to leaf nodes aren't blocking anything so
	 * just make all btree node writes FUA to keep things sane.
	 */

	bkey_copy(&k.key, &b->key);
	SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
		       bset_sector_offset(&b->keys, i));

	if (!bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
		int j;
		struct bio_vec *bv;
		void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));

		bio_for_each_segment_all(bv, b->bio, j)
			memcpy(page_address(bv->bv_page),
			       base + j * PAGE_SIZE, PAGE_SIZE);

		bch_submit_bbio(b->bio, b->c, &k.key, 0);

		continue_at(cl, btree_node_write_done, NULL);
	} else {
		b->bio->bi_vcnt = 0;
		bch_bio_map(b->bio, i);

		bch_submit_bbio(b->bio, b->c, &k.key, 0);

		closure_sync(cl);
		continue_at_nobarrier(cl, __btree_node_write_done, NULL);
	}
}

void __bch_btree_node_write(struct btree *b, struct closure *parent)
{
	struct bset *i = btree_bset_last(b);

	lockdep_assert_held(&b->write_lock);

	trace_bcache_btree_write(b);

	BUG_ON(current->bio_list);
	BUG_ON(b->written >= btree_blocks(b));
	BUG_ON(b->written && !i->keys);
	BUG_ON(btree_bset_first(b)->seq != i->seq);
	bch_check_keys(&b->keys, "writing");

	cancel_delayed_work(&b->work);

	/* If caller isn't waiting for write, parent refcount is cache set */
	down(&b->io_mutex);
	closure_init(&b->io, parent ?: &b->c->cl);

	clear_bit(BTREE_NODE_dirty,	 &b->flags);
	change_bit(BTREE_NODE_write_idx, &b->flags);

	do_btree_node_write(b);

	atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
			&PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);

	b->written += set_blocks(i, block_bytes(b->c));
}

void bch_btree_node_write(struct btree *b, struct closure *parent)
{
	unsigned nsets = b->keys.nsets;

	lockdep_assert_held(&b->lock);

	__bch_btree_node_write(b, parent);

	/*
	 * do verify if there was more than one set initially (i.e. we did a
	 * sort) and we sorted down to a single set:
	 */
	if (nsets && !b->keys.nsets)
		bch_btree_verify(b);

	bch_btree_init_next(b);
}

static void bch_btree_node_write_sync(struct btree *b)
{
	struct closure cl;

	closure_init_stack(&cl);

	mutex_lock(&b->write_lock);
	bch_btree_node_write(b, &cl);
	mutex_unlock(&b->write_lock);

	closure_sync(&cl);
}

static void btree_node_write_work(struct work_struct *w)
{
	struct btree *b = container_of(to_delayed_work(w), struct btree, work);

	mutex_lock(&b->write_lock);
	if (btree_node_dirty(b))
		__bch_btree_node_write(b, NULL);
	mutex_unlock(&b->write_lock);
}

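/*
 * Mark a leaf node dirty after an insert: pin the journal entry the keys came
 * from so it isn't reclaimed before this node is written, arm the 30 second
 * delayed write, and write immediately once the unwritten set grows to
 * roughly a page.
 */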
static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
{
	struct bset *i = btree_bset_last(b);
	struct btree_write *w = btree_current_write(b);

	lockdep_assert_held(&b->write_lock);

	BUG_ON(!b->written);
	BUG_ON(!i->keys);

	if (!btree_node_dirty(b))
		schedule_delayed_work(&b->work, 30 * HZ);

	set_btree_node_dirty(b);

	if (journal_ref) {
		if (w->journal &&
		    journal_pin_cmp(b->c, w->journal, journal_ref)) {
			atomic_dec_bug(w->journal);
			w->journal = NULL;
		}

		if (!w->journal) {
			w->journal = journal_ref;
			atomic_inc(w->journal);
		}
	}

	/* Force write if set is too big */
	if (set_bytes(i) > PAGE_SIZE - 48 &&
	    !current->bio_list)
		bch_btree_node_write(b, NULL);
}

/*
 * Btree in memory cache - allocation/freeing
 * mca -> memory cache
 */

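/*
 * Number of cached btree nodes we always keep in reserve: roughly eight per
 * level of the current tree plus some slack, so splits and gc can always get
 * a node without allocating (see the reserve comment in bch_mca_scan()).
 */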
#define mca_reserve(c)	(((c->root && c->root->level)		\
			  ? c->root->level : 1) * 8 + 16)
#define mca_can_free(c)						\
	max_t(int, 0, c->btree_cache_used - mca_reserve(c))

static void mca_data_free(struct btree *b)
{
	BUG_ON(b->io_mutex.count != 1);

	bch_btree_keys_free(&b->keys);

	b->c->btree_cache_used--;
	list_move(&b->list, &b->c->btree_cache_freed);
}

static void mca_bucket_free(struct btree *b)
{
	BUG_ON(btree_node_dirty(b));

	b->key.ptr[0] = 0;
	hlist_del_init_rcu(&b->hash);
	list_move(&b->list, &b->c->btree_cache_freeable);
}

static unsigned btree_order(struct bkey *k)
{
	return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
}

static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
{
	if (!bch_btree_keys_alloc(&b->keys,
				  max_t(unsigned,
					ilog2(b->c->btree_pages),
					btree_order(k)),
				  gfp)) {
		b->c->btree_cache_used++;
		list_move(&b->list, &b->c->btree_cache);
	} else {
		list_move(&b->list, &b->c->btree_cache_freed);
	}
}

static struct btree *mca_bucket_alloc(struct cache_set *c,
				      struct bkey *k, gfp_t gfp)
{
	struct btree *b = kzalloc(sizeof(struct btree), gfp);
	if (!b)
		return NULL;

	init_rwsem(&b->lock);
	lockdep_set_novalidate_class(&b->lock);
	mutex_init(&b->write_lock);
	lockdep_set_novalidate_class(&b->write_lock);
	INIT_LIST_HEAD(&b->list);
	INIT_DELAYED_WORK(&b->work, btree_node_write_work);
	b->c = c;
	sema_init(&b->io_mutex, 1);

	mca_data_alloc(b, k, gfp);
	return b;
}

static int mca_reap(struct btree *b, unsigned min_order, bool flush)
{
	struct closure cl;

	closure_init_stack(&cl);
	lockdep_assert_held(&b->c->bucket_lock);

	if (!down_write_trylock(&b->lock))
		return -ENOMEM;

	BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);

	if (b->keys.page_order < min_order)
		goto out_unlock;

	if (!flush) {
		if (btree_node_dirty(b))
			goto out_unlock;

		if (down_trylock(&b->io_mutex))
			goto out_unlock;
		up(&b->io_mutex);
	}

	mutex_lock(&b->write_lock);
	if (btree_node_dirty(b))
		__bch_btree_node_write(b, &cl);
	mutex_unlock(&b->write_lock);

	closure_sync(&cl);

	/* wait for any in flight btree write */
	down(&b->io_mutex);
	up(&b->io_mutex);

	return 0;
out_unlock:
	rw_unlock(true, b);
	return -ENOMEM;
}

static unsigned long bch_mca_scan(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	struct cache_set *c = container_of(shrink, struct cache_set, shrink);
	struct btree *b, *t;
	unsigned long i, nr = sc->nr_to_scan;
	unsigned long freed = 0;

	if (c->shrinker_disabled)
		return SHRINK_STOP;

	if (c->btree_cache_alloc_lock)
		return SHRINK_STOP;

	/* Return -1 if we can't do anything right now */
	if (sc->gfp_mask & __GFP_IO)
		mutex_lock(&c->bucket_lock);
	else if (!mutex_trylock(&c->bucket_lock))
		return -1;

	/*
	 * It's _really_ critical that we don't free too many btree nodes - we
	 * have to always leave ourselves a reserve. The reserve is how we
	 * guarantee that allocating memory for a new btree node can always
	 * succeed, so that inserting keys into the btree can always succeed and
	 * IO can always make forward progress:
	 */
	nr /= c->btree_pages;
	nr = min_t(unsigned long, nr, mca_can_free(c));

	i = 0;
	list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
		if (freed >= nr)
			break;

		if (++i > 3 &&
		    !mca_reap(b, 0, false)) {
			mca_data_free(b);
			rw_unlock(true, b);
			freed++;
		}
	}

	for (i = 0; (nr--) && i < c->btree_cache_used; i++) {
		if (list_empty(&c->btree_cache))
			goto out;

		b = list_first_entry(&c->btree_cache, struct btree, list);
		list_rotate_left(&c->btree_cache);

		if (!b->accessed &&
		    !mca_reap(b, 0, false)) {
			mca_bucket_free(b);
			mca_data_free(b);
			rw_unlock(true, b);
			freed++;
		} else
			b->accessed = 0;
	}
out:
	mutex_unlock(&c->bucket_lock);
	return freed;
}

static unsigned long bch_mca_count(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	struct cache_set *c = container_of(shrink, struct cache_set, shrink);

	if (c->shrinker_disabled)
		return 0;

	if (c->btree_cache_alloc_lock)
		return 0;

	return mca_can_free(c) * c->btree_pages;
}

void bch_btree_cache_free(struct cache_set *c)
{
	struct btree *b;
	struct closure cl;
	closure_init_stack(&cl);

	if (c->shrink.list.next)
		unregister_shrinker(&c->shrink);

	mutex_lock(&c->bucket_lock);

#ifdef CONFIG_BCACHE_DEBUG
	if (c->verify_data)
		list_move(&c->verify_data->list, &c->btree_cache);

	free_pages((unsigned long) c->verify_ondisk, ilog2(bucket_pages(c)));
#endif

	list_splice(&c->btree_cache_freeable,
		    &c->btree_cache);

	while (!list_empty(&c->btree_cache)) {
		b = list_first_entry(&c->btree_cache, struct btree, list);

		if (btree_node_dirty(b))
			btree_complete_write(b, btree_current_write(b));
		clear_bit(BTREE_NODE_dirty, &b->flags);

		mca_data_free(b);
	}

	while (!list_empty(&c->btree_cache_freed)) {
		b = list_first_entry(&c->btree_cache_freed,
				     struct btree, list);
		list_del(&b->list);
		cancel_delayed_work_sync(&b->work);
		kfree(b);
	}

	mutex_unlock(&c->bucket_lock);
}

int bch_btree_cache_alloc(struct cache_set *c)
{
	unsigned i;

	for (i = 0; i < mca_reserve(c); i++)
		if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
			return -ENOMEM;

	list_splice_init(&c->btree_cache,
			 &c->btree_cache_freeable);

#ifdef CONFIG_BCACHE_DEBUG
	mutex_init(&c->verify_lock);

	c->verify_ondisk = (void *)
		__get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c)));

	c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);

	if (c->verify_data &&
	    c->verify_data->keys.set->data)
		list_del_init(&c->verify_data->list);
	else
		c->verify_data = NULL;
#endif

	c->shrink.count_objects = bch_mca_count;
	c->shrink.scan_objects = bch_mca_scan;
	c->shrink.seeks = 4;
	c->shrink.batch = c->btree_pages * 2;
	register_shrinker(&c->shrink);

	return 0;
}

/* Btree in memory cache - hash table */

static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
{
	return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
}

static struct btree *mca_find(struct cache_set *c, struct bkey *k)
{
	struct btree *b;

	rcu_read_lock();
	hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
		if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
			goto out;
	b = NULL;
out:
	rcu_read_unlock();
	return b;
}

static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
{
	struct task_struct *old;

	old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current);
	if (old && old != current) {
		if (op)
			prepare_to_wait(&c->btree_cache_wait, &op->wait,
					TASK_UNINTERRUPTIBLE);
		return -EINTR;
	}

	return 0;
}

static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
				     struct bkey *k)
{
	struct btree *b;

	trace_bcache_btree_cache_cannibalize(c);

	if (mca_cannibalize_lock(c, op))
		return ERR_PTR(-EINTR);

	list_for_each_entry_reverse(b, &c->btree_cache, list)
		if (!mca_reap(b, btree_order(k), false))
			return b;

	list_for_each_entry_reverse(b, &c->btree_cache, list)
		if (!mca_reap(b, btree_order(k), true))
			return b;

	WARN(1, "btree cache cannibalize failed\n");
	return ERR_PTR(-ENOMEM);
}

/*
 * We can only have one thread cannibalizing other cached btree nodes at a time,
 * or we'll deadlock. We use an open coded mutex to ensure that, which a
 * cannibalize_bucket() will take. This means every time we unlock the root of
 * the btree, we need to release this lock if we have it held.
 */
static void bch_cannibalize_unlock(struct cache_set *c)
{
	if (c->btree_cache_alloc_lock == current) {
		c->btree_cache_alloc_lock = NULL;
		wake_up(&c->btree_cache_wait);
	}
}

static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
			       struct bkey *k, int level)
{
	struct btree *b;

	BUG_ON(current->bio_list);

	lockdep_assert_held(&c->bucket_lock);

	if (mca_find(c, k))
		return NULL;

	/* btree_free() doesn't free memory; it sticks the node on the end of
	 * the list. Check if there's any freed nodes there:
	 */
	list_for_each_entry(b, &c->btree_cache_freeable, list)
		if (!mca_reap(b, btree_order(k), false))
			goto out;

	/* We never free struct btree itself, just the memory that holds the on
	 * disk node. Check the freed list before allocating a new one:
	 */
	list_for_each_entry(b, &c->btree_cache_freed, list)
		if (!mca_reap(b, 0, false)) {
			mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
			if (!b->keys.set[0].data)
				goto err;
			else
				goto out;
		}

	b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
	if (!b)
		goto err;

	BUG_ON(!down_write_trylock(&b->lock));
	if (!b->keys.set->data)
		goto err;
out:
	BUG_ON(b->io_mutex.count != 1);

	bkey_copy(&b->key, k);
	list_move(&b->list, &c->btree_cache);
	hlist_del_init_rcu(&b->hash);
	hlist_add_head_rcu(&b->hash, mca_hash(c, k));

	lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
	b->parent	= (void *) ~0UL;
	b->flags	= 0;
	b->written	= 0;
	b->level	= level;

	if (!b->level)
		bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
				    &b->c->expensive_debug_checks);
	else
		bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
				    &b->c->expensive_debug_checks);

	return b;
err:
	if (b)
		rw_unlock(true, b);

	b = mca_cannibalize(c, op, k);
	if (!IS_ERR(b))
		goto out;

	return b;
}

/**
 * bch_btree_node_get - find a btree node in the cache and lock it, reading it
 * in from disk if necessary.
 *
 * If IO is necessary and running under generic_make_request, returns -EAGAIN.
 *
 * The btree node will have either a read or a write lock held, depending on
 * level and op->lock.
 */
struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
				 struct bkey *k, int level, bool write,
				 struct btree *parent)
{
	int i = 0;
	struct btree *b;

	BUG_ON(level < 0);
retry:
	b = mca_find(c, k);

	if (!b) {
		if (current->bio_list)
			return ERR_PTR(-EAGAIN);

		mutex_lock(&c->bucket_lock);
		b = mca_alloc(c, op, k, level);
		mutex_unlock(&c->bucket_lock);

		if (!b)
			goto retry;
		if (IS_ERR(b))
			return b;

		bch_btree_node_read(b);

		if (!write)
			downgrade_write(&b->lock);
	} else {
		rw_lock(write, b, level);
		if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
			rw_unlock(write, b);
			goto retry;
		}
		BUG_ON(b->level != level);
	}

	b->parent = parent;
	b->accessed = 1;

	for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
		prefetch(b->keys.set[i].tree);
		prefetch(b->keys.set[i].data);
	}

	for (; i <= b->keys.nsets; i++)
		prefetch(b->keys.set[i].data);

	if (btree_node_io_error(b)) {
		rw_unlock(write, b);
		return ERR_PTR(-EIO);
	}

	BUG_ON(!b->written);

	return b;
}

static void btree_node_prefetch(struct btree *parent, struct bkey *k)
{
	struct btree *b;

	mutex_lock(&parent->c->bucket_lock);
	b = mca_alloc(parent->c, NULL, k, parent->level - 1);
	mutex_unlock(&parent->c->bucket_lock);

	if (!IS_ERR_OR_NULL(b)) {
		b->parent = parent;
		bch_btree_node_read(b);
		rw_unlock(true, b);
	}
}

/* Btree alloc */

static void btree_node_free(struct btree *b)
{
	trace_bcache_btree_node_free(b);

	BUG_ON(b == b->c->root);

	mutex_lock(&b->write_lock);

	if (btree_node_dirty(b))
		btree_complete_write(b, btree_current_write(b));
	clear_bit(BTREE_NODE_dirty, &b->flags);

	mutex_unlock(&b->write_lock);

	cancel_delayed_work(&b->work);

	mutex_lock(&b->c->bucket_lock);
	bch_bucket_free(b->c, &b->key);
	mca_bucket_free(b);
	mutex_unlock(&b->c->bucket_lock);
}

struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
				     int level, bool wait,
				     struct btree *parent)
{
	BKEY_PADDED(key) k;
	struct btree *b = ERR_PTR(-EAGAIN);

	mutex_lock(&c->bucket_lock);
retry:
	if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
		goto err;

	bkey_put(c, &k.key);
	SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);

	b = mca_alloc(c, op, &k.key, level);
	if (IS_ERR(b))
		goto err_free;

	if (!b) {
		cache_bug(c,
			"Tried to allocate bucket that was in btree cache");
		goto retry;
	}

	b->accessed = 1;
	b->parent = parent;
	bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));

	mutex_unlock(&c->bucket_lock);

	trace_bcache_btree_node_alloc(b);
	return b;
err_free:
	bch_bucket_free(c, &k.key);
err:
	mutex_unlock(&c->bucket_lock);

	trace_bcache_btree_node_alloc_fail(c);
	return b;
}

static struct btree *bch_btree_node_alloc(struct cache_set *c,
					  struct btree_op *op, int level,
					  struct btree *parent)
{
	return __bch_btree_node_alloc(c, op, level, op != NULL, parent);
}

static struct btree *btree_node_alloc_replacement(struct btree *b,
						  struct btree_op *op)
{
	struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);
	if (!IS_ERR_OR_NULL(n)) {
		mutex_lock(&n->write_lock);
		bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
		bkey_copy_key(&n->key, &b->key);
		mutex_unlock(&n->write_lock);
	}

	return n;
}

static void make_btree_freeing_key(struct btree *b, struct bkey *k)
{
	unsigned i;

	mutex_lock(&b->c->bucket_lock);

	atomic_inc(&b->c->prio_blocked);

	bkey_copy(k, &b->key);
	bkey_copy_key(k, &ZERO_KEY);

	for (i = 0; i < KEY_PTRS(k); i++)
		SET_PTR_GEN(k, i,
			    bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
					PTR_BUCKET(b->c, &b->key, i)));

	mutex_unlock(&b->c->bucket_lock);
}

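/*
 * Make sure each cache has enough free buckets in its RESERVE_BTREE freelist
 * for the levels below this node before starting an insert or split; if not,
 * put the caller on the wait queue and return -EINTR so the operation is
 * retried from the root. Also takes the cannibalize lock.
 */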
static int btree_check_reserve(struct btree *b, struct btree_op *op)
{
	struct cache_set *c = b->c;
	struct cache *ca;
	unsigned i, reserve = (c->root->level - b->level) * 2 + 1;

	mutex_lock(&c->bucket_lock);

	for_each_cache(ca, c, i)
		if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
			if (op)
				prepare_to_wait(&c->btree_cache_wait, &op->wait,
						TASK_UNINTERRUPTIBLE);
			mutex_unlock(&c->bucket_lock);
			return -EINTR;
		}

	mutex_unlock(&c->bucket_lock);

	return mca_cannibalize_lock(b->c, op);
}

/* Garbage collection */

1170static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
1171 struct bkey *k)
cafe5635
KO
1172{
1173 uint8_t stale = 0;
1174 unsigned i;
1175 struct bucket *g;
1176
1177 /*
1178 * ptr_invalid() can't return true for the keys that mark btree nodes as
1179 * freed, but since ptr_bad() returns true we'll never actually use them
1180 * for anything and thus we don't want mark their pointers here
1181 */
1182 if (!bkey_cmp(k, &ZERO_KEY))
1183 return stale;
1184
1185 for (i = 0; i < KEY_PTRS(k); i++) {
1186 if (!ptr_available(c, k, i))
1187 continue;
1188
1189 g = PTR_BUCKET(c, k, i);
1190
3a2fd9d5
KO
1191 if (gen_after(g->last_gc, PTR_GEN(k, i)))
1192 g->last_gc = PTR_GEN(k, i);
cafe5635
KO
1193
1194 if (ptr_stale(c, k, i)) {
1195 stale = max(stale, ptr_stale(c, k, i));
1196 continue;
1197 }
1198
1199 cache_bug_on(GC_MARK(g) &&
1200 (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
1201 c, "inconsistent ptrs: mark = %llu, level = %i",
1202 GC_MARK(g), level);
1203
1204 if (level)
1205 SET_GC_MARK(g, GC_MARK_METADATA);
1206 else if (KEY_DIRTY(k))
1207 SET_GC_MARK(g, GC_MARK_DIRTY);
4fe6a816
KO
1208 else if (!GC_MARK(g))
1209 SET_GC_MARK(g, GC_MARK_RECLAIMABLE);
cafe5635
KO
1210
1211 /* guard against overflow */
1212 SET_GC_SECTORS_USED(g, min_t(unsigned,
1213 GC_SECTORS_USED(g) + KEY_SIZE(k),
94717447 1214 MAX_GC_SECTORS_USED));
cafe5635
KO
1215
1216 BUG_ON(!GC_SECTORS_USED(g));
1217 }
1218
1219 return stale;
1220}
1221
1222#define btree_mark_key(b, k) __bch_btree_mark_key(b->c, b->level, k)
1223
487dded8
KO
1224void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
1225{
1226 unsigned i;
1227
1228 for (i = 0; i < KEY_PTRS(k); i++)
1229 if (ptr_available(c, k, i) &&
1230 !ptr_stale(c, k, i)) {
1231 struct bucket *b = PTR_BUCKET(c, k, i);
1232
1233 b->gen = PTR_GEN(k, i);
1234
1235 if (level && bkey_cmp(k, &ZERO_KEY))
1236 b->prio = BTREE_PRIO;
1237 else if (!level && b->prio == BTREE_PRIO)
1238 b->prio = INITIAL_PRIO;
1239 }
1240
1241 __bch_btree_mark_key(c, level, k);
1242}
1243
d44c2f9e
TJ
1244void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
1245{
1246 stats->in_use = (c->nbuckets - c->avail_nbuckets) * 100 / c->nbuckets;
1247}
1248
a1f0358b 1249static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
cafe5635
KO
1250{
1251 uint8_t stale = 0;
a1f0358b 1252 unsigned keys = 0, good_keys = 0;
cafe5635
KO
1253 struct bkey *k;
1254 struct btree_iter iter;
1255 struct bset_tree *t;
1256
1257 gc->nodes++;
1258
c052dd9a 1259 for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
cafe5635 1260 stale = max(stale, btree_mark_key(b, k));
a1f0358b 1261 keys++;
cafe5635 1262
a85e968e 1263 if (bch_ptr_bad(&b->keys, k))
cafe5635
KO
1264 continue;
1265
cafe5635
KO
1266 gc->key_bytes += bkey_u64s(k);
1267 gc->nkeys++;
a1f0358b 1268 good_keys++;
cafe5635
KO
1269
1270 gc->data += KEY_SIZE(k);
cafe5635
KO
1271 }
1272
a85e968e 1273 for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
cafe5635 1274 btree_bug_on(t->size &&
a85e968e 1275 bset_written(&b->keys, t) &&
cafe5635
KO
1276 bkey_cmp(&b->key, &t->end) < 0,
1277 b, "found short btree key in gc");
1278
a1f0358b
KO
1279 if (b->c->gc_always_rewrite)
1280 return true;
cafe5635 1281
a1f0358b
KO
1282 if (stale > 10)
1283 return true;
cafe5635 1284
a1f0358b
KO
1285 if ((keys - good_keys) * 2 > keys)
1286 return true;
cafe5635 1287
a1f0358b 1288 return false;
cafe5635
KO
1289}
1290
a1f0358b 1291#define GC_MERGE_NODES 4U
cafe5635
KO
1292
1293struct gc_merge_info {
1294 struct btree *b;
cafe5635
KO
1295 unsigned keys;
1296};
1297
a1f0358b
KO
1298static int bch_btree_insert_node(struct btree *, struct btree_op *,
1299 struct keylist *, atomic_t *, struct bkey *);
1300
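/*
 * Try to coalesce up to GC_MERGE_NODES adjacent, mostly empty nodes during
 * gc: allocate replacement nodes, shift keys towards the last nodes so the
 * first one empties out, insert the new pointers (plus freeing keys for the
 * old nodes) into the parent, then free the originals. Returns -EINTR on
 * success because the iterator over the parent has been invalidated.
 */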
static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
			     struct gc_stat *gc, struct gc_merge_info *r)
{
	unsigned i, nodes = 0, keys = 0, blocks;
	struct btree *new_nodes[GC_MERGE_NODES];
	struct keylist keylist;
	struct closure cl;
	struct bkey *k;

	bch_keylist_init(&keylist);

	if (btree_check_reserve(b, NULL))
		return 0;

	memset(new_nodes, 0, sizeof(new_nodes));
	closure_init_stack(&cl);

	while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
		keys += r[nodes++].keys;

	blocks = btree_default_blocks(b->c) * 2 / 3;

	if (nodes < 2 ||
	    __set_blocks(b->keys.set[0].data, keys,
			 block_bytes(b->c)) > blocks * (nodes - 1))
		return 0;

	for (i = 0; i < nodes; i++) {
		new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
		if (IS_ERR_OR_NULL(new_nodes[i]))
			goto out_nocoalesce;
	}

	/*
	 * We have to check the reserve here, after we've allocated our new
	 * nodes, to make sure the insert below will succeed - we also check
	 * before as an optimization to potentially avoid a bunch of expensive
	 * allocs/sorts
	 */
	if (btree_check_reserve(b, NULL))
		goto out_nocoalesce;

	for (i = 0; i < nodes; i++)
		mutex_lock(&new_nodes[i]->write_lock);

	for (i = nodes - 1; i > 0; --i) {
		struct bset *n1 = btree_bset_first(new_nodes[i]);
		struct bset *n2 = btree_bset_first(new_nodes[i - 1]);
		struct bkey *k, *last = NULL;

		keys = 0;

		if (i > 1) {
			for (k = n2->start;
			     k < bset_bkey_last(n2);
			     k = bkey_next(k)) {
				if (__set_blocks(n1, n1->keys + keys +
						 bkey_u64s(k),
						 block_bytes(b->c)) > blocks)
					break;

				last = k;
				keys += bkey_u64s(k);
			}
		} else {
			/*
			 * Last node we're not getting rid of - we're getting
			 * rid of the node at r[0]. Have to try and fit all of
			 * the remaining keys into this node; we can't ensure
			 * they will always fit due to rounding and variable
			 * length keys (shouldn't be possible in practice,
			 * though)
			 */
			if (__set_blocks(n1, n1->keys + n2->keys,
					 block_bytes(b->c)) >
			    btree_blocks(new_nodes[i]))
				goto out_nocoalesce;

			keys = n2->keys;
			/* Take the key of the node we're getting rid of */
			last = &r->b->key;
		}

		BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) >
		       btree_blocks(new_nodes[i]));

		if (last)
			bkey_copy_key(&new_nodes[i]->key, last);

		memcpy(bset_bkey_last(n1),
		       n2->start,
		       (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);

		n1->keys += keys;
		r[i].keys = n1->keys;

		memmove(n2->start,
			bset_bkey_idx(n2, keys),
			(void *) bset_bkey_last(n2) -
			(void *) bset_bkey_idx(n2, keys));

		n2->keys -= keys;

		if (__bch_keylist_realloc(&keylist,
					  bkey_u64s(&new_nodes[i]->key)))
			goto out_nocoalesce;

		bch_btree_node_write(new_nodes[i], &cl);
		bch_keylist_add(&keylist, &new_nodes[i]->key);
	}

	for (i = 0; i < nodes; i++)
		mutex_unlock(&new_nodes[i]->write_lock);

	closure_sync(&cl);

	/* We emptied out this node */
	BUG_ON(btree_bset_first(new_nodes[0])->keys);
	btree_node_free(new_nodes[0]);
	rw_unlock(true, new_nodes[0]);
	new_nodes[0] = NULL;

	for (i = 0; i < nodes; i++) {
		if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key)))
			goto out_nocoalesce;

		make_btree_freeing_key(r[i].b, keylist.top);
		bch_keylist_push(&keylist);
	}

	bch_btree_insert_node(b, op, &keylist, NULL, NULL);
	BUG_ON(!bch_keylist_empty(&keylist));

	for (i = 0; i < nodes; i++) {
		btree_node_free(r[i].b);
		rw_unlock(true, r[i].b);

		r[i].b = new_nodes[i];
	}

	memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
	r[nodes - 1].b = ERR_PTR(-EINTR);

	trace_bcache_btree_gc_coalesce(nodes);
	gc->nodes--;

	bch_keylist_free(&keylist);

	/* Invalidated our iterator */
	return -EINTR;

out_nocoalesce:
	closure_sync(&cl);
	bch_keylist_free(&keylist);

	while ((k = bch_keylist_pop(&keylist)))
		if (!bkey_cmp(k, &ZERO_KEY))
			atomic_dec(&b->c->prio_blocked);

	for (i = 0; i < nodes; i++)
		if (!IS_ERR_OR_NULL(new_nodes[i])) {
			btree_node_free(new_nodes[i]);
			rw_unlock(true, new_nodes[i]);
		}
	return 0;
}

static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
				 struct btree *replace)
{
	struct keylist keys;
	struct btree *n;

	if (btree_check_reserve(b, NULL))
		return 0;

	n = btree_node_alloc_replacement(replace, NULL);

	/* recheck reserve after allocating replacement node */
	if (btree_check_reserve(b, NULL)) {
		btree_node_free(n);
		rw_unlock(true, n);
		return 0;
	}

	bch_btree_node_write_sync(n);

	bch_keylist_init(&keys);
	bch_keylist_add(&keys, &n->key);

	make_btree_freeing_key(replace, keys.top);
	bch_keylist_push(&keys);

	bch_btree_insert_node(b, op, &keys, NULL, NULL);
	BUG_ON(!bch_keylist_empty(&keys));

	btree_node_free(replace);
	rw_unlock(true, n);

	/* Invalidated our iterator */
	return -EINTR;
}

static unsigned btree_gc_count_keys(struct btree *b)
{
	struct bkey *k;
	struct btree_iter iter;
	unsigned ret = 0;

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
		ret += bkey_u64s(k);

	return ret;
}

static int btree_gc_recurse(struct btree *b, struct btree_op *op,
			    struct closure *writes, struct gc_stat *gc)
{
	int ret = 0;
	bool should_rewrite;
	struct bkey *k;
	struct btree_iter iter;
	struct gc_merge_info r[GC_MERGE_NODES];
	struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;

	bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);

	for (i = r; i < r + ARRAY_SIZE(r); i++)
		i->b = ERR_PTR(-EINTR);

	while (1) {
		k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
		if (k) {
			r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
						  true, b);
			if (IS_ERR(r->b)) {
				ret = PTR_ERR(r->b);
				break;
			}

			r->keys = btree_gc_count_keys(r->b);

			ret = btree_gc_coalesce(b, op, gc, r);
			if (ret)
				break;
		}

		if (!last->b)
			break;

		if (!IS_ERR(last->b)) {
			should_rewrite = btree_gc_mark_node(last->b, gc);
			if (should_rewrite) {
				ret = btree_gc_rewrite_node(b, op, last->b);
				if (ret)
					break;
			}

			if (last->b->level) {
				ret = btree_gc_recurse(last->b, op, writes, gc);
				if (ret)
					break;
			}

			bkey_copy_key(&b->c->gc_done, &last->b->key);

			/*
			 * Must flush leaf nodes before gc ends, since replace
			 * operations aren't journalled
			 */
			mutex_lock(&last->b->write_lock);
			if (btree_node_dirty(last->b))
				bch_btree_node_write(last->b, writes);
			mutex_unlock(&last->b->write_lock);
			rw_unlock(true, last->b);
		}

		memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
		r->b = NULL;

		if (need_resched()) {
			ret = -EAGAIN;
			break;
		}
	}

	for (i = r; i < r + ARRAY_SIZE(r); i++)
		if (!IS_ERR_OR_NULL(i->b)) {
			mutex_lock(&i->b->write_lock);
			if (btree_node_dirty(i->b))
				bch_btree_node_write(i->b, writes);
			mutex_unlock(&i->b->write_lock);
			rw_unlock(true, i->b);
		}

	return ret;
}

static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
			     struct closure *writes, struct gc_stat *gc)
{
	struct btree *n = NULL;
	int ret = 0;
	bool should_rewrite;

	should_rewrite = btree_gc_mark_node(b, gc);
	if (should_rewrite) {
		n = btree_node_alloc_replacement(b, NULL);

		if (!IS_ERR_OR_NULL(n)) {
			bch_btree_node_write_sync(n);

			bch_btree_set_root(n);
			btree_node_free(b);
			rw_unlock(true, n);

			return -EINTR;
		}
	}

	__bch_btree_mark_key(b->c, b->level + 1, &b->key);

	if (b->level) {
		ret = btree_gc_recurse(b, op, writes, gc);
		if (ret)
			return ret;
	}

	bkey_copy_key(&b->c->gc_done, &b->key);

	return ret;
}

static void btree_gc_start(struct cache_set *c)
{
	struct cache *ca;
	struct bucket *b;
	unsigned i;

	if (!c->gc_mark_valid)
		return;

	mutex_lock(&c->bucket_lock);

	c->gc_mark_valid = 0;
	c->gc_done = ZERO_KEY;

	for_each_cache(ca, c, i)
		for_each_bucket(b, ca) {
			b->last_gc = b->gen;
			if (!atomic_read(&b->pin)) {
				SET_GC_MARK(b, 0);
				SET_GC_SECTORS_USED(b, 0);
			}
		}

	mutex_unlock(&c->bucket_lock);
}

static void bch_btree_gc_finish(struct cache_set *c)
{
	struct bucket *b;
	struct cache *ca;
	unsigned i;

	mutex_lock(&c->bucket_lock);

	set_gc_sectors(c);
	c->gc_mark_valid = 1;
	c->need_gc	= 0;

	for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
		SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
			    GC_MARK_METADATA);

	/* don't reclaim buckets to which writeback keys point */
	rcu_read_lock();
	for (i = 0; i < c->nr_uuids; i++) {
		struct bcache_device *d = c->devices[i];
		struct cached_dev *dc;
		struct keybuf_key *w, *n;
		unsigned j;

		if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
			continue;
		dc = container_of(d, struct cached_dev, disk);

		spin_lock(&dc->writeback_keys.lock);
		rbtree_postorder_for_each_entry_safe(w, n,
					&dc->writeback_keys.keys, node)
			for (j = 0; j < KEY_PTRS(&w->key); j++)
				SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
					    GC_MARK_DIRTY);
		spin_unlock(&dc->writeback_keys.lock);
	}
	rcu_read_unlock();

	c->avail_nbuckets = 0;
	for_each_cache(ca, c, i) {
		uint64_t *i;

		ca->invalidate_needs_gc = 0;

		for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++)
			SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);

		for (i = ca->prio_buckets;
		     i < ca->prio_buckets + prio_buckets(ca) * 2; i++)
			SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);

		for_each_bucket(b, ca) {
			c->need_gc	= max(c->need_gc, bucket_gc_gen(b));

			if (atomic_read(&b->pin))
				continue;

			BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));

			if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
				c->avail_nbuckets++;
		}
	}

	mutex_unlock(&c->bucket_lock);
}

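/*
 * One full garbage collection pass: walk the whole tree from the root,
 * retrying until btree_root() stops returning an error (node rewrites and
 * coalesces return -EINTR), then update bucket marks and statistics and kick
 * off moving gc.
 */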
static void bch_btree_gc(struct cache_set *c)
{
	int ret;
	struct gc_stat stats;
	struct closure writes;
	struct btree_op op;
	uint64_t start_time = local_clock();

	trace_bcache_gc_start(c);

	memset(&stats, 0, sizeof(struct gc_stat));
	closure_init_stack(&writes);
	bch_btree_op_init(&op, SHRT_MAX);

	btree_gc_start(c);

	do {
		ret = btree_root(gc_root, c, &op, &writes, &stats);
		closure_sync(&writes);
		cond_resched();

		if (ret && ret != -EAGAIN)
			pr_warn("gc failed!");
	} while (ret);

	bch_btree_gc_finish(c);
	wake_up_allocators(c);

	bch_time_stats_update(&c->btree_gc_time, start_time);

	stats.key_bytes *= sizeof(uint64_t);
	stats.data	<<= 9;
	bch_update_bucket_in_use(c, &stats);
	memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));

	trace_bcache_gc_end(c);

	bch_moving_gc(c);
}

static bool gc_should_run(struct cache_set *c)
{
	struct cache *ca;
	unsigned i;

	for_each_cache(ca, c, i)
		if (ca->invalidate_needs_gc)
			return true;

	if (atomic_read(&c->sectors_to_gc) < 0)
		return true;

	return false;
}

static int bch_gc_thread(void *arg)
{
	struct cache_set *c = arg;

	while (1) {
		wait_event_interruptible(c->gc_wait,
			   kthread_should_stop() || gc_should_run(c));

		if (kthread_should_stop())
			break;

		set_gc_sectors(c);
		bch_btree_gc(c);
	}

	return 0;
}

int bch_gc_thread_start(struct cache_set *c)
{
	c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
	if (IS_ERR(c->gc_thread))
		return PTR_ERR(c->gc_thread);

	return 0;
}

/* Initial partial gc */

static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
{
	int ret = 0;
	struct bkey *k, *p = NULL;
	struct btree_iter iter;

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
		bch_initial_mark_key(b->c, b->level, k);

	bch_initial_mark_key(b->c, b->level + 1, &b->key);

	if (b->level) {
		bch_btree_iter_init(&b->keys, &iter, NULL);

		do {
			k = bch_btree_iter_next_filter(&iter, &b->keys,
						       bch_ptr_bad);
			if (k)
				btree_node_prefetch(b, k);

			if (p)
				ret = btree(check_recurse, p, b, op);

			p = k;
		} while (p && !ret);
	}

	return ret;
}

int bch_btree_check(struct cache_set *c)
{
	struct btree_op op;

	bch_btree_op_init(&op, SHRT_MAX);

	return btree_root(check_recurse, c, &op);
}

void bch_initial_gc_finish(struct cache_set *c)
{
	struct cache *ca;
	struct bucket *b;
	unsigned i;

	bch_btree_gc_finish(c);

	mutex_lock(&c->bucket_lock);

	/*
	 * We need to put some unused buckets directly on the prio freelist in
	 * order to get the allocator thread started - it needs freed buckets in
	 * order to rewrite the prios and gens, and it needs to rewrite prios
	 * and gens in order to free buckets.
	 *
	 * This is only safe for buckets that have no live data in them, which
	 * there should always be some of.
	 */
	for_each_cache(ca, c, i) {
		for_each_bucket(b, ca) {
			if (fifo_full(&ca->free[RESERVE_PRIO]))
				break;

			if (bch_can_invalidate_bucket(ca, b) &&
			    !GC_MARK(b)) {
				__bch_invalidate_one_bucket(ca, b);
				fifo_push(&ca->free[RESERVE_PRIO],
					  b - ca->buckets);
			}
		}
	}

	mutex_unlock(&c->bucket_lock);
}

cafe5635
KO
1886/* Btree insertion */
1887
829a60b9
KO
1888static bool btree_insert_key(struct btree *b, struct bkey *k,
1889 struct bkey *replace_key)
cafe5635 1890{
829a60b9 1891 unsigned status;
cafe5635
KO
1892
1893 BUG_ON(bkey_cmp(k, &b->key) > 0);
1fa8455d 1894
829a60b9
KO
1895 status = bch_btree_insert_key(&b->keys, k, replace_key);
1896 if (status != BTREE_INSERT_STATUS_NO_INSERT) {
1897 bch_check_keys(&b->keys, "%u for %s", status,
1898 replace_key ? "replace" : "insert");
cafe5635 1899
829a60b9
KO
1900 trace_bcache_btree_insert_key(b, k, replace_key != NULL,
1901 status);
1902 return true;
1903 } else
1904 return false;
cafe5635
KO
1905}
1906
59158fde
KO
1907static size_t insert_u64s_remaining(struct btree *b)
1908{
3572324a 1909 long ret = bch_btree_keys_u64s_remaining(&b->keys);
59158fde
KO
1910
1911 /*
1912 * Might land in the middle of an existing extent and have to split it
1913 */
1914 if (b->keys.ops->is_extents)
1915 ret -= KEY_MAX_U64S;
1916
1917 return max(ret, 0L);
1918}
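insert_u64s_remaining() deliberately under-reports the free space in extent nodes by KEY_MAX_U64S: an insert can land in the middle of an existing extent, which then has to be split into two keys, and the reserve guarantees room for the extra key. A standalone model of that accounting is below; MODEL_KEY_MAX_U64S is an illustrative constant, not the kernel's actual value.

/* Sketch only: free-space accounting with a reserve for a possible extent split. */
#include <stdio.h>

#define MODEL_KEY_MAX_U64S 8   /* illustrative; stands in for KEY_MAX_U64S */

static long u64s_remaining(long free_u64s, int node_holds_extents)
{
	long ret = free_u64s;

	/* Inserting into the middle of an extent splits it in two,
	 * so keep one extra key's worth of space in reserve. */
	if (node_holds_extents)
		ret -= MODEL_KEY_MAX_U64S;

	return ret > 0 ? ret : 0;
}

int main(void)
{
	printf("%ld\n", u64s_remaining(20, 0)); /* 20: btree node, no reserve */
	printf("%ld\n", u64s_remaining(20, 1)); /* 12: extent node, reserve held back */
	printf("%ld\n", u64s_remaining(5, 1));  /*  0: clamped, never negative */
	return 0;
}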
1919
26c949f8 1920static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
1b207d80
KO
1921 struct keylist *insert_keys,
1922 struct bkey *replace_key)
cafe5635
KO
1923{
1924 bool ret = false;
dc9d98d6 1925 int oldsize = bch_count_data(&b->keys);
cafe5635 1926
26c949f8 1927 while (!bch_keylist_empty(insert_keys)) {
c2f95ae2 1928 struct bkey *k = insert_keys->keys;
26c949f8 1929
59158fde 1930 if (bkey_u64s(k) > insert_u64s_remaining(b))
403b6cde
KO
1931 break;
1932
1933 if (bkey_cmp(k, &b->key) <= 0) {
3a3b6a4e
KO
1934 if (!b->level)
1935 bkey_put(b->c, k);
26c949f8 1936
829a60b9 1937 ret |= btree_insert_key(b, k, replace_key);
26c949f8
KO
1938 bch_keylist_pop_front(insert_keys);
1939 } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
26c949f8 1940 BKEY_PADDED(key) temp;
c2f95ae2 1941 bkey_copy(&temp.key, insert_keys->keys);
26c949f8
KO
1942
1943 bch_cut_back(&b->key, &temp.key);
c2f95ae2 1944 bch_cut_front(&b->key, insert_keys->keys);
26c949f8 1945
829a60b9 1946 ret |= btree_insert_key(b, &temp.key, replace_key);
26c949f8
KO
1947 break;
1948 } else {
1949 break;
1950 }
cafe5635
KO
1951 }
1952
829a60b9
KO
1953 if (!ret)
1954 op->insert_collision = true;
1955
403b6cde
KO
1956 BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
1957
dc9d98d6 1958 BUG_ON(bch_count_data(&b->keys) < oldsize);
cafe5635
KO
1959 return ret;
1960}
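bch_btree_insert_keys() only inserts keys that fall inside this node's range: a key ending at or before b->key goes in whole, while a key straddling the node's right edge is clipped, with a copy cut back to the boundary and inserted here and the original cut forward so the remainder is handled by the next node. The following sketch shows the same clipping on half-open integer ranges in place of bkeys; cut_back()/cut_front() here are invented helpers echoing bch_cut_back()/bch_cut_front(), not the real ones.

/* Sketch only: clipping a straddling extent at a node boundary. */
#include <stdio.h>

struct range { long start, end; };   /* half-open [start, end), stands in for a bkey */

/* Keep only the part of r that ends at or before 'limit' (cf. bch_cut_back). */
static struct range cut_back(struct range r, long limit)
{
	if (r.end > limit)
		r.end = limit;
	return r;
}

/* Keep only the part of r that starts at or after 'limit' (cf. bch_cut_front). */
static struct range cut_front(struct range r, long limit)
{
	if (r.start < limit)
		r.start = limit;
	return r;
}

int main(void)
{
	long node_end = 100;                 /* this node covers keys up to 100 */
	struct range k = { 80, 130 };        /* straddles the node boundary */

	struct range here = cut_back(k, node_end);   /* [80, 100) inserted here */
	struct range rest = cut_front(k, node_end);  /* [100, 130) left for the next node */

	printf("insert [%ld,%ld), defer [%ld,%ld)\n",
	       here.start, here.end, rest.start, rest.end);
	return 0;
}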
1961
26c949f8
KO
1962static int btree_split(struct btree *b, struct btree_op *op,
1963 struct keylist *insert_keys,
1b207d80 1964 struct bkey *replace_key)
cafe5635 1965{
d6fd3b11 1966 bool split;
cafe5635
KO
1967 struct btree *n1, *n2 = NULL, *n3 = NULL;
1968 uint64_t start_time = local_clock();
b54d6934 1969 struct closure cl;
17e21a9f 1970 struct keylist parent_keys;
b54d6934
KO
1971
1972 closure_init_stack(&cl);
17e21a9f 1973 bch_keylist_init(&parent_keys);
cafe5635 1974
0a63b66d
KO
1975 if (btree_check_reserve(b, op)) {
1976 if (!b->level)
1977 return -EINTR;
1978 else
1979 WARN(1, "insufficient reserve for split\n");
1980 }
78365411 1981
0a63b66d 1982 n1 = btree_node_alloc_replacement(b, op);
cafe5635
KO
1983 if (IS_ERR(n1))
1984 goto err;
1985
ee811287
KO
1986 split = set_blocks(btree_bset_first(n1),
1987 block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;
cafe5635 1988
cafe5635
KO
1989 if (split) {
1990 unsigned keys = 0;
1991
ee811287 1992 trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
c37511b8 1993
2452cc89 1994 n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent);
cafe5635
KO
1995 if (IS_ERR(n2))
1996 goto err_free1;
1997
d6fd3b11 1998 if (!b->parent) {
2452cc89 1999 n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL);
cafe5635
KO
2000 if (IS_ERR(n3))
2001 goto err_free2;
2002 }
2003
2a285686
KO
2004 mutex_lock(&n1->write_lock);
2005 mutex_lock(&n2->write_lock);
2006
1b207d80 2007 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
cafe5635 2008
d6fd3b11
KO
2009 /*
2010 * Has to be a linear search because we don't have an auxiliary
cafe5635
KO
2011 * search tree yet
2012 */
2013
ee811287
KO
2014 while (keys < (btree_bset_first(n1)->keys * 3) / 5)
2015 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
fafff81c 2016 keys));
cafe5635 2017
fafff81c 2018 bkey_copy_key(&n1->key,
ee811287
KO
2019 bset_bkey_idx(btree_bset_first(n1), keys));
2020 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));
cafe5635 2021
ee811287
KO
2022 btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
2023 btree_bset_first(n1)->keys = keys;
cafe5635 2024
ee811287
KO
2025 memcpy(btree_bset_first(n2)->start,
2026 bset_bkey_last(btree_bset_first(n1)),
2027 btree_bset_first(n2)->keys * sizeof(uint64_t));
cafe5635
KO
2028
2029 bkey_copy_key(&n2->key, &b->key);
2030
17e21a9f 2031 bch_keylist_add(&parent_keys, &n2->key);
b54d6934 2032 bch_btree_node_write(n2, &cl);
2a285686 2033 mutex_unlock(&n2->write_lock);
cafe5635 2034 rw_unlock(true, n2);
c37511b8 2035 } else {
ee811287 2036 trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);
c37511b8 2037
2a285686 2038 mutex_lock(&n1->write_lock);
1b207d80 2039 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
c37511b8 2040 }
cafe5635 2041
17e21a9f 2042 bch_keylist_add(&parent_keys, &n1->key);
b54d6934 2043 bch_btree_node_write(n1, &cl);
2a285686 2044 mutex_unlock(&n1->write_lock);
cafe5635
KO
2045
2046 if (n3) {
d6fd3b11 2047 /* Depth increases, make a new root */
2a285686 2048 mutex_lock(&n3->write_lock);
cafe5635 2049 bkey_copy_key(&n3->key, &MAX_KEY);
17e21a9f 2050 bch_btree_insert_keys(n3, op, &parent_keys, NULL);
b54d6934 2051 bch_btree_node_write(n3, &cl);
2a285686 2052 mutex_unlock(&n3->write_lock);
cafe5635 2053
b54d6934 2054 closure_sync(&cl);
cafe5635
KO
2055 bch_btree_set_root(n3);
2056 rw_unlock(true, n3);
d6fd3b11
KO
2057 } else if (!b->parent) {
2058 /* Root filled up but didn't need to be split */
b54d6934 2059 closure_sync(&cl);
cafe5635
KO
2060 bch_btree_set_root(n1);
2061 } else {
17e21a9f 2062		/* Split a non-root node */
b54d6934 2063 closure_sync(&cl);
17e21a9f
KO
2064 make_btree_freeing_key(b, parent_keys.top);
2065 bch_keylist_push(&parent_keys);
2066
17e21a9f
KO
2067 bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
2068 BUG_ON(!bch_keylist_empty(&parent_keys));
cafe5635
KO
2069 }
2070
05335cff 2071 btree_node_free(b);
cafe5635 2072 rw_unlock(true, n1);
cafe5635 2073
169ef1cf 2074 bch_time_stats_update(&b->c->btree_split_time, start_time);
cafe5635
KO
2075
2076 return 0;
2077err_free2:
5f5837d2 2078 bkey_put(b->c, &n2->key);
e8e1d468 2079 btree_node_free(n2);
cafe5635
KO
2080 rw_unlock(true, n2);
2081err_free1:
5f5837d2 2082 bkey_put(b->c, &n1->key);
e8e1d468 2083 btree_node_free(n1);
cafe5635
KO
2084 rw_unlock(true, n1);
2085err:
0a63b66d 2086 WARN(1, "bcache: btree split failed (level %u)", b->level);
5f5837d2 2087
cafe5635
KO
2088 if (n3 == ERR_PTR(-EAGAIN) ||
2089 n2 == ERR_PTR(-EAGAIN) ||
2090 n1 == ERR_PTR(-EAGAIN))
2091 return -EAGAIN;
2092
cafe5635
KO
2093 return -ENOMEM;
2094}
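btree_split() chooses between a plain compaction and a real split by comparing the rewritten node against 4/5 of a full node's blocks; when it does split, it walks keys until roughly 3/5 of the key payload sits in the first node, moves the rest to the second, and allocates a third node one level up as a new root if the node being split was the root. The kernel walks keys one at a time because they are variable-length u64 sequences; the sketch below reproduces just the 3/5 split-point calculation over a flat array of per-key sizes, with invented names.

/* Sketch only: pick a split point once ~3/5 of the key payload is in node 1. */
#include <stdio.h>

static unsigned split_point(const unsigned *key_u64s, unsigned nkeys)
{
	unsigned total = 0, acc = 0, i;

	for (i = 0; i < nkeys; i++)
		total += key_u64s[i];

	/* Accumulate whole keys until we reach 3/5 of the total payload. */
	for (i = 0; i < nkeys && acc < total * 3 / 5; i++)
		acc += key_u64s[i];

	return i;   /* keys [0, i) stay in n1, keys [i, nkeys) move to n2 */
}

int main(void)
{
	unsigned sizes[] = { 3, 2, 4, 3, 2, 4, 2 };   /* u64s per key, 20 total */
	unsigned cut = split_point(sizes, 7);

	printf("first node keeps %u of 7 keys\n", cut); /* 4: 3+2+4+3 = 12 >= 12 */
	return 0;
}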
2095
26c949f8 2096static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
c18536a7 2097 struct keylist *insert_keys,
1b207d80
KO
2098 atomic_t *journal_ref,
2099 struct bkey *replace_key)
cafe5635 2100{
2a285686
KO
2101 struct closure cl;
2102
17e21a9f
KO
2103 BUG_ON(b->level && replace_key);
2104
2a285686
KO
2105 closure_init_stack(&cl);
2106
2107 mutex_lock(&b->write_lock);
2108
2109 if (write_block(b) != btree_bset_last(b) &&
2110 b->keys.last_set_unwritten)
2111 bch_btree_init_next(b); /* just wrote a set */
2112
59158fde 2113 if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) {
2a285686
KO
2114 mutex_unlock(&b->write_lock);
2115 goto split;
2116 }
3b3e9e50 2117
2a285686 2118 BUG_ON(write_block(b) != btree_bset_last(b));
cafe5635 2119
2a285686
KO
2120 if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
2121 if (!b->level)
2122 bch_btree_leaf_dirty(b, journal_ref);
2123 else
2124 bch_btree_node_write(b, &cl);
2125 }
17e21a9f 2126
2a285686
KO
2127 mutex_unlock(&b->write_lock);
2128
2129 /* wait for btree node write if necessary, after unlock */
2130 closure_sync(&cl);
2131
2132 return 0;
2133split:
2134 if (current->bio_list) {
2135 op->lock = b->c->root->level + 1;
2136 return -EAGAIN;
2137 } else if (op->lock <= b->c->root->level) {
2138 op->lock = b->c->root->level + 1;
2139 return -EINTR;
2140 } else {
2141 /* Invalidated all iterators */
2142 int ret = btree_split(b, op, insert_keys, replace_key);
2143
2144 if (bch_keylist_empty(insert_keys))
2145 return 0;
2146 else if (!ret)
2147 return -EINTR;
2148 return ret;
17e21a9f 2149 }
26c949f8 2150}
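When the keys do not fit, bch_btree_insert_node() does not split unconditionally: inside generic_make_request (current->bio_list set) it returns -EAGAIN so the insert can be retried from a context that may block; if the traversal only took locks below the root (op->lock too low) it bumps op->lock and returns -EINTR so the walk restarts from the root with stronger locks; only otherwise does it call btree_split(). A small sketch of that three-way decision, with invented names and plain ints modelling the lock levels, is below.

/* Sketch only: the three outcomes when an insert does not fit in the node. */
#include <stdio.h>

enum fit_action { DO_SPLIT, RETRY_EAGAIN, RETRY_EINTR };

/* in_make_request models current->bio_list != NULL;
 * op_lock / root_level model op->lock and c->root->level. */
static enum fit_action on_node_full(int in_make_request,
				    int op_lock, int root_level)
{
	if (in_make_request)
		return RETRY_EAGAIN;   /* cannot block here; retry from a safer context */
	if (op_lock <= root_level)
		return RETRY_EINTR;    /* locks too weak; restart traversal higher up */
	return DO_SPLIT;               /* safe to split in place */
}

int main(void)
{
	printf("%d\n", on_node_full(1, 0, 1));  /* 1: RETRY_EAGAIN */
	printf("%d\n", on_node_full(0, 0, 1));  /* 2: RETRY_EINTR */
	printf("%d\n", on_node_full(0, 2, 1));  /* 0: DO_SPLIT */
	return 0;
}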
cafe5635 2151
e7c590eb
KO
2152int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
2153 struct bkey *check_key)
2154{
2155 int ret = -EINTR;
2156 uint64_t btree_ptr = b->key.ptr[0];
2157 unsigned long seq = b->seq;
2158 struct keylist insert;
2159 bool upgrade = op->lock == -1;
2160
2161 bch_keylist_init(&insert);
2162
2163 if (upgrade) {
2164 rw_unlock(false, b);
2165 rw_lock(true, b, b->level);
2166
2167 if (b->key.ptr[0] != btree_ptr ||
2ef9ccbf
ZL
2168 b->seq != seq + 1) {
2169 op->lock = b->level;
e7c590eb 2170 goto out;
2ef9ccbf 2171 }
e7c590eb
KO
2172 }
2173
2174 SET_KEY_PTRS(check_key, 1);
2175 get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));
2176
2177 SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);
2178
2179 bch_keylist_add(&insert, check_key);
2180
1b207d80 2181 ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);
e7c590eb
KO
2182
2183 BUG_ON(!ret && !bch_keylist_empty(&insert));
2184out:
2185 if (upgrade)
2186 downgrade_write(&b->lock);
2187 return ret;
2188}
2189
cc7b8819
KO
2190struct btree_insert_op {
2191 struct btree_op op;
2192 struct keylist *keys;
2193 atomic_t *journal_ref;
2194 struct bkey *replace_key;
2195};
cafe5635 2196
08239ca2 2197static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
cc7b8819
KO
2198{
2199 struct btree_insert_op *op = container_of(b_op,
2200 struct btree_insert_op, op);
cafe5635 2201
cc7b8819
KO
2202 int ret = bch_btree_insert_node(b, &op->op, op->keys,
2203 op->journal_ref, op->replace_key);
2204 if (ret && !bch_keylist_empty(op->keys))
2205 return ret;
2206 else
2207 return MAP_DONE;
cafe5635
KO
2208}
2209
cc7b8819
KO
2210int bch_btree_insert(struct cache_set *c, struct keylist *keys,
2211 atomic_t *journal_ref, struct bkey *replace_key)
cafe5635 2212{
cc7b8819 2213 struct btree_insert_op op;
cafe5635 2214 int ret = 0;
cafe5635 2215
cc7b8819 2216 BUG_ON(current->bio_list);
4f3d4014 2217 BUG_ON(bch_keylist_empty(keys));
cafe5635 2218
cc7b8819
KO
2219 bch_btree_op_init(&op.op, 0);
2220 op.keys = keys;
2221 op.journal_ref = journal_ref;
2222 op.replace_key = replace_key;
cafe5635 2223
cc7b8819
KO
2224 while (!ret && !bch_keylist_empty(keys)) {
2225 op.op.lock = 0;
2226 ret = bch_btree_map_leaf_nodes(&op.op, c,
2227 &START_KEY(keys->keys),
2228 btree_insert_fn);
2229 }
cafe5635 2230
cc7b8819
KO
2231 if (ret) {
2232 struct bkey *k;
cafe5635 2233
cc7b8819 2234 pr_err("error %i", ret);
cafe5635 2235
cc7b8819 2236 while ((k = bch_keylist_pop(keys)))
3a3b6a4e 2237 bkey_put(c, k);
cc7b8819
KO
2238 } else if (op.op.insert_collision)
2239 ret = -ESRCH;
6054c6d4 2240
cafe5635
KO
2241 return ret;
2242}
2243
2244void bch_btree_set_root(struct btree *b)
2245{
2246 unsigned i;
e49c7c37
KO
2247 struct closure cl;
2248
2249 closure_init_stack(&cl);
cafe5635 2250
c37511b8
KO
2251 trace_bcache_btree_set_root(b);
2252
cafe5635
KO
2253 BUG_ON(!b->written);
2254
2255 for (i = 0; i < KEY_PTRS(&b->key); i++)
2256 BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);
2257
2258 mutex_lock(&b->c->bucket_lock);
2259 list_del_init(&b->list);
2260 mutex_unlock(&b->c->bucket_lock);
2261
2262 b->c->root = b;
cafe5635 2263
e49c7c37
KO
2264 bch_journal_meta(b->c, &cl);
2265 closure_sync(&cl);
cafe5635
KO
2266}
2267
48dad8ba
KO
2268/* Map across nodes or keys */
2269
2270static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
2271 struct bkey *from,
2272 btree_map_nodes_fn *fn, int flags)
2273{
2274 int ret = MAP_CONTINUE;
2275
2276 if (b->level) {
2277 struct bkey *k;
2278 struct btree_iter iter;
2279
c052dd9a 2280 bch_btree_iter_init(&b->keys, &iter, from);
48dad8ba 2281
a85e968e 2282 while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
48dad8ba
KO
2283 bch_ptr_bad))) {
2284 ret = btree(map_nodes_recurse, k, b,
2285 op, from, fn, flags);
2286 from = NULL;
2287
2288 if (ret != MAP_CONTINUE)
2289 return ret;
2290 }
2291 }
2292
2293 if (!b->level || flags == MAP_ALL_NODES)
2294 ret = fn(op, b);
2295
2296 return ret;
2297}
2298
2299int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
2300 struct bkey *from, btree_map_nodes_fn *fn, int flags)
2301{
b54d6934 2302 return btree_root(map_nodes_recurse, c, op, from, fn, flags);
48dad8ba
KO
2303}
2304
2305static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
2306 struct bkey *from, btree_map_keys_fn *fn,
2307 int flags)
2308{
2309 int ret = MAP_CONTINUE;
2310 struct bkey *k;
2311 struct btree_iter iter;
2312
c052dd9a 2313 bch_btree_iter_init(&b->keys, &iter, from);
48dad8ba 2314
a85e968e 2315 while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
48dad8ba
KO
2316 ret = !b->level
2317 ? fn(op, b, k)
2318 : btree(map_keys_recurse, k, b, op, from, fn, flags);
2319 from = NULL;
2320
2321 if (ret != MAP_CONTINUE)
2322 return ret;
2323 }
2324
2325 if (!b->level && (flags & MAP_END_KEY))
2326 ret = fn(op, b, &KEY(KEY_INODE(&b->key),
2327 KEY_OFFSET(&b->key), 0));
2328
2329 return ret;
2330}
2331
2332int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
2333 struct bkey *from, btree_map_keys_fn *fn, int flags)
2334{
b54d6934 2335 return btree_root(map_keys_recurse, c, op, from, fn, flags);
48dad8ba
KO
2336}
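bch_btree_map_nodes()/bch_btree_map_keys() are generic visitors: they recurse through interior nodes and hand nodes or leaf keys to fn, which steers the walk by returning MAP_CONTINUE or MAP_DONE. The following compact sketch shows that return-code-driven traversal over a toy tree; node_model, visit_fn and the MODEL_MAP_* constants are invented stand-ins for the kernel's types.

/* Sketch only: a MAP_CONTINUE / MAP_DONE driven tree walk. */
#include <stdio.h>

enum { MODEL_MAP_DONE = 0, MODEL_MAP_CONTINUE = 1 };

struct node_model {
	int level;                     /* 0 == leaf */
	int key;                       /* payload for leaves */
	struct node_model *child[2];   /* children for interior nodes */
};

typedef int (visit_fn)(int key);

static int map_keys(struct node_model *n, visit_fn *fn)
{
	int ret = MODEL_MAP_CONTINUE;

	if (n->level) {
		for (int i = 0; i < 2 && n->child[i]; i++) {
			ret = map_keys(n->child[i], fn);
			if (ret != MODEL_MAP_CONTINUE)
				return ret;     /* fn asked us to stop early */
		}
		return ret;
	}

	return fn(n->key);                      /* leaf: hand the key to the callback */
}

static int print_until_3(int key)
{
	printf("key %d\n", key);
	return key >= 3 ? MODEL_MAP_DONE : MODEL_MAP_CONTINUE;
}

int main(void)
{
	struct node_model l1 = { 0, 1 }, l2 = { 0, 3 }, l3 = { 0, 5 };
	struct node_model i1 = { 1, 0, { &l1, &l2 } };
	struct node_model root = { 1, 0, { &i1, &l3 } };

	map_keys(&root, print_until_3);         /* prints key 1 and key 3, then stops */
	return 0;
}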
2337
cafe5635
KO
2338/* Keybuf code */
2339
2340static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
2341{
2342 /* Overlapping keys compare equal */
2343 if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
2344 return -1;
2345 if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
2346 return 1;
2347 return 0;
2348}
2349
2350static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
2351 struct keybuf_key *r)
2352{
2353 return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
2354}
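keybuf_cmp() deliberately reports overlapping keys as equal: an RB-tree insert of a new key then fails if it overlaps anything already buffered, and a lookup with a probe key finds any entry that overlaps it. keybuf_nonoverlapping_cmp() is the strict ordering used once keys are known to be disjoint. Below is a standalone sketch of the "overlap compares equal" comparator on half-open integer ranges; real bcache keys are keyed by end offset, so this is a simplified model, not the kernel's comparison.

/* Sketch only: a comparator in which overlapping ranges compare equal. */
#include <stdio.h>

struct range { long start, end; };   /* half-open [start, end) */

static int overlap_cmp(const struct range *l, const struct range *r)
{
	if (l->end <= r->start)
		return -1;               /* l entirely before r */
	if (l->start >= r->end)
		return 1;                /* l entirely after r */
	return 0;                        /* any overlap: treat as equal */
}

int main(void)
{
	struct range a = { 0, 10 }, b = { 10, 20 }, c = { 5, 15 };

	printf("%d\n", overlap_cmp(&a, &b));   /* -1: disjoint, a before b */
	printf("%d\n", overlap_cmp(&c, &a));   /*  0: overlap, "equal" */
	printf("%d\n", overlap_cmp(&b, &a));   /*  1: disjoint, b after a */
	return 0;
}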
2355
48dad8ba
KO
2356struct refill {
2357 struct btree_op op;
48a915a8 2358 unsigned nr_found;
48dad8ba
KO
2359 struct keybuf *buf;
2360 struct bkey *end;
2361 keybuf_pred_fn *pred;
2362};
cafe5635 2363
48dad8ba
KO
2364static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
2365 struct bkey *k)
2366{
2367 struct refill *refill = container_of(op, struct refill, op);
2368 struct keybuf *buf = refill->buf;
2369 int ret = MAP_CONTINUE;
cafe5635 2370
48dad8ba
KO
2371 if (bkey_cmp(k, refill->end) >= 0) {
2372 ret = MAP_DONE;
2373 goto out;
2374 }
cafe5635 2375
48dad8ba
KO
2376 if (!KEY_SIZE(k)) /* end key */
2377 goto out;
cafe5635 2378
48dad8ba
KO
2379 if (refill->pred(buf, k)) {
2380 struct keybuf_key *w;
cafe5635 2381
48dad8ba 2382 spin_lock(&buf->lock);
cafe5635 2383
48dad8ba
KO
2384 w = array_alloc(&buf->freelist);
2385 if (!w) {
2386 spin_unlock(&buf->lock);
2387 return MAP_DONE;
2388 }
cafe5635 2389
48dad8ba
KO
2390 w->private = NULL;
2391 bkey_copy(&w->key, k);
cafe5635 2392
48dad8ba
KO
2393 if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
2394 array_free(&buf->freelist, w);
48a915a8
KO
2395 else
2396 refill->nr_found++;
cafe5635 2397
48dad8ba
KO
2398 if (array_freelist_empty(&buf->freelist))
2399 ret = MAP_DONE;
cafe5635 2400
48dad8ba 2401 spin_unlock(&buf->lock);
cafe5635 2402 }
48dad8ba
KO
2403out:
2404 buf->last_scanned = *k;
2405 return ret;
cafe5635
KO
2406}
2407
2408void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
72c27061 2409 struct bkey *end, keybuf_pred_fn *pred)
cafe5635
KO
2410{
2411 struct bkey start = buf->last_scanned;
48dad8ba 2412 struct refill refill;
cafe5635
KO
2413
2414 cond_resched();
2415
b54d6934 2416 bch_btree_op_init(&refill.op, -1);
48a915a8
KO
2417 refill.nr_found = 0;
2418 refill.buf = buf;
2419 refill.end = end;
2420 refill.pred = pred;
48dad8ba
KO
2421
2422 bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
2423 refill_keybuf_fn, MAP_END_KEY);
cafe5635 2424
48a915a8
KO
2425 trace_bcache_keyscan(refill.nr_found,
2426 KEY_INODE(&start), KEY_OFFSET(&start),
2427 KEY_INODE(&buf->last_scanned),
2428 KEY_OFFSET(&buf->last_scanned));
cafe5635
KO
2429
2430 spin_lock(&buf->lock);
2431
2432 if (!RB_EMPTY_ROOT(&buf->keys)) {
2433 struct keybuf_key *w;
2434 w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2435 buf->start = START_KEY(&w->key);
2436
2437 w = RB_LAST(&buf->keys, struct keybuf_key, node);
2438 buf->end = w->key;
2439 } else {
2440 buf->start = MAX_KEY;
2441 buf->end = MAX_KEY;
2442 }
2443
2444 spin_unlock(&buf->lock);
2445}
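After the map_keys walk fills the buffer, bch_refill_keybuf() records the window the buffer now covers: buf->start and buf->end are taken from the first and last buffered keys, or both set to MAX_KEY when nothing matched, and bch_keybuf_check_overlapping() later compares incoming ranges against exactly that window. A small sketch of the window bookkeeping over a sorted array standing in for the RB-tree (MODEL_MAX_KEY and window_from_keys are invented, and the real code uses the start of the first key and the end of the last):

/* Sketch only: track the window covered by the buffered keys. */
#include <stdio.h>

#define MODEL_MAX_KEY 0x7fffffffL   /* stands in for MAX_KEY: an empty window */

struct window { long start, end; };

static struct window window_from_keys(const long *keys, int n)
{
	struct window w = { MODEL_MAX_KEY, MODEL_MAX_KEY };

	if (n) {
		w.start = keys[0];       /* first buffered key */
		w.end   = keys[n - 1];   /* last buffered key */
	}
	return w;
}

int main(void)
{
	long keys[] = { 100, 250, 400 };
	struct window w = window_from_keys(keys, 3);
	struct window e = window_from_keys(keys, 0);

	printf("covered [%ld, %ld]\n", w.start, w.end);  /* [100, 400] */
	printf("empty   [%ld, %ld]\n", e.start, e.end);  /* MAX_KEY sentinel twice */
	return 0;
}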
2446
2447static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2448{
2449 rb_erase(&w->node, &buf->keys);
2450 array_free(&buf->freelist, w);
2451}
2452
2453void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2454{
2455 spin_lock(&buf->lock);
2456 __bch_keybuf_del(buf, w);
2457 spin_unlock(&buf->lock);
2458}
2459
2460bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
2461 struct bkey *end)
2462{
2463 bool ret = false;
2464 struct keybuf_key *p, *w, s;
2465 s.key = *start;
2466
2467 if (bkey_cmp(end, &buf->start) <= 0 ||
2468 bkey_cmp(start, &buf->end) >= 0)
2469 return false;
2470
2471 spin_lock(&buf->lock);
2472 w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);
2473
2474 while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
2475 p = w;
2476 w = RB_NEXT(w, node);
2477
2478 if (p->private)
2479 ret = true;
2480 else
2481 __bch_keybuf_del(buf, p);
2482 }
2483
2484 spin_unlock(&buf->lock);
2485 return ret;
2486}
2487
2488struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
2489{
2490 struct keybuf_key *w;
2491 spin_lock(&buf->lock);
2492
2493 w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2494
2495 while (w && w->private)
2496 w = RB_NEXT(w, node);
2497
2498 if (w)
2499 w->private = ERR_PTR(-EINTR);
2500
2501 spin_unlock(&buf->lock);
2502 return w;
2503}
2504
2505struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
48dad8ba
KO
2506 struct keybuf *buf,
2507 struct bkey *end,
2508 keybuf_pred_fn *pred)
cafe5635
KO
2509{
2510 struct keybuf_key *ret;
2511
2512 while (1) {
2513 ret = bch_keybuf_next(buf);
2514 if (ret)
2515 break;
2516
2517 if (bkey_cmp(&buf->last_scanned, end) >= 0) {
2518 pr_debug("scan finished");
2519 break;
2520 }
2521
72c27061 2522 bch_refill_keybuf(c, buf, end, pred);
cafe5635
KO
2523 }
2524
2525 return ret;
2526}
2527
72c27061 2528void bch_keybuf_init(struct keybuf *buf)
cafe5635 2529{
cafe5635
KO
2530 buf->last_scanned = MAX_KEY;
2531 buf->keys = RB_ROOT;
2532
2533 spin_lock_init(&buf->lock);
2534 array_allocator_init(&buf->freelist);
2535}