// SPDX-License-Identifier: GPL-2.0
/*
 * bcache setup/teardown code, and some metadata io - read a superblock and
 * figure out what to do with it.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/debugfs.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/sysfs.h>
unsigned int bch_cutoff_writeback;
unsigned int bch_cutoff_writeback_sync;

static const char bcache_magic[] = {
	0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca,
	0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81
};

static const char invalid_uuid[] = {
	0xa0, 0x3e, 0xf8, 0xed, 0x3e, 0xe1, 0xb8, 0x78,
	0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99
};

static struct kobject *bcache_kobj;
struct mutex bch_register_lock;
bool bcache_is_reboot;
LIST_HEAD(bch_cache_sets);
static LIST_HEAD(uncached_devices);

static int bcache_major;
static DEFINE_IDA(bcache_device_idx);
static wait_queue_head_t unregister_wait;
struct workqueue_struct *bcache_wq;
struct workqueue_struct *bch_journal_wq;

#define BTREE_MAX_PAGES		(256 * 1024 / PAGE_SIZE)
/* limitation of partitions number on single bcache device */
#define BCACHE_MINORS		128
/* limitation of bcache devices number on single system */
#define BCACHE_DEVICE_IDX_MAX	((1U << MINORBITS)/BCACHE_MINORS)
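/*
 * Worked example (illustrative; assumes the mainline value MINORBITS == 20):
 * each bcache device reserves BCACHE_MINORS (128) minor numbers for its
 * partitions, so at most (1 << 20) / 128 = 8192 bcache devices can exist on
 * one system, and device idx owns minors [idx * 128, idx * 128 + 127].
 */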
static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
			      struct cache_sb_disk **res)
{
	const char *err;
	struct cache_sb_disk *s;
	struct page *page;
	unsigned int i;

	page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
				   SB_OFFSET >> PAGE_SHIFT, GFP_KERNEL);
	if (IS_ERR(page))
		return "error reading superblock";

	s = page_address(page) + offset_in_page(SB_OFFSET);

	sb->offset		= le64_to_cpu(s->offset);
	sb->version		= le64_to_cpu(s->version);

	memcpy(sb->magic,	s->magic, 16);
	memcpy(sb->uuid,	s->uuid, 16);
	memcpy(sb->set_uuid,	s->set_uuid, 16);
	memcpy(sb->label,	s->label, SB_LABEL_SIZE);

	sb->flags		= le64_to_cpu(s->flags);
	sb->seq			= le64_to_cpu(s->seq);
	sb->last_mount		= le32_to_cpu(s->last_mount);
	sb->first_bucket	= le16_to_cpu(s->first_bucket);
	sb->keys		= le16_to_cpu(s->keys);

	for (i = 0; i < SB_JOURNAL_BUCKETS; i++)
		sb->d[i] = le64_to_cpu(s->d[i]);

	pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u\n",
		 sb->version, sb->flags, sb->seq, sb->keys);

	err = "Not a bcache superblock (bad offset)";
	if (sb->offset != SB_SECTOR)
		goto err;

	err = "Not a bcache superblock (bad magic)";
	if (memcmp(sb->magic, bcache_magic, 16))
		goto err;

	err = "Too many journal buckets";
	if (sb->keys > SB_JOURNAL_BUCKETS)
		goto err;

	err = "Bad checksum";
	if (s->csum != csum_set(s))
		goto err;

	err = "Bad UUID";
	if (bch_is_zero(sb->uuid, 16))
		goto err;

	sb->block_size	= le16_to_cpu(s->block_size);

	err = "Superblock block size smaller than device block size";
	if (sb->block_size << 9 < bdev_logical_block_size(bdev))
		goto err;

	switch (sb->version) {
	case BCACHE_SB_VERSION_BDEV:
		sb->data_offset	= BDEV_DATA_START_DEFAULT;
		break;
	case BCACHE_SB_VERSION_BDEV_WITH_OFFSET:
		sb->data_offset	= le64_to_cpu(s->data_offset);

		err = "Bad data offset";
		if (sb->data_offset < BDEV_DATA_START_DEFAULT)
			goto err;

		break;
	case BCACHE_SB_VERSION_CDEV:
	case BCACHE_SB_VERSION_CDEV_WITH_UUID:
		sb->nbuckets	= le64_to_cpu(s->nbuckets);
		sb->bucket_size	= le16_to_cpu(s->bucket_size);

		sb->nr_in_set	= le16_to_cpu(s->nr_in_set);
		sb->nr_this_dev	= le16_to_cpu(s->nr_this_dev);

		err = "Too many buckets";
		if (sb->nbuckets > LONG_MAX)
			goto err;

		err = "Not enough buckets";
		if (sb->nbuckets < 1 << 7)
			goto err;

		err = "Bad block/bucket size";
		if (!is_power_of_2(sb->block_size) ||
		    sb->block_size > PAGE_SECTORS ||
		    !is_power_of_2(sb->bucket_size) ||
		    sb->bucket_size < PAGE_SECTORS)
			goto err;

		err = "Invalid superblock: device too small";
		if (get_capacity(bdev->bd_disk) <
		    sb->bucket_size * sb->nbuckets)
			goto err;

		err = "Bad UUID";
		if (bch_is_zero(sb->set_uuid, 16))
			goto err;

		err = "Bad cache device number in set";
		if (!sb->nr_in_set ||
		    sb->nr_in_set <= sb->nr_this_dev ||
		    sb->nr_in_set > MAX_CACHES_PER_SET)
			goto err;

		err = "Journal buckets not sequential";
		for (i = 0; i < sb->keys; i++)
			if (sb->d[i] != sb->first_bucket + i)
				goto err;

		err = "Too many journal buckets";
		if (sb->first_bucket + sb->keys > sb->nbuckets)
			goto err;

		err = "Invalid superblock: first bucket comes before end of super";
		if (sb->first_bucket * sb->bucket_size < 16)
			goto err;

		break;
	default:
		err = "Unsupported superblock version";
		goto err;
	}

	sb->last_mount = (u32)ktime_get_real_seconds();

	*res = s;
	return NULL;
err:
	put_page(page);
	return err;
}
static void write_bdev_super_endio(struct bio *bio)
{
	struct cached_dev *dc = bio->bi_private;

	if (bio->bi_status)
		bch_count_backing_io_errors(dc, bio);

	closure_put(&dc->sb_write);
}
static void __write_super(struct cache_sb *sb, struct cache_sb_disk *out,
			  struct bio *bio)
{
	unsigned int i;

	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_META;
	bio->bi_iter.bi_sector	= SB_SECTOR;
	__bio_add_page(bio, virt_to_page(out), SB_SIZE,
		       offset_in_page(out));

	out->offset		= cpu_to_le64(sb->offset);
	out->version		= cpu_to_le64(sb->version);

	memcpy(out->uuid,	sb->uuid, 16);
	memcpy(out->set_uuid,	sb->set_uuid, 16);
	memcpy(out->label,	sb->label, SB_LABEL_SIZE);

	out->flags		= cpu_to_le64(sb->flags);
	out->seq		= cpu_to_le64(sb->seq);

	out->last_mount		= cpu_to_le32(sb->last_mount);
	out->first_bucket	= cpu_to_le16(sb->first_bucket);
	out->keys		= cpu_to_le16(sb->keys);

	for (i = 0; i < sb->keys; i++)
		out->d[i] = cpu_to_le64(sb->d[i]);

	out->csum = csum_set(out);

	pr_debug("ver %llu, flags %llu, seq %llu\n",
		 sb->version, sb->flags, sb->seq);

	submit_bio(bio);
}
static void bch_write_bdev_super_unlock(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);

	up(&dc->sb_write_mutex);
}

void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
{
	struct closure *cl = &dc->sb_write;
	struct bio *bio = &dc->sb_bio;

	down(&dc->sb_write_mutex);
	closure_init(cl, parent);

	bio_init(bio, dc->sb_bv, 1);
	bio_set_dev(bio, dc->bdev);
	bio->bi_end_io	= write_bdev_super_endio;
	bio->bi_private = dc;

	closure_get(cl);
	/* I/O request sent to backing device */
	__write_super(&dc->sb, dc->sb_disk, bio);

	closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
}
static void write_super_endio(struct bio *bio)
{
	struct cache *ca = bio->bi_private;

	/* is_read = 0 */
	bch_count_io_errors(ca, bio->bi_status, 0,
			    "writing superblock");
	closure_put(&ca->set->sb_write);
}

static void bcache_write_super_unlock(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, sb_write);

	up(&c->sb_write_mutex);
}

void bcache_write_super(struct cache_set *c)
{
	struct closure *cl = &c->sb_write;
	struct cache *ca;
	unsigned int i;

	down(&c->sb_write_mutex);
	closure_init(cl, &c->cl);

	c->sb.seq++;

	for_each_cache(ca, c, i) {
		struct bio *bio = &ca->sb_bio;

		ca->sb.version		= BCACHE_SB_VERSION_CDEV_WITH_UUID;
		ca->sb.seq		= c->sb.seq;
		ca->sb.last_mount	= c->sb.last_mount;

		SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));

		bio_init(bio, ca->sb_bv, 1);
		bio_set_dev(bio, ca->bdev);
		bio->bi_end_io	= write_super_endio;
		bio->bi_private = ca;

		closure_get(cl);
		__write_super(&ca->sb, ca->sb_disk, bio);
	}

	closure_return_with_destructor(cl, bcache_write_super_unlock);
}
/* UUID io */

static void uuid_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	struct cache_set *c = container_of(cl, struct cache_set, uuid_write);

	cache_set_err_on(bio->bi_status, c, "accessing uuids");
	bch_bbio_free(bio, c);
	closure_put(cl);
}

static void uuid_io_unlock(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, uuid_write);

	up(&c->uuid_write_mutex);
}

static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
		    struct bkey *k, struct closure *parent)
{
	struct closure *cl = &c->uuid_write;
	struct uuid_entry *u;
	unsigned int i;
	char buf[80];

	down(&c->uuid_write_mutex);
	closure_init(cl, parent);

	for (i = 0; i < KEY_PTRS(k); i++) {
		struct bio *bio = bch_bbio_alloc(c);

		bio->bi_opf = REQ_SYNC | REQ_META | op_flags;
		bio->bi_iter.bi_size = KEY_SIZE(k) << 9;

		bio->bi_end_io	= uuid_endio;
		bio->bi_private = cl;
		bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
		bch_bio_map(bio, c->uuids);

		bch_submit_bbio(bio, c, k, i);

		if (op != REQ_OP_WRITE)
			break;
	}

	bch_extent_to_text(buf, sizeof(buf), k);
	pr_debug("%s UUIDs at %s\n", op == REQ_OP_WRITE ? "wrote" : "read", buf);

	for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
		if (!bch_is_zero(u->uuid, 16))
			pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u\n",
				 u - c->uuids, u->uuid, u->label,
				 u->first_reg, u->last_reg, u->invalidated);

	closure_return_with_destructor(cl, uuid_io_unlock);
}

static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
{
	struct bkey *k = &j->uuid_bucket;

	if (__bch_btree_ptr_invalid(c, k))
		return "bad uuid pointer";

	bkey_copy(&c->uuid_bucket, k);
	uuid_io(c, REQ_OP_READ, 0, k, cl);

	if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
		struct uuid_entry_v0	*u0 = (void *) c->uuids;
		struct uuid_entry	*u1 = (void *) c->uuids;
		int i;

		closure_sync(cl);

		/*
		 * Since the new uuid entry is bigger than the old, we have to
		 * convert starting at the highest memory address and work down
		 * in order to do it in place
		 */

		for (i = c->nr_uuids - 1;
		     i >= 0;
		     --i) {
			memcpy(u1[i].uuid,	u0[i].uuid, 16);
			memcpy(u1[i].label,	u0[i].label, 32);

			u1[i].first_reg		= u0[i].first_reg;
			u1[i].last_reg		= u0[i].last_reg;
			u1[i].invalidated	= u0[i].invalidated;
		}
	}

	return NULL;
}
static int __uuid_write(struct cache_set *c)
{
	BKEY_PADDED(key) k;
	struct closure cl;
	struct cache *ca;

	closure_init_stack(&cl);
	lockdep_assert_held(&bch_register_lock);

	if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true))
		return 1;

	SET_KEY_SIZE(&k.key, c->sb.bucket_size);
	uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl);
	closure_sync(&cl);

	/* Only one bucket used for uuid write */
	ca = PTR_CACHE(c, &k.key, 0);
	atomic_long_add(ca->sb.bucket_size, &ca->meta_sectors_written);

	bkey_copy(&c->uuid_bucket, &k.key);
	bkey_put(c, &k.key);
	return 0;
}

int bch_uuid_write(struct cache_set *c)
{
	int ret = __uuid_write(c);

	if (!ret)
		bch_journal_meta(c, NULL);

	return ret;
}
static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid)
{
	struct uuid_entry *u;

	for (u = c->uuids;
	     u < c->uuids + c->nr_uuids; u++)
		if (!memcmp(u->uuid, uuid, 16))
			return u;

	return NULL;
}

static struct uuid_entry *uuid_find_empty(struct cache_set *c)
{
	static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";

	return uuid_find(c, zero_uuid);
}
/*
 * Bucket priorities/gens:
 *
 * For each bucket, we store on disk its
 *    8 bit gen
 *   16 bit priority
 *
 * See alloc.c for an explanation of the gen. The priority is used to implement
 * an LRU (and, in the future, other) cache replacement policies; for most
 * purposes it's just an opaque integer.
 *
 * The gens and the priorities don't have a whole lot to do with each other, and
 * it's actually the gens that must be written out at specific times - it's no
 * big deal if the priorities don't get written, if we lose them we just reuse
 * buckets in suboptimal order.
 *
 * On disk they're stored in a packed array, and in as many buckets as are
 * required to fit them all. The buckets we use to store them form a list; the
 * journal header points to the first bucket, the first bucket points to the
 * second bucket, et cetera.
 *
 * This code is used by the allocation code; periodically (whenever it runs out
 * of buckets to allocate from) the allocation code will invalidate some
 * buckets, but it can't use those buckets until their new gens are safely on
 * disk.
 */
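/*
 * Illustrative sketch (not part of this file): each bucket in the list
 * written by prio_io()/bch_prio_write() below starts with a small header
 * followed by the packed per-bucket entries, roughly:
 *
 *	struct bucket_disk { __le16 prio; __u8 gen; } __packed;
 *	struct prio_set {
 *		__u64 csum;		(bch_crc64 over the rest of the bucket)
 *		__u64 magic;		(pset_magic(&ca->sb))
 *		__u64 seq;
 *		__u64 next_bucket;	(bucket number of the next prio bucket)
 *		struct bucket_disk data[];
 *	};
 *
 * Field names and ordering above are a sketch for orientation only; the
 * authoritative definitions live in bcache.h.
 */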
static void prio_endio(struct bio *bio)
{
	struct cache *ca = bio->bi_private;

	cache_set_err_on(bio->bi_status, ca->set, "accessing priorities");
	bch_bbio_free(bio, ca->set);
	closure_put(&ca->prio);
}

static void prio_io(struct cache *ca, uint64_t bucket, int op,
		    unsigned long op_flags)
{
	struct closure *cl = &ca->prio;
	struct bio *bio = bch_bbio_alloc(ca->set);

	closure_init_stack(cl);

	bio->bi_iter.bi_sector	= bucket * ca->sb.bucket_size;
	bio_set_dev(bio, ca->bdev);
	bio->bi_iter.bi_size	= bucket_bytes(ca);

	bio->bi_end_io	= prio_endio;
	bio->bi_private = ca;
	bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
	bch_bio_map(bio, ca->disk_buckets);

	closure_bio_submit(ca->set, bio, &ca->prio);
	closure_sync(cl);
}
532 int bch_prio_write(struct cache
*ca
, bool wait
)
538 pr_debug("free_prio=%zu, free_none=%zu, free_inc=%zu\n",
539 fifo_used(&ca
->free
[RESERVE_PRIO
]),
540 fifo_used(&ca
->free
[RESERVE_NONE
]),
541 fifo_used(&ca
->free_inc
));
544 * Pre-check if there are enough free buckets. In the non-blocking
545 * scenario it's better to fail early rather than starting to allocate
546 * buckets and do a cleanup later in case of failure.
549 size_t avail
= fifo_used(&ca
->free
[RESERVE_PRIO
]) +
550 fifo_used(&ca
->free
[RESERVE_NONE
]);
551 if (prio_buckets(ca
) > avail
)
555 closure_init_stack(&cl
);
557 lockdep_assert_held(&ca
->set
->bucket_lock
);
559 ca
->disk_buckets
->seq
++;
561 atomic_long_add(ca
->sb
.bucket_size
* prio_buckets(ca
),
562 &ca
->meta_sectors_written
);
564 for (i
= prio_buckets(ca
) - 1; i
>= 0; --i
) {
566 struct prio_set
*p
= ca
->disk_buckets
;
567 struct bucket_disk
*d
= p
->data
;
568 struct bucket_disk
*end
= d
+ prios_per_bucket(ca
);
570 for (b
= ca
->buckets
+ i
* prios_per_bucket(ca
);
571 b
< ca
->buckets
+ ca
->sb
.nbuckets
&& d
< end
;
573 d
->prio
= cpu_to_le16(b
->prio
);
577 p
->next_bucket
= ca
->prio_buckets
[i
+ 1];
578 p
->magic
= pset_magic(&ca
->sb
);
579 p
->csum
= bch_crc64(&p
->magic
, bucket_bytes(ca
) - 8);
581 bucket
= bch_bucket_alloc(ca
, RESERVE_PRIO
, wait
);
582 BUG_ON(bucket
== -1);
584 mutex_unlock(&ca
->set
->bucket_lock
);
585 prio_io(ca
, bucket
, REQ_OP_WRITE
, 0);
586 mutex_lock(&ca
->set
->bucket_lock
);
588 ca
->prio_buckets
[i
] = bucket
;
589 atomic_dec_bug(&ca
->buckets
[bucket
].pin
);
592 mutex_unlock(&ca
->set
->bucket_lock
);
594 bch_journal_meta(ca
->set
, &cl
);
597 mutex_lock(&ca
->set
->bucket_lock
);
600 * Don't want the old priorities to get garbage collected until after we
601 * finish writing the new ones, and they're journalled
603 for (i
= 0; i
< prio_buckets(ca
); i
++) {
604 if (ca
->prio_last_buckets
[i
])
605 __bch_bucket_free(ca
,
606 &ca
->buckets
[ca
->prio_last_buckets
[i
]]);
608 ca
->prio_last_buckets
[i
] = ca
->prio_buckets
[i
];
613 static int prio_read(struct cache
*ca
, uint64_t bucket
)
615 struct prio_set
*p
= ca
->disk_buckets
;
616 struct bucket_disk
*d
= p
->data
+ prios_per_bucket(ca
), *end
= d
;
618 unsigned int bucket_nr
= 0;
621 for (b
= ca
->buckets
;
622 b
< ca
->buckets
+ ca
->sb
.nbuckets
;
625 ca
->prio_buckets
[bucket_nr
] = bucket
;
626 ca
->prio_last_buckets
[bucket_nr
] = bucket
;
629 prio_io(ca
, bucket
, REQ_OP_READ
, 0);
632 bch_crc64(&p
->magic
, bucket_bytes(ca
) - 8)) {
633 pr_warn("bad csum reading priorities\n");
637 if (p
->magic
!= pset_magic(&ca
->sb
)) {
638 pr_warn("bad magic reading priorities\n");
642 bucket
= p
->next_bucket
;
646 b
->prio
= le16_to_cpu(d
->prio
);
647 b
->gen
= b
->last_gc
= d
->gen
;
657 static int open_dev(struct block_device
*b
, fmode_t mode
)
659 struct bcache_device
*d
= b
->bd_disk
->private_data
;
661 if (test_bit(BCACHE_DEV_CLOSING
, &d
->flags
))
668 static void release_dev(struct gendisk
*b
, fmode_t mode
)
670 struct bcache_device
*d
= b
->private_data
;
675 static int ioctl_dev(struct block_device
*b
, fmode_t mode
,
676 unsigned int cmd
, unsigned long arg
)
678 struct bcache_device
*d
= b
->bd_disk
->private_data
;
680 return d
->ioctl(d
, mode
, cmd
, arg
);
683 static const struct block_device_operations bcache_cached_ops
= {
684 .submit_bio
= cached_dev_submit_bio
,
686 .release
= release_dev
,
688 .owner
= THIS_MODULE
,
691 static const struct block_device_operations bcache_flash_ops
= {
692 .submit_bio
= flash_dev_submit_bio
,
694 .release
= release_dev
,
696 .owner
= THIS_MODULE
,
699 void bcache_device_stop(struct bcache_device
*d
)
701 if (!test_and_set_bit(BCACHE_DEV_CLOSING
, &d
->flags
))
704 * - cached device: cached_dev_flush()
705 * - flash dev: flash_dev_flush()
707 closure_queue(&d
->cl
);
710 static void bcache_device_unlink(struct bcache_device
*d
)
712 lockdep_assert_held(&bch_register_lock
);
714 if (d
->c
&& !test_and_set_bit(BCACHE_DEV_UNLINK_DONE
, &d
->flags
)) {
718 sysfs_remove_link(&d
->c
->kobj
, d
->name
);
719 sysfs_remove_link(&d
->kobj
, "cache");
721 for_each_cache(ca
, d
->c
, i
)
722 bd_unlink_disk_holder(ca
->bdev
, d
->disk
);
726 static void bcache_device_link(struct bcache_device
*d
, struct cache_set
*c
,
733 for_each_cache(ca
, d
->c
, i
)
734 bd_link_disk_holder(ca
->bdev
, d
->disk
);
736 snprintf(d
->name
, BCACHEDEVNAME_SIZE
,
737 "%s%u", name
, d
->id
);
739 ret
= sysfs_create_link(&d
->kobj
, &c
->kobj
, "cache");
741 pr_err("Couldn't create device -> cache set symlink\n");
743 ret
= sysfs_create_link(&c
->kobj
, &d
->kobj
, d
->name
);
745 pr_err("Couldn't create cache set -> device symlink\n");
747 clear_bit(BCACHE_DEV_UNLINK_DONE
, &d
->flags
);
750 static void bcache_device_detach(struct bcache_device
*d
)
752 lockdep_assert_held(&bch_register_lock
);
754 atomic_dec(&d
->c
->attached_dev_nr
);
756 if (test_bit(BCACHE_DEV_DETACHING
, &d
->flags
)) {
757 struct uuid_entry
*u
= d
->c
->uuids
+ d
->id
;
759 SET_UUID_FLASH_ONLY(u
, 0);
760 memcpy(u
->uuid
, invalid_uuid
, 16);
761 u
->invalidated
= cpu_to_le32((u32
)ktime_get_real_seconds());
762 bch_uuid_write(d
->c
);
765 bcache_device_unlink(d
);
767 d
->c
->devices
[d
->id
] = NULL
;
768 closure_put(&d
->c
->caching
);
772 static void bcache_device_attach(struct bcache_device
*d
, struct cache_set
*c
,
779 if (id
>= c
->devices_max_used
)
780 c
->devices_max_used
= id
+ 1;
782 closure_get(&c
->caching
);
785 static inline int first_minor_to_idx(int first_minor
)
787 return (first_minor
/BCACHE_MINORS
);
790 static inline int idx_to_first_minor(int idx
)
792 return (idx
* BCACHE_MINORS
);
795 static void bcache_device_free(struct bcache_device
*d
)
797 struct gendisk
*disk
= d
->disk
;
799 lockdep_assert_held(&bch_register_lock
);
802 pr_info("%s stopped\n", disk
->disk_name
);
804 pr_err("bcache device (NULL gendisk) stopped\n");
807 bcache_device_detach(d
);
810 bool disk_added
= (disk
->flags
& GENHD_FL_UP
) != 0;
816 blk_cleanup_queue(disk
->queue
);
818 ida_simple_remove(&bcache_device_idx
,
819 first_minor_to_idx(disk
->first_minor
));
824 bioset_exit(&d
->bio_split
);
825 kvfree(d
->full_dirty_stripes
);
826 kvfree(d
->stripe_sectors_dirty
);
828 closure_debug_destroy(&d
->cl
);
831 static int bcache_device_init(struct bcache_device
*d
, unsigned int block_size
,
832 sector_t sectors
, struct block_device
*cached_bdev
,
833 const struct block_device_operations
*ops
)
835 struct request_queue
*q
;
836 const size_t max_stripes
= min_t(size_t, INT_MAX
,
837 SIZE_MAX
/ sizeof(atomic_t
));
842 d
->stripe_size
= 1 << 31;
844 d
->nr_stripes
= DIV_ROUND_UP_ULL(sectors
, d
->stripe_size
);
846 if (!d
->nr_stripes
|| d
->nr_stripes
> max_stripes
) {
847 pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)\n",
848 (unsigned int)d
->nr_stripes
);
852 n
= d
->nr_stripes
* sizeof(atomic_t
);
853 d
->stripe_sectors_dirty
= kvzalloc(n
, GFP_KERNEL
);
854 if (!d
->stripe_sectors_dirty
)
857 n
= BITS_TO_LONGS(d
->nr_stripes
) * sizeof(unsigned long);
858 d
->full_dirty_stripes
= kvzalloc(n
, GFP_KERNEL
);
859 if (!d
->full_dirty_stripes
)
862 idx
= ida_simple_get(&bcache_device_idx
, 0,
863 BCACHE_DEVICE_IDX_MAX
, GFP_KERNEL
);
867 if (bioset_init(&d
->bio_split
, 4, offsetof(struct bbio
, bio
),
868 BIOSET_NEED_BVECS
|BIOSET_NEED_RESCUER
))
871 d
->disk
= alloc_disk(BCACHE_MINORS
);
875 set_capacity(d
->disk
, sectors
);
876 snprintf(d
->disk
->disk_name
, DISK_NAME_LEN
, "bcache%i", idx
);
878 d
->disk
->major
= bcache_major
;
879 d
->disk
->first_minor
= idx_to_first_minor(idx
);
881 d
->disk
->private_data
= d
;
883 q
= blk_alloc_queue(NUMA_NO_NODE
);
888 q
->backing_dev_info
->congested_data
= d
;
889 q
->limits
.max_hw_sectors
= UINT_MAX
;
890 q
->limits
.max_sectors
= UINT_MAX
;
891 q
->limits
.max_segment_size
= UINT_MAX
;
892 q
->limits
.max_segments
= BIO_MAX_PAGES
;
893 blk_queue_max_discard_sectors(q
, UINT_MAX
);
894 q
->limits
.discard_granularity
= 512;
895 q
->limits
.io_min
= block_size
;
896 q
->limits
.logical_block_size
= block_size
;
897 q
->limits
.physical_block_size
= block_size
;
899 if (q
->limits
.logical_block_size
> PAGE_SIZE
&& cached_bdev
) {
901 * This should only happen with BCACHE_SB_VERSION_BDEV.
902 * Block/page size is checked for BCACHE_SB_VERSION_CDEV.
904 pr_info("%s: sb/logical block size (%u) greater than page size (%lu) falling back to device logical block size (%u)\n",
905 d
->disk
->disk_name
, q
->limits
.logical_block_size
,
906 PAGE_SIZE
, bdev_logical_block_size(cached_bdev
));
908 /* This also adjusts physical block size/min io size if needed */
909 blk_queue_logical_block_size(q
, bdev_logical_block_size(cached_bdev
));
912 blk_queue_flag_set(QUEUE_FLAG_NONROT
, d
->disk
->queue
);
913 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM
, d
->disk
->queue
);
914 blk_queue_flag_set(QUEUE_FLAG_DISCARD
, d
->disk
->queue
);
916 blk_queue_write_cache(q
, true, true);
921 ida_simple_remove(&bcache_device_idx
, idx
);
928 static void calc_cached_dev_sectors(struct cache_set
*c
)
930 uint64_t sectors
= 0;
931 struct cached_dev
*dc
;
933 list_for_each_entry(dc
, &c
->cached_devs
, list
)
934 sectors
+= bdev_sectors(dc
->bdev
);
936 c
->cached_dev_sectors
= sectors
;
939 #define BACKING_DEV_OFFLINE_TIMEOUT 5
940 static int cached_dev_status_update(void *arg
)
942 struct cached_dev
*dc
= arg
;
943 struct request_queue
*q
;
946 * If this delayed worker is stopping outside, directly quit here.
947 * dc->io_disable might be set via sysfs interface, so check it
950 while (!kthread_should_stop() && !dc
->io_disable
) {
951 q
= bdev_get_queue(dc
->bdev
);
952 if (blk_queue_dying(q
))
953 dc
->offline_seconds
++;
955 dc
->offline_seconds
= 0;
957 if (dc
->offline_seconds
>= BACKING_DEV_OFFLINE_TIMEOUT
) {
958 pr_err("%s: device offline for %d seconds\n",
959 dc
->backing_dev_name
,
960 BACKING_DEV_OFFLINE_TIMEOUT
);
961 pr_err("%s: disable I/O request due to backing device offline\n",
963 dc
->io_disable
= true;
964 /* let others know earlier that io_disable is true */
966 bcache_device_stop(&dc
->disk
);
969 schedule_timeout_interruptible(HZ
);
972 wait_for_kthread_stop();
977 int bch_cached_dev_run(struct cached_dev
*dc
)
979 struct bcache_device
*d
= &dc
->disk
;
980 char *buf
= kmemdup_nul(dc
->sb
.label
, SB_LABEL_SIZE
, GFP_KERNEL
);
983 kasprintf(GFP_KERNEL
, "CACHED_UUID=%pU", dc
->sb
.uuid
),
984 kasprintf(GFP_KERNEL
, "CACHED_LABEL=%s", buf
? : ""),
988 if (dc
->io_disable
) {
989 pr_err("I/O disabled on cached dev %s\n",
990 dc
->backing_dev_name
);
997 if (atomic_xchg(&dc
->running
, 1)) {
1001 pr_info("cached dev %s is running already\n",
1002 dc
->backing_dev_name
);
1007 BDEV_STATE(&dc
->sb
) != BDEV_STATE_NONE
) {
1010 closure_init_stack(&cl
);
1012 SET_BDEV_STATE(&dc
->sb
, BDEV_STATE_STALE
);
1013 bch_write_bdev_super(dc
, &cl
);
1018 bd_link_disk_holder(dc
->bdev
, dc
->disk
.disk
);
1020 * won't show up in the uevent file, use udevadm monitor -e instead
1021 * only class / kset properties are persistent
1023 kobject_uevent_env(&disk_to_dev(d
->disk
)->kobj
, KOBJ_CHANGE
, env
);
1028 if (sysfs_create_link(&d
->kobj
, &disk_to_dev(d
->disk
)->kobj
, "dev") ||
1029 sysfs_create_link(&disk_to_dev(d
->disk
)->kobj
,
1030 &d
->kobj
, "bcache")) {
1031 pr_err("Couldn't create bcache dev <-> disk sysfs symlinks\n");
1035 dc
->status_update_thread
= kthread_run(cached_dev_status_update
,
1036 dc
, "bcache_status_update");
1037 if (IS_ERR(dc
->status_update_thread
)) {
1038 pr_warn("failed to create bcache_status_update kthread, continue to run without monitoring backing device status\n");
/*
 * If BCACHE_DEV_RATE_DW_RUNNING is set, the routine of the delayed work
 * dc->writeback_rate_update is running. Wait until the routine quits
 * (BCACHE_DEV_RATE_DW_RUNNING is clear), then continue to cancel it. If
 * BCACHE_DEV_RATE_DW_RUNNING is still not clear after time_out seconds,
 * give up waiting and cancel it anyway.
 */
1051 static void cancel_writeback_rate_update_dwork(struct cached_dev
*dc
)
1053 int time_out
= WRITEBACK_RATE_UPDATE_SECS_MAX
* HZ
;
1056 if (!test_bit(BCACHE_DEV_RATE_DW_RUNNING
,
1060 schedule_timeout_interruptible(1);
1061 } while (time_out
> 0);
1064 pr_warn("give up waiting for dc->writeback_write_update to quit\n");
1066 cancel_delayed_work_sync(&dc
->writeback_rate_update
);
1069 static void cached_dev_detach_finish(struct work_struct
*w
)
1071 struct cached_dev
*dc
= container_of(w
, struct cached_dev
, detach
);
1074 closure_init_stack(&cl
);
1076 BUG_ON(!test_bit(BCACHE_DEV_DETACHING
, &dc
->disk
.flags
));
1077 BUG_ON(refcount_read(&dc
->count
));
1080 if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING
, &dc
->disk
.flags
))
1081 cancel_writeback_rate_update_dwork(dc
);
1083 if (!IS_ERR_OR_NULL(dc
->writeback_thread
)) {
1084 kthread_stop(dc
->writeback_thread
);
1085 dc
->writeback_thread
= NULL
;
1088 memset(&dc
->sb
.set_uuid
, 0, 16);
1089 SET_BDEV_STATE(&dc
->sb
, BDEV_STATE_NONE
);
1091 bch_write_bdev_super(dc
, &cl
);
1094 mutex_lock(&bch_register_lock
);
1096 calc_cached_dev_sectors(dc
->disk
.c
);
1097 bcache_device_detach(&dc
->disk
);
1098 list_move(&dc
->list
, &uncached_devices
);
1100 clear_bit(BCACHE_DEV_DETACHING
, &dc
->disk
.flags
);
1101 clear_bit(BCACHE_DEV_UNLINK_DONE
, &dc
->disk
.flags
);
1103 mutex_unlock(&bch_register_lock
);
1105 pr_info("Caching disabled for %s\n", dc
->backing_dev_name
);
1107 /* Drop ref we took in cached_dev_detach() */
1108 closure_put(&dc
->disk
.cl
);
1111 void bch_cached_dev_detach(struct cached_dev
*dc
)
1113 lockdep_assert_held(&bch_register_lock
);
1115 if (test_bit(BCACHE_DEV_CLOSING
, &dc
->disk
.flags
))
1118 if (test_and_set_bit(BCACHE_DEV_DETACHING
, &dc
->disk
.flags
))
1122 * Block the device from being closed and freed until we're finished
1125 closure_get(&dc
->disk
.cl
);
1127 bch_writeback_queue(dc
);
1132 int bch_cached_dev_attach(struct cached_dev
*dc
, struct cache_set
*c
,
1135 uint32_t rtime
= cpu_to_le32((u32
)ktime_get_real_seconds());
1136 struct uuid_entry
*u
;
1137 struct cached_dev
*exist_dc
, *t
;
1140 if ((set_uuid
&& memcmp(set_uuid
, c
->sb
.set_uuid
, 16)) ||
1141 (!set_uuid
&& memcmp(dc
->sb
.set_uuid
, c
->sb
.set_uuid
, 16)))
1145 pr_err("Can't attach %s: already attached\n",
1146 dc
->backing_dev_name
);
1150 if (test_bit(CACHE_SET_STOPPING
, &c
->flags
)) {
1151 pr_err("Can't attach %s: shutting down\n",
1152 dc
->backing_dev_name
);
1156 if (dc
->sb
.block_size
< c
->sb
.block_size
) {
1158 pr_err("Couldn't attach %s: block size less than set's block size\n",
1159 dc
->backing_dev_name
);
1163 /* Check whether already attached */
1164 list_for_each_entry_safe(exist_dc
, t
, &c
->cached_devs
, list
) {
1165 if (!memcmp(dc
->sb
.uuid
, exist_dc
->sb
.uuid
, 16)) {
1166 pr_err("Tried to attach %s but duplicate UUID already attached\n",
1167 dc
->backing_dev_name
);
1173 u
= uuid_find(c
, dc
->sb
.uuid
);
1176 (BDEV_STATE(&dc
->sb
) == BDEV_STATE_STALE
||
1177 BDEV_STATE(&dc
->sb
) == BDEV_STATE_NONE
)) {
1178 memcpy(u
->uuid
, invalid_uuid
, 16);
1179 u
->invalidated
= cpu_to_le32((u32
)ktime_get_real_seconds());
1184 if (BDEV_STATE(&dc
->sb
) == BDEV_STATE_DIRTY
) {
1185 pr_err("Couldn't find uuid for %s in set\n",
1186 dc
->backing_dev_name
);
1190 u
= uuid_find_empty(c
);
1192 pr_err("Not caching %s, no room for UUID\n",
1193 dc
->backing_dev_name
);
1199 * Deadlocks since we're called via sysfs...
1200 * sysfs_remove_file(&dc->kobj, &sysfs_attach);
1203 if (bch_is_zero(u
->uuid
, 16)) {
1206 closure_init_stack(&cl
);
1208 memcpy(u
->uuid
, dc
->sb
.uuid
, 16);
1209 memcpy(u
->label
, dc
->sb
.label
, SB_LABEL_SIZE
);
1210 u
->first_reg
= u
->last_reg
= rtime
;
1213 memcpy(dc
->sb
.set_uuid
, c
->sb
.set_uuid
, 16);
1214 SET_BDEV_STATE(&dc
->sb
, BDEV_STATE_CLEAN
);
1216 bch_write_bdev_super(dc
, &cl
);
1219 u
->last_reg
= rtime
;
1223 bcache_device_attach(&dc
->disk
, c
, u
- c
->uuids
);
1224 list_move(&dc
->list
, &c
->cached_devs
);
1225 calc_cached_dev_sectors(c
);
1228 * dc->c must be set before dc->count != 0 - paired with the mb in
1232 refcount_set(&dc
->count
, 1);
1234 /* Block writeback thread, but spawn it */
1235 down_write(&dc
->writeback_lock
);
1236 if (bch_cached_dev_writeback_start(dc
)) {
1237 up_write(&dc
->writeback_lock
);
1238 pr_err("Couldn't start writeback facilities for %s\n",
1239 dc
->disk
.disk
->disk_name
);
1243 if (BDEV_STATE(&dc
->sb
) == BDEV_STATE_DIRTY
) {
1244 atomic_set(&dc
->has_dirty
, 1);
1245 bch_writeback_queue(dc
);
1248 bch_sectors_dirty_init(&dc
->disk
);
1250 ret
= bch_cached_dev_run(dc
);
1251 if (ret
&& (ret
!= -EBUSY
)) {
1252 up_write(&dc
->writeback_lock
);
1254 * bch_register_lock is held, bcache_device_stop() is not
1255 * able to be directly called. The kthread and kworker
1256 * created previously in bch_cached_dev_writeback_start()
1257 * have to be stopped manually here.
1259 kthread_stop(dc
->writeback_thread
);
1260 cancel_writeback_rate_update_dwork(dc
);
1261 pr_err("Couldn't run cached device %s\n",
1262 dc
->backing_dev_name
);
1266 bcache_device_link(&dc
->disk
, c
, "bdev");
1267 atomic_inc(&c
->attached_dev_nr
);
1269 /* Allow the writeback thread to proceed */
1270 up_write(&dc
->writeback_lock
);
1272 pr_info("Caching %s as %s on set %pU\n",
1273 dc
->backing_dev_name
,
1274 dc
->disk
.disk
->disk_name
,
1275 dc
->disk
.c
->sb
.set_uuid
);
1279 /* when dc->disk.kobj released */
1280 void bch_cached_dev_release(struct kobject
*kobj
)
1282 struct cached_dev
*dc
= container_of(kobj
, struct cached_dev
,
1285 module_put(THIS_MODULE
);
1288 static void cached_dev_free(struct closure
*cl
)
1290 struct cached_dev
*dc
= container_of(cl
, struct cached_dev
, disk
.cl
);
1292 if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING
, &dc
->disk
.flags
))
1293 cancel_writeback_rate_update_dwork(dc
);
1295 if (!IS_ERR_OR_NULL(dc
->writeback_thread
))
1296 kthread_stop(dc
->writeback_thread
);
1297 if (!IS_ERR_OR_NULL(dc
->status_update_thread
))
1298 kthread_stop(dc
->status_update_thread
);
1300 mutex_lock(&bch_register_lock
);
1302 if (atomic_read(&dc
->running
))
1303 bd_unlink_disk_holder(dc
->bdev
, dc
->disk
.disk
);
1304 bcache_device_free(&dc
->disk
);
1305 list_del(&dc
->list
);
1307 mutex_unlock(&bch_register_lock
);
1310 put_page(virt_to_page(dc
->sb_disk
));
1312 if (!IS_ERR_OR_NULL(dc
->bdev
))
1313 blkdev_put(dc
->bdev
, FMODE_READ
|FMODE_WRITE
|FMODE_EXCL
);
1315 wake_up(&unregister_wait
);
1317 kobject_put(&dc
->disk
.kobj
);
1320 static void cached_dev_flush(struct closure
*cl
)
1322 struct cached_dev
*dc
= container_of(cl
, struct cached_dev
, disk
.cl
);
1323 struct bcache_device
*d
= &dc
->disk
;
1325 mutex_lock(&bch_register_lock
);
1326 bcache_device_unlink(d
);
1327 mutex_unlock(&bch_register_lock
);
1329 bch_cache_accounting_destroy(&dc
->accounting
);
1330 kobject_del(&d
->kobj
);
1332 continue_at(cl
, cached_dev_free
, system_wq
);
1335 static int cached_dev_init(struct cached_dev
*dc
, unsigned int block_size
)
1339 struct request_queue
*q
= bdev_get_queue(dc
->bdev
);
1341 __module_get(THIS_MODULE
);
1342 INIT_LIST_HEAD(&dc
->list
);
1343 closure_init(&dc
->disk
.cl
, NULL
);
1344 set_closure_fn(&dc
->disk
.cl
, cached_dev_flush
, system_wq
);
1345 kobject_init(&dc
->disk
.kobj
, &bch_cached_dev_ktype
);
1346 INIT_WORK(&dc
->detach
, cached_dev_detach_finish
);
1347 sema_init(&dc
->sb_write_mutex
, 1);
1348 INIT_LIST_HEAD(&dc
->io_lru
);
1349 spin_lock_init(&dc
->io_lock
);
1350 bch_cache_accounting_init(&dc
->accounting
, &dc
->disk
.cl
);
1352 dc
->sequential_cutoff
= 4 << 20;
1354 for (io
= dc
->io
; io
< dc
->io
+ RECENT_IO
; io
++) {
1355 list_add(&io
->lru
, &dc
->io_lru
);
1356 hlist_add_head(&io
->hash
, dc
->io_hash
+ RECENT_IO
);
1359 dc
->disk
.stripe_size
= q
->limits
.io_opt
>> 9;
1361 if (dc
->disk
.stripe_size
)
1362 dc
->partial_stripes_expensive
=
1363 q
->limits
.raid_partial_stripes_expensive
;
1365 ret
= bcache_device_init(&dc
->disk
, block_size
,
1366 dc
->bdev
->bd_part
->nr_sects
- dc
->sb
.data_offset
,
1367 dc
->bdev
, &bcache_cached_ops
);
1371 dc
->disk
.disk
->queue
->backing_dev_info
->ra_pages
=
1372 max(dc
->disk
.disk
->queue
->backing_dev_info
->ra_pages
,
1373 q
->backing_dev_info
->ra_pages
);
1375 atomic_set(&dc
->io_errors
, 0);
1376 dc
->io_disable
= false;
1377 dc
->error_limit
= DEFAULT_CACHED_DEV_ERROR_LIMIT
;
1378 /* default to auto */
1379 dc
->stop_when_cache_set_failed
= BCH_CACHED_DEV_STOP_AUTO
;
1381 bch_cached_dev_request_init(dc
);
1382 bch_cached_dev_writeback_init(dc
);
1386 /* Cached device - bcache superblock */
1388 static int register_bdev(struct cache_sb
*sb
, struct cache_sb_disk
*sb_disk
,
1389 struct block_device
*bdev
,
1390 struct cached_dev
*dc
)
1392 const char *err
= "cannot allocate memory";
1393 struct cache_set
*c
;
1396 bdevname(bdev
, dc
->backing_dev_name
);
1397 memcpy(&dc
->sb
, sb
, sizeof(struct cache_sb
));
1399 dc
->bdev
->bd_holder
= dc
;
1400 dc
->sb_disk
= sb_disk
;
1402 if (cached_dev_init(dc
, sb
->block_size
<< 9))
1405 err
= "error creating kobject";
1406 if (kobject_add(&dc
->disk
.kobj
, &part_to_dev(bdev
->bd_part
)->kobj
,
1409 if (bch_cache_accounting_add_kobjs(&dc
->accounting
, &dc
->disk
.kobj
))
1412 pr_info("registered backing device %s\n", dc
->backing_dev_name
);
1414 list_add(&dc
->list
, &uncached_devices
);
1415 /* attach to a matched cache set if it exists */
1416 list_for_each_entry(c
, &bch_cache_sets
, list
)
1417 bch_cached_dev_attach(dc
, c
, NULL
);
1419 if (BDEV_STATE(&dc
->sb
) == BDEV_STATE_NONE
||
1420 BDEV_STATE(&dc
->sb
) == BDEV_STATE_STALE
) {
1421 err
= "failed to run cached device";
1422 ret
= bch_cached_dev_run(dc
);
1429 pr_notice("error %s: %s\n", dc
->backing_dev_name
, err
);
1430 bcache_device_stop(&dc
->disk
);
1434 /* Flash only volumes */
1436 /* When d->kobj released */
1437 void bch_flash_dev_release(struct kobject
*kobj
)
1439 struct bcache_device
*d
= container_of(kobj
, struct bcache_device
,
1444 static void flash_dev_free(struct closure
*cl
)
1446 struct bcache_device
*d
= container_of(cl
, struct bcache_device
, cl
);
1448 mutex_lock(&bch_register_lock
);
1449 atomic_long_sub(bcache_dev_sectors_dirty(d
),
1450 &d
->c
->flash_dev_dirty_sectors
);
1451 bcache_device_free(d
);
1452 mutex_unlock(&bch_register_lock
);
1453 kobject_put(&d
->kobj
);
1456 static void flash_dev_flush(struct closure
*cl
)
1458 struct bcache_device
*d
= container_of(cl
, struct bcache_device
, cl
);
1460 mutex_lock(&bch_register_lock
);
1461 bcache_device_unlink(d
);
1462 mutex_unlock(&bch_register_lock
);
1463 kobject_del(&d
->kobj
);
1464 continue_at(cl
, flash_dev_free
, system_wq
);
1467 static int flash_dev_run(struct cache_set
*c
, struct uuid_entry
*u
)
1469 struct bcache_device
*d
= kzalloc(sizeof(struct bcache_device
),
1474 closure_init(&d
->cl
, NULL
);
1475 set_closure_fn(&d
->cl
, flash_dev_flush
, system_wq
);
1477 kobject_init(&d
->kobj
, &bch_flash_dev_ktype
);
1479 if (bcache_device_init(d
, block_bytes(c
), u
->sectors
,
1480 NULL
, &bcache_flash_ops
))
1483 bcache_device_attach(d
, c
, u
- c
->uuids
);
1484 bch_sectors_dirty_init(d
);
1485 bch_flash_dev_request_init(d
);
1488 if (kobject_add(&d
->kobj
, &disk_to_dev(d
->disk
)->kobj
, "bcache"))
1491 bcache_device_link(d
, c
, "volume");
1495 kobject_put(&d
->kobj
);
1499 static int flash_devs_run(struct cache_set
*c
)
1502 struct uuid_entry
*u
;
1505 u
< c
->uuids
+ c
->nr_uuids
&& !ret
;
1507 if (UUID_FLASH_ONLY(u
))
1508 ret
= flash_dev_run(c
, u
);
1513 int bch_flash_dev_create(struct cache_set
*c
, uint64_t size
)
1515 struct uuid_entry
*u
;
1517 if (test_bit(CACHE_SET_STOPPING
, &c
->flags
))
1520 if (!test_bit(CACHE_SET_RUNNING
, &c
->flags
))
1523 u
= uuid_find_empty(c
);
1525 pr_err("Can't create volume, no room for UUID\n");
1529 get_random_bytes(u
->uuid
, 16);
1530 memset(u
->label
, 0, 32);
1531 u
->first_reg
= u
->last_reg
= cpu_to_le32((u32
)ktime_get_real_seconds());
1533 SET_UUID_FLASH_ONLY(u
, 1);
1534 u
->sectors
= size
>> 9;
1538 return flash_dev_run(c
, u
);
1541 bool bch_cached_dev_error(struct cached_dev
*dc
)
1543 if (!dc
|| test_bit(BCACHE_DEV_CLOSING
, &dc
->disk
.flags
))
1546 dc
->io_disable
= true;
1547 /* make others know io_disable is true earlier */
1550 pr_err("stop %s: too many IO errors on backing device %s\n",
1551 dc
->disk
.disk
->disk_name
, dc
->backing_dev_name
);
1553 bcache_device_stop(&dc
->disk
);
1560 bool bch_cache_set_error(struct cache_set
*c
, const char *fmt
, ...)
1562 struct va_format vaf
;
1565 if (c
->on_error
!= ON_ERROR_PANIC
&&
1566 test_bit(CACHE_SET_STOPPING
, &c
->flags
))
1569 if (test_and_set_bit(CACHE_SET_IO_DISABLE
, &c
->flags
))
1570 pr_info("CACHE_SET_IO_DISABLE already set\n");
1573 * XXX: we can be called from atomic context
1574 * acquire_console_sem();
1577 va_start(args
, fmt
);
1582 pr_err("error on %pU: %pV, disabling caching\n",
1583 c
->sb
.set_uuid
, &vaf
);
1587 if (c
->on_error
== ON_ERROR_PANIC
)
1588 panic("panic forced after error\n");
1590 bch_cache_set_unregister(c
);
1594 /* When c->kobj released */
1595 void bch_cache_set_release(struct kobject
*kobj
)
1597 struct cache_set
*c
= container_of(kobj
, struct cache_set
, kobj
);
1600 module_put(THIS_MODULE
);
1603 static void cache_set_free(struct closure
*cl
)
1605 struct cache_set
*c
= container_of(cl
, struct cache_set
, cl
);
1609 debugfs_remove(c
->debug
);
1611 bch_open_buckets_free(c
);
1612 bch_btree_cache_free(c
);
1613 bch_journal_free(c
);
1615 mutex_lock(&bch_register_lock
);
1616 for_each_cache(ca
, c
, i
)
1619 c
->cache
[ca
->sb
.nr_this_dev
] = NULL
;
1620 kobject_put(&ca
->kobj
);
1623 bch_bset_sort_state_free(&c
->sort
);
1624 free_pages((unsigned long) c
->uuids
, ilog2(bucket_pages(c
)));
1626 if (c
->moving_gc_wq
)
1627 destroy_workqueue(c
->moving_gc_wq
);
1628 bioset_exit(&c
->bio_split
);
1629 mempool_exit(&c
->fill_iter
);
1630 mempool_exit(&c
->bio_meta
);
1631 mempool_exit(&c
->search
);
1635 mutex_unlock(&bch_register_lock
);
1637 pr_info("Cache set %pU unregistered\n", c
->sb
.set_uuid
);
1638 wake_up(&unregister_wait
);
1640 closure_debug_destroy(&c
->cl
);
1641 kobject_put(&c
->kobj
);
1644 static void cache_set_flush(struct closure
*cl
)
1646 struct cache_set
*c
= container_of(cl
, struct cache_set
, caching
);
1651 bch_cache_accounting_destroy(&c
->accounting
);
1653 kobject_put(&c
->internal
);
1654 kobject_del(&c
->kobj
);
1656 if (!IS_ERR_OR_NULL(c
->gc_thread
))
1657 kthread_stop(c
->gc_thread
);
1659 if (!IS_ERR_OR_NULL(c
->root
))
1660 list_add(&c
->root
->list
, &c
->btree_cache
);
1663 * Avoid flushing cached nodes if cache set is retiring
1664 * due to too many I/O errors detected.
1666 if (!test_bit(CACHE_SET_IO_DISABLE
, &c
->flags
))
1667 list_for_each_entry(b
, &c
->btree_cache
, list
) {
1668 mutex_lock(&b
->write_lock
);
1669 if (btree_node_dirty(b
))
1670 __bch_btree_node_write(b
, NULL
);
1671 mutex_unlock(&b
->write_lock
);
1674 for_each_cache(ca
, c
, i
)
1675 if (ca
->alloc_thread
)
1676 kthread_stop(ca
->alloc_thread
);
1678 if (c
->journal
.cur
) {
1679 cancel_delayed_work_sync(&c
->journal
.work
);
1680 /* flush last journal entry if needed */
1681 c
->journal
.work
.work
.func(&c
->journal
.work
.work
);
/*
 * This function is only called when CACHE_SET_IO_DISABLE is set, which means
 * the cache set is unregistering due to too many I/O errors. In this condition,
 * the bcache device might be stopped; it depends on the stop_when_cache_set_failed
 * value and whether the broken cache has dirty data:
 *
 * dc->stop_when_cache_set_failed	dc->has_dirty	stop bcache device
 *  BCH_CACHED_STOP_AUTO		0		NO
 *  BCH_CACHED_STOP_AUTO		1		YES
 *  BCH_CACHED_DEV_STOP_ALWAYS		0		YES
 *  BCH_CACHED_DEV_STOP_ALWAYS		1		YES
 *
 * The expected behavior is, if stop_when_cache_set_failed is configured to
 * "auto" via the sysfs interface, the bcache device will not be stopped if the
 * backing device is clean on the broken cache device.
 */
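/*
 * Usage example (illustrative; the exact sysfs path may differ by setup):
 * the policy above is selected per backing device through sysfs, e.g.
 *
 *	echo always > /sys/block/bcache0/bcache/stop_when_cache_set_failed
 *	echo auto   > /sys/block/bcache0/bcache/stop_when_cache_set_failed
 */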
1703 static void conditional_stop_bcache_device(struct cache_set
*c
,
1704 struct bcache_device
*d
,
1705 struct cached_dev
*dc
)
1707 if (dc
->stop_when_cache_set_failed
== BCH_CACHED_DEV_STOP_ALWAYS
) {
1708 pr_warn("stop_when_cache_set_failed of %s is \"always\", stop it for failed cache set %pU.\n",
1709 d
->disk
->disk_name
, c
->sb
.set_uuid
);
1710 bcache_device_stop(d
);
1711 } else if (atomic_read(&dc
->has_dirty
)) {
1713 * dc->stop_when_cache_set_failed == BCH_CACHED_STOP_AUTO
1714 * and dc->has_dirty == 1
1716 pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is dirty, stop it to avoid potential data corruption.\n",
1717 d
->disk
->disk_name
);
1719 * There might be a small time gap that cache set is
1720 * released but bcache device is not. Inside this time
1721 * gap, regular I/O requests will directly go into
1722 * backing device as no cache set attached to. This
1723 * behavior may also introduce potential inconsistence
1724 * data in writeback mode while cache is dirty.
1725 * Therefore before calling bcache_device_stop() due
1726 * to a broken cache device, dc->io_disable should be
1727 * explicitly set to true.
1729 dc
->io_disable
= true;
1730 /* make others know io_disable is true earlier */
1732 bcache_device_stop(d
);
1735 * dc->stop_when_cache_set_failed == BCH_CACHED_STOP_AUTO
1736 * and dc->has_dirty == 0
1738 pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is clean, keep it alive.\n",
1739 d
->disk
->disk_name
);
1743 static void __cache_set_unregister(struct closure
*cl
)
1745 struct cache_set
*c
= container_of(cl
, struct cache_set
, caching
);
1746 struct cached_dev
*dc
;
1747 struct bcache_device
*d
;
1750 mutex_lock(&bch_register_lock
);
1752 for (i
= 0; i
< c
->devices_max_used
; i
++) {
1757 if (!UUID_FLASH_ONLY(&c
->uuids
[i
]) &&
1758 test_bit(CACHE_SET_UNREGISTERING
, &c
->flags
)) {
1759 dc
= container_of(d
, struct cached_dev
, disk
);
1760 bch_cached_dev_detach(dc
);
1761 if (test_bit(CACHE_SET_IO_DISABLE
, &c
->flags
))
1762 conditional_stop_bcache_device(c
, d
, dc
);
1764 bcache_device_stop(d
);
1768 mutex_unlock(&bch_register_lock
);
1770 continue_at(cl
, cache_set_flush
, system_wq
);
1773 void bch_cache_set_stop(struct cache_set
*c
)
1775 if (!test_and_set_bit(CACHE_SET_STOPPING
, &c
->flags
))
1776 /* closure_fn set to __cache_set_unregister() */
1777 closure_queue(&c
->caching
);
1780 void bch_cache_set_unregister(struct cache_set
*c
)
1782 set_bit(CACHE_SET_UNREGISTERING
, &c
->flags
);
1783 bch_cache_set_stop(c
);
1786 #define alloc_bucket_pages(gfp, c) \
1787 ((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c))))
1789 struct cache_set
*bch_cache_set_alloc(struct cache_sb
*sb
)
1792 struct cache_set
*c
= kzalloc(sizeof(struct cache_set
), GFP_KERNEL
);
1797 __module_get(THIS_MODULE
);
1798 closure_init(&c
->cl
, NULL
);
1799 set_closure_fn(&c
->cl
, cache_set_free
, system_wq
);
1801 closure_init(&c
->caching
, &c
->cl
);
1802 set_closure_fn(&c
->caching
, __cache_set_unregister
, system_wq
);
1804 /* Maybe create continue_at_noreturn() and use it here? */
1805 closure_set_stopped(&c
->cl
);
1806 closure_put(&c
->cl
);
1808 kobject_init(&c
->kobj
, &bch_cache_set_ktype
);
1809 kobject_init(&c
->internal
, &bch_cache_set_internal_ktype
);
1811 bch_cache_accounting_init(&c
->accounting
, &c
->cl
);
1813 memcpy(c
->sb
.set_uuid
, sb
->set_uuid
, 16);
1814 c
->sb
.block_size
= sb
->block_size
;
1815 c
->sb
.bucket_size
= sb
->bucket_size
;
1816 c
->sb
.nr_in_set
= sb
->nr_in_set
;
1817 c
->sb
.last_mount
= sb
->last_mount
;
1818 c
->bucket_bits
= ilog2(sb
->bucket_size
);
1819 c
->block_bits
= ilog2(sb
->block_size
);
1820 c
->nr_uuids
= bucket_bytes(c
) / sizeof(struct uuid_entry
);
1821 c
->devices_max_used
= 0;
1822 atomic_set(&c
->attached_dev_nr
, 0);
1823 c
->btree_pages
= bucket_pages(c
);
1824 if (c
->btree_pages
> BTREE_MAX_PAGES
)
1825 c
->btree_pages
= max_t(int, c
->btree_pages
/ 4,
1828 sema_init(&c
->sb_write_mutex
, 1);
1829 mutex_init(&c
->bucket_lock
);
1830 init_waitqueue_head(&c
->btree_cache_wait
);
1831 spin_lock_init(&c
->btree_cannibalize_lock
);
1832 init_waitqueue_head(&c
->bucket_wait
);
1833 init_waitqueue_head(&c
->gc_wait
);
1834 sema_init(&c
->uuid_write_mutex
, 1);
1836 spin_lock_init(&c
->btree_gc_time
.lock
);
1837 spin_lock_init(&c
->btree_split_time
.lock
);
1838 spin_lock_init(&c
->btree_read_time
.lock
);
1840 bch_moving_init_cache_set(c
);
1842 INIT_LIST_HEAD(&c
->list
);
1843 INIT_LIST_HEAD(&c
->cached_devs
);
1844 INIT_LIST_HEAD(&c
->btree_cache
);
1845 INIT_LIST_HEAD(&c
->btree_cache_freeable
);
1846 INIT_LIST_HEAD(&c
->btree_cache_freed
);
1847 INIT_LIST_HEAD(&c
->data_buckets
);
1849 iter_size
= (sb
->bucket_size
/ sb
->block_size
+ 1) *
1850 sizeof(struct btree_iter_set
);
1852 if (!(c
->devices
= kcalloc(c
->nr_uuids
, sizeof(void *), GFP_KERNEL
)) ||
1853 mempool_init_slab_pool(&c
->search
, 32, bch_search_cache
) ||
1854 mempool_init_kmalloc_pool(&c
->bio_meta
, 2,
1855 sizeof(struct bbio
) + sizeof(struct bio_vec
) *
1857 mempool_init_kmalloc_pool(&c
->fill_iter
, 1, iter_size
) ||
1858 bioset_init(&c
->bio_split
, 4, offsetof(struct bbio
, bio
),
1859 BIOSET_NEED_BVECS
|BIOSET_NEED_RESCUER
) ||
1860 !(c
->uuids
= alloc_bucket_pages(GFP_KERNEL
, c
)) ||
1861 !(c
->moving_gc_wq
= alloc_workqueue("bcache_gc",
1862 WQ_MEM_RECLAIM
, 0)) ||
1863 bch_journal_alloc(c
) ||
1864 bch_btree_cache_alloc(c
) ||
1865 bch_open_buckets_alloc(c
) ||
1866 bch_bset_sort_state_init(&c
->sort
, ilog2(c
->btree_pages
)))
1869 c
->congested_read_threshold_us
= 2000;
1870 c
->congested_write_threshold_us
= 20000;
1871 c
->error_limit
= DEFAULT_IO_ERROR_LIMIT
;
1872 c
->idle_max_writeback_rate_enabled
= 1;
1873 WARN_ON(test_and_clear_bit(CACHE_SET_IO_DISABLE
, &c
->flags
));
1877 bch_cache_set_unregister(c
);
1881 static int run_cache_set(struct cache_set
*c
)
1883 const char *err
= "cannot allocate memory";
1884 struct cached_dev
*dc
, *t
;
1889 struct journal_replay
*l
;
1891 closure_init_stack(&cl
);
1893 for_each_cache(ca
, c
, i
)
1894 c
->nbuckets
+= ca
->sb
.nbuckets
;
1897 if (CACHE_SYNC(&c
->sb
)) {
1901 err
= "cannot allocate memory for journal";
1902 if (bch_journal_read(c
, &journal
))
1905 pr_debug("btree_journal_read() done\n");
1907 err
= "no journal entries found";
1908 if (list_empty(&journal
))
1911 j
= &list_entry(journal
.prev
, struct journal_replay
, list
)->j
;
1913 err
= "IO error reading priorities";
1914 for_each_cache(ca
, c
, i
) {
1915 if (prio_read(ca
, j
->prio_bucket
[ca
->sb
.nr_this_dev
]))
1920 * If prio_read() fails it'll call cache_set_error and we'll
1921 * tear everything down right away, but if we perhaps checked
1922 * sooner we could avoid journal replay.
1927 err
= "bad btree root";
1928 if (__bch_btree_ptr_invalid(c
, k
))
1931 err
= "error reading btree root";
1932 c
->root
= bch_btree_node_get(c
, NULL
, k
,
1935 if (IS_ERR_OR_NULL(c
->root
))
1938 list_del_init(&c
->root
->list
);
1939 rw_unlock(true, c
->root
);
1941 err
= uuid_read(c
, j
, &cl
);
1945 err
= "error in recovery";
1946 if (bch_btree_check(c
))
1949 bch_journal_mark(c
, &journal
);
1950 bch_initial_gc_finish(c
);
1951 pr_debug("btree_check() done\n");
1954 * bcache_journal_next() can't happen sooner, or
1955 * btree_gc_finish() will give spurious errors about last_gc >
1956 * gc_gen - this is a hack but oh well.
1958 bch_journal_next(&c
->journal
);
1960 err
= "error starting allocator thread";
1961 for_each_cache(ca
, c
, i
)
1962 if (bch_cache_allocator_start(ca
))
1966 * First place it's safe to allocate: btree_check() and
1967 * btree_gc_finish() have to run before we have buckets to
1968 * allocate, and bch_bucket_alloc_set() might cause a journal
1969 * entry to be written so bcache_journal_next() has to be called
1972 * If the uuids were in the old format we have to rewrite them
1973 * before the next journal entry is written:
1975 if (j
->version
< BCACHE_JSET_VERSION_UUID
)
1978 err
= "bcache: replay journal failed";
1979 if (bch_journal_replay(c
, &journal
))
1982 pr_notice("invalidating existing data\n");
1984 for_each_cache(ca
, c
, i
) {
1987 ca
->sb
.keys
= clamp_t(int, ca
->sb
.nbuckets
>> 7,
1988 2, SB_JOURNAL_BUCKETS
);
1990 for (j
= 0; j
< ca
->sb
.keys
; j
++)
1991 ca
->sb
.d
[j
] = ca
->sb
.first_bucket
+ j
;
1994 bch_initial_gc_finish(c
);
1996 err
= "error starting allocator thread";
1997 for_each_cache(ca
, c
, i
)
1998 if (bch_cache_allocator_start(ca
))
2001 mutex_lock(&c
->bucket_lock
);
2002 for_each_cache(ca
, c
, i
)
2003 bch_prio_write(ca
, true);
2004 mutex_unlock(&c
->bucket_lock
);
2006 err
= "cannot allocate new UUID bucket";
2007 if (__uuid_write(c
))
2010 err
= "cannot allocate new btree root";
2011 c
->root
= __bch_btree_node_alloc(c
, NULL
, 0, true, NULL
);
2012 if (IS_ERR_OR_NULL(c
->root
))
2015 mutex_lock(&c
->root
->write_lock
);
2016 bkey_copy_key(&c
->root
->key
, &MAX_KEY
);
2017 bch_btree_node_write(c
->root
, &cl
);
2018 mutex_unlock(&c
->root
->write_lock
);
2020 bch_btree_set_root(c
->root
);
2021 rw_unlock(true, c
->root
);
2024 * We don't want to write the first journal entry until
2025 * everything is set up - fortunately journal entries won't be
2026 * written until the SET_CACHE_SYNC() here:
2028 SET_CACHE_SYNC(&c
->sb
, true);
2030 bch_journal_next(&c
->journal
);
2031 bch_journal_meta(c
, &cl
);
2034 err
= "error starting gc thread";
2035 if (bch_gc_thread_start(c
))
2039 c
->sb
.last_mount
= (u32
)ktime_get_real_seconds();
2040 bcache_write_super(c
);
2042 list_for_each_entry_safe(dc
, t
, &uncached_devices
, list
)
2043 bch_cached_dev_attach(dc
, c
, NULL
);
2047 set_bit(CACHE_SET_RUNNING
, &c
->flags
);
2050 while (!list_empty(&journal
)) {
2051 l
= list_first_entry(&journal
, struct journal_replay
, list
);
2058 bch_cache_set_error(c
, "%s", err
);
2063 static bool can_attach_cache(struct cache
*ca
, struct cache_set
*c
)
2065 return ca
->sb
.block_size
== c
->sb
.block_size
&&
2066 ca
->sb
.bucket_size
== c
->sb
.bucket_size
&&
2067 ca
->sb
.nr_in_set
== c
->sb
.nr_in_set
;
2070 static const char *register_cache_set(struct cache
*ca
)
2073 const char *err
= "cannot allocate memory";
2074 struct cache_set
*c
;
2076 list_for_each_entry(c
, &bch_cache_sets
, list
)
2077 if (!memcmp(c
->sb
.set_uuid
, ca
->sb
.set_uuid
, 16)) {
2078 if (c
->cache
[ca
->sb
.nr_this_dev
])
2079 return "duplicate cache set member";
2081 if (!can_attach_cache(ca
, c
))
2082 return "cache sb does not match set";
2084 if (!CACHE_SYNC(&ca
->sb
))
2085 SET_CACHE_SYNC(&c
->sb
, false);
2090 c
= bch_cache_set_alloc(&ca
->sb
);
2094 err
= "error creating kobject";
2095 if (kobject_add(&c
->kobj
, bcache_kobj
, "%pU", c
->sb
.set_uuid
) ||
2096 kobject_add(&c
->internal
, &c
->kobj
, "internal"))
2099 if (bch_cache_accounting_add_kobjs(&c
->accounting
, &c
->kobj
))
2102 bch_debug_init_cache_set(c
);
2104 list_add(&c
->list
, &bch_cache_sets
);
2106 sprintf(buf
, "cache%i", ca
->sb
.nr_this_dev
);
2107 if (sysfs_create_link(&ca
->kobj
, &c
->kobj
, "set") ||
2108 sysfs_create_link(&c
->kobj
, &ca
->kobj
, buf
))
2111 if (ca
->sb
.seq
> c
->sb
.seq
) {
2112 c
->sb
.version
= ca
->sb
.version
;
2113 memcpy(c
->sb
.set_uuid
, ca
->sb
.set_uuid
, 16);
2114 c
->sb
.flags
= ca
->sb
.flags
;
2115 c
->sb
.seq
= ca
->sb
.seq
;
2116 pr_debug("set version = %llu\n", c
->sb
.version
);
2119 kobject_get(&ca
->kobj
);
2121 ca
->set
->cache
[ca
->sb
.nr_this_dev
] = ca
;
2122 c
->cache_by_alloc
[c
->caches_loaded
++] = ca
;
2124 if (c
->caches_loaded
== c
->sb
.nr_in_set
) {
2125 err
= "failed to run cache set";
2126 if (run_cache_set(c
) < 0)
2132 bch_cache_set_unregister(c
);
2138 /* When ca->kobj released */
2139 void bch_cache_release(struct kobject
*kobj
)
2141 struct cache
*ca
= container_of(kobj
, struct cache
, kobj
);
2145 BUG_ON(ca
->set
->cache
[ca
->sb
.nr_this_dev
] != ca
);
2146 ca
->set
->cache
[ca
->sb
.nr_this_dev
] = NULL
;
2149 free_pages((unsigned long) ca
->disk_buckets
, ilog2(bucket_pages(ca
)));
2150 kfree(ca
->prio_buckets
);
2153 free_heap(&ca
->heap
);
2154 free_fifo(&ca
->free_inc
);
2156 for (i
= 0; i
< RESERVE_NR
; i
++)
2157 free_fifo(&ca
->free
[i
]);
2160 put_page(virt_to_page(ca
->sb_disk
));
2162 if (!IS_ERR_OR_NULL(ca
->bdev
))
2163 blkdev_put(ca
->bdev
, FMODE_READ
|FMODE_WRITE
|FMODE_EXCL
);
2166 module_put(THIS_MODULE
);
2169 static int cache_alloc(struct cache
*ca
)
2172 size_t btree_buckets
;
2175 const char *err
= NULL
;
2177 __module_get(THIS_MODULE
);
2178 kobject_init(&ca
->kobj
, &bch_cache_ktype
);
2180 bio_init(&ca
->journal
.bio
, ca
->journal
.bio
.bi_inline_vecs
, 8);
	/*
	 * When ca->sb.njournal_buckets is not zero, a journal exists, and
	 * in bch_journal_replay() tree nodes may split, so buckets of the
	 * RESERVE_BTREE type are needed. The worst case is that every
	 * journal bucket holds valid journal entries and all of the keys
	 * need to be replayed, so the number of RESERVE_BTREE buckets
	 * should be as large as the number of journal buckets.
	 */
	btree_buckets = ca->sb.njournal_buckets ?: 8;
	free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
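	/*
	 * Worked example (illustrative numbers): with ca->sb.nbuckets =
	 * 1,000,000, roundup_pow_of_two() gives 2^20, so free = 2^20 >> 10 =
	 * 1024 buckets sizes each of the RESERVE_MOVINGGC and RESERVE_NONE
	 * fifos below; a cache without a journal still reserves 8
	 * RESERVE_BTREE buckets via the ?: fallback above.
	 */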
2195 err
= "ca->sb.nbuckets is too small";
2199 if (!init_fifo(&ca
->free
[RESERVE_BTREE
], btree_buckets
,
2201 err
= "ca->free[RESERVE_BTREE] alloc failed";
2202 goto err_btree_alloc
;
2205 if (!init_fifo_exact(&ca
->free
[RESERVE_PRIO
], prio_buckets(ca
),
2207 err
= "ca->free[RESERVE_PRIO] alloc failed";
2208 goto err_prio_alloc
;
2211 if (!init_fifo(&ca
->free
[RESERVE_MOVINGGC
], free
, GFP_KERNEL
)) {
2212 err
= "ca->free[RESERVE_MOVINGGC] alloc failed";
2213 goto err_movinggc_alloc
;
2216 if (!init_fifo(&ca
->free
[RESERVE_NONE
], free
, GFP_KERNEL
)) {
2217 err
= "ca->free[RESERVE_NONE] alloc failed";
2218 goto err_none_alloc
;
2221 if (!init_fifo(&ca
->free_inc
, free
<< 2, GFP_KERNEL
)) {
2222 err
= "ca->free_inc alloc failed";
2223 goto err_free_inc_alloc
;
2226 if (!init_heap(&ca
->heap
, free
<< 3, GFP_KERNEL
)) {
2227 err
= "ca->heap alloc failed";
2228 goto err_heap_alloc
;
2231 ca
->buckets
= vzalloc(array_size(sizeof(struct bucket
),
2234 err
= "ca->buckets alloc failed";
2235 goto err_buckets_alloc
;
2238 ca
->prio_buckets
= kzalloc(array3_size(sizeof(uint64_t),
2239 prio_buckets(ca
), 2),
2241 if (!ca
->prio_buckets
) {
2242 err
= "ca->prio_buckets alloc failed";
2243 goto err_prio_buckets_alloc
;
2246 ca
->disk_buckets
= alloc_bucket_pages(GFP_KERNEL
, ca
);
2247 if (!ca
->disk_buckets
) {
2248 err
= "ca->disk_buckets alloc failed";
2249 goto err_disk_buckets_alloc
;
2252 ca
->prio_last_buckets
= ca
->prio_buckets
+ prio_buckets(ca
);
2254 for_each_bucket(b
, ca
)
2255 atomic_set(&b
->pin
, 0);
2258 err_disk_buckets_alloc
:
2259 kfree(ca
->prio_buckets
);
2260 err_prio_buckets_alloc
:
2263 free_heap(&ca
->heap
);
2265 free_fifo(&ca
->free_inc
);
2267 free_fifo(&ca
->free
[RESERVE_NONE
]);
2269 free_fifo(&ca
->free
[RESERVE_MOVINGGC
]);
2271 free_fifo(&ca
->free
[RESERVE_PRIO
]);
2273 free_fifo(&ca
->free
[RESERVE_BTREE
]);
2276 module_put(THIS_MODULE
);
2278 pr_notice("error %s: %s\n", ca
->cache_dev_name
, err
);
static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
				struct block_device *bdev, struct cache *ca)
{
	const char *err = NULL; /* must be set for any error case */
	int ret = 0;

	bdevname(bdev, ca->cache_dev_name);
	memcpy(&ca->sb, sb, sizeof(struct cache_sb));
	ca->bdev = bdev;
	ca->bdev->bd_holder = ca;
	ca->sb_disk = sb_disk;

	if (blk_queue_discard(bdev_get_queue(bdev)))
		ca->discard = CACHE_DISCARD(&ca->sb);

	ret = cache_alloc(ca);
	if (ret != 0) {
		/*
		 * If we failed here, it means ca->kobj is not initialized yet,
		 * kobject_put() won't be called and there is no chance to
		 * call blkdev_put() to bdev in bch_cache_release(). So we
		 * explicitly call blkdev_put() here.
		 */
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
		if (ret == -ENOMEM)
			err = "cache_alloc(): -ENOMEM";
		else if (ret == -EPERM)
			err = "cache_alloc(): cache device is too small";
		else
			err = "cache_alloc(): unknown error";
		goto err;
	}

	if (kobject_add(&ca->kobj,
			&part_to_dev(bdev->bd_part)->kobj,
			"bcache")) {
		err = "error calling kobject_add";
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&bch_register_lock);
	err = register_cache_set(ca);
	mutex_unlock(&bch_register_lock);

	if (err) {
		ret = -ENODEV;
		goto out;
	}

	pr_info("registered cache device %s\n", ca->cache_dev_name);

out:
	kobject_put(&ca->kobj);

err:
	if (err)
		pr_notice("error %s: %s\n", ca->cache_dev_name, err);

	return ret;
}
/* Global interfaces/init */

static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
			       const char *buffer, size_t size);
static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
					 struct kobj_attribute *attr,
					 const char *buffer, size_t size);

kobj_attribute_write(register,		register_bcache);
kobj_attribute_write(register_quiet,	register_bcache);
kobj_attribute_write(register_async,	register_bcache);
kobj_attribute_write(pendings_cleanup,	bch_pending_bdevs_cleanup);
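
/*
 * register, register_quiet and register_async all funnel into
 * register_bcache(); they only differ in how the write is reported back:
 * register_quiet stays silent when the device is already registered or
 * busy, and register_async returns before the registration work completes.
 */
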
static bool bch_is_open_backing(struct block_device *bdev)
{
	struct cache_set *c, *tc;
	struct cached_dev *dc, *t;

	list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
		list_for_each_entry_safe(dc, t, &c->cached_devs, list)
			if (dc->bdev == bdev)
				return true;
	list_for_each_entry_safe(dc, t, &uncached_devices, list)
		if (dc->bdev == bdev)
			return true;
	return false;
}
static bool bch_is_open_cache(struct block_device *bdev)
{
	struct cache_set *c, *tc;
	struct cache *ca;
	unsigned int i;

	list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
		for_each_cache(ca, c, i)
			if (ca->bdev == bdev)
				return true;
	return false;
}
static bool bch_is_open(struct block_device *bdev)
{
	return bch_is_open_cache(bdev) || bch_is_open_backing(bdev);
}
struct async_reg_args {
	struct delayed_work reg_work;
	char *path;
	struct cache_sb *sb;
	struct cache_sb_disk *sb_disk;
	struct block_device *bdev;
};
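
/*
 * The async workers below receive everything register_bcache() collected
 * (path, in-memory superblock, superblock page and the opened block
 * device), so the sysfs write can return without waiting for a potentially
 * slow registration to finish.
 */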
static void register_bdev_worker(struct work_struct *work)
{
	int fail = false;
	struct async_reg_args *args =
		container_of(work, struct async_reg_args, reg_work.work);
	struct cached_dev *dc;

	dc = kzalloc(sizeof(*dc), GFP_KERNEL);
	if (!dc) {
		fail = true;
		put_page(virt_to_page(args->sb_disk));
		blkdev_put(args->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		goto out;
	}

	mutex_lock(&bch_register_lock);
	if (register_bdev(args->sb, args->sb_disk, args->bdev, dc) < 0)
		fail = true;
	mutex_unlock(&bch_register_lock);

out:
	if (fail)
		pr_info("error %s: fail to register backing device\n",
			args->path);
	kfree(args->sb);
	kfree(args->path);
	kfree(args);
	module_put(THIS_MODULE);
}
static void register_cache_worker(struct work_struct *work)
{
	int fail = false;
	struct async_reg_args *args =
		container_of(work, struct async_reg_args, reg_work.work);
	struct cache *ca;

	ca = kzalloc(sizeof(*ca), GFP_KERNEL);
	if (!ca) {
		fail = true;
		put_page(virt_to_page(args->sb_disk));
		blkdev_put(args->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		goto out;
	}

	/* blkdev_put() will be called in bch_cache_release() */
	if (register_cache(args->sb, args->sb_disk, args->bdev, ca) != 0)
		fail = true;

out:
	if (fail)
		pr_info("error %s: fail to register cache device\n",
			args->path);
	kfree(args->sb);
	kfree(args->path);
	kfree(args);
	module_put(THIS_MODULE);
}
static void register_device_aync(struct async_reg_args *args)
{
	if (SB_IS_BDEV(args->sb))
		INIT_DELAYED_WORK(&args->reg_work, register_bdev_worker);
	else
		INIT_DELAYED_WORK(&args->reg_work, register_cache_worker);

	/* 10 jiffies is enough for a delay */
	queue_delayed_work(system_wq, &args->reg_work, 10);
}
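
/*
 * Registration is driven from user space by writing a device path to the
 * sysfs files created in bcache_init(), for example (assuming the device
 * was formatted with make-bcache beforehand):
 *
 *	echo /dev/sdc > /sys/fs/bcache/register
 *	echo /dev/sdc > /sys/fs/bcache/register_async
 *
 * where register_async is only available when the module is built with
 * CONFIG_BCACHE_ASYNC_REGISTRATION.
 */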
static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
			       const char *buffer, size_t size)
{
	const char *err;
	char *path = NULL;
	struct cache_sb *sb;
	struct cache_sb_disk *sb_disk;
	struct block_device *bdev;
	ssize_t ret;

	ret = -EBUSY;
	err = "failed to reference bcache module";
	if (!try_module_get(THIS_MODULE))
		goto out;

	/* For latest state of bcache_is_reboot */
	smp_mb();
	err = "bcache is in reboot";
	if (bcache_is_reboot)
		goto out_module_put;

	ret = -ENOMEM;
	err = "cannot allocate memory";
	path = kstrndup(buffer, size, GFP_KERNEL);
	if (!path)
		goto out_module_put;

	sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL);
	if (!sb)
		goto out_free_path;

	ret = -EINVAL;
	err = "failed to open device";
	bdev = blkdev_get_by_path(strim(path),
				  FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				  sb);
	if (IS_ERR(bdev)) {
		if (bdev == ERR_PTR(-EBUSY)) {
			bdev = lookup_bdev(strim(path));
			mutex_lock(&bch_register_lock);
			if (!IS_ERR(bdev) && bch_is_open(bdev))
				err = "device already registered";
			else
				err = "device busy";
			mutex_unlock(&bch_register_lock);
			if (attr == &ksysfs_register_quiet)
				goto done;
		}
		goto out_free_sb;
	}

	err = "failed to set blocksize";
	if (set_blocksize(bdev, 4096))
		goto out_blkdev_put;

	err = read_super(sb, bdev, &sb_disk);
	if (err)
		goto out_blkdev_put;

	err = "failed to register device";
	if (attr == &ksysfs_register_async) {
		/* register in asynchronous way */
		struct async_reg_args *args =
			kzalloc(sizeof(struct async_reg_args), GFP_KERNEL);

		if (!args) {
			ret = -ENOMEM;
			err = "cannot allocate memory";
			goto out_put_sb_page;
		}

		args->path	= path;
		args->sb	= sb;
		args->sb_disk	= sb_disk;
		args->bdev	= bdev;
		register_device_aync(args);
		/* No wait and returns to user space */
		goto async_done;
	}

	if (SB_IS_BDEV(sb)) {
		struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);

		if (!dc)
			goto out_put_sb_page;

		mutex_lock(&bch_register_lock);
		ret = register_bdev(sb, sb_disk, bdev, dc);
		mutex_unlock(&bch_register_lock);
		/* blkdev_put() will be called in cached_dev_free() */
		if (ret < 0)
			goto out_free_sb;
	} else {
		struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);

		if (!ca)
			goto out_put_sb_page;

		/* blkdev_put() will be called in bch_cache_release() */
		if (register_cache(sb, sb_disk, bdev, ca) != 0)
			goto out_free_sb;
	}

done:
	kfree(sb);
	kfree(path);
	module_put(THIS_MODULE);
async_done:
	return size;

out_put_sb_page:
	put_page(virt_to_page(sb_disk));
out_blkdev_put:
	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
out_free_sb:
	kfree(sb);
out_free_path:
	kfree(path);
	path = NULL;
out_module_put:
	module_put(THIS_MODULE);
out:
	pr_info("error %s: %s\n", path ? path : "", err);
	return ret;
}
struct pdev {
	struct list_head list;
	struct cached_dev *dc;
};
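
/*
 * A pdev entry tracks a backing device that was registered before its
 * cache set showed up.  Writing to /sys/fs/bcache/pendings_cleanup stops
 * every pending backing device whose cache set still has not appeared.
 */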
static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
					 struct kobj_attribute *attr,
					 const char *buffer,
					 size_t size)
{
	LIST_HEAD(pending_devs);
	ssize_t ret = size;
	struct cached_dev *dc, *tdc;
	struct pdev *pdev, *tpdev;
	struct cache_set *c, *tc;

	mutex_lock(&bch_register_lock);
	list_for_each_entry_safe(dc, tdc, &uncached_devices, list) {
		pdev = kmalloc(sizeof(struct pdev), GFP_KERNEL);
		if (!pdev)
			break;
		pdev->dc = dc;
		list_add(&pdev->list, &pending_devs);
	}

	list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) {
		list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
			char *pdev_set_uuid = pdev->dc->sb.set_uuid;
			char *set_uuid = c->sb.uuid;

			if (!memcmp(pdev_set_uuid, set_uuid, 16)) {
				list_del(&pdev->list);
				kfree(pdev);
				break;
			}
		}
	}
	mutex_unlock(&bch_register_lock);

	list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) {
		pr_info("delete pdev %p\n", pdev);
		list_del(&pdev->list);
		bcache_device_stop(&pdev->dc->disk);
		kfree(pdev);
	}

	return ret;
}
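
/*
 * Reboot notifier: on shutdown/halt/power-off, reject further
 * registrations and stop all cache sets and backing devices, waiting up
 * to 10 seconds for them to finish tearing down.
 */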
static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
{
	if (bcache_is_reboot)
		return NOTIFY_DONE;

	if (code == SYS_DOWN ||
	    code == SYS_HALT ||
	    code == SYS_POWER_OFF) {
		DEFINE_WAIT(wait);
		unsigned long start = jiffies;
		bool stopped = false;

		struct cache_set *c, *tc;
		struct cached_dev *dc, *tdc;

		mutex_lock(&bch_register_lock);

		if (bcache_is_reboot)
			goto out;

		/* New registration is rejected since now */
		bcache_is_reboot = true;
		/*
		 * Make a registering caller (if there is one) on another
		 * CPU core aware that bcache_is_reboot was set to true
		 * earlier.
		 */
		smp_mb();

		if (list_empty(&bch_cache_sets) &&
		    list_empty(&uncached_devices))
			goto out;

		mutex_unlock(&bch_register_lock);

		pr_info("Stopping all devices:\n");

		/*
		 * The reason bch_register_lock is not held to call
		 * bch_cache_set_stop() and bcache_device_stop() is to
		 * avoid potential deadlock during reboot, because cache
		 * set or bcache device stopping process will acquire
		 * bch_register_lock too.
		 *
		 * We are safe here because bcache_is_reboot is already
		 * set to true, so register_bcache() will reject new
		 * registration now. bcache_is_reboot also makes sure
		 * bcache_reboot() won't be re-entered by another thread,
		 * so there is no race in the following list iteration by
		 * list_for_each_entry_safe().
		 */
		list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
			bch_cache_set_stop(c);

		list_for_each_entry_safe(dc, tdc, &uncached_devices, list)
			bcache_device_stop(&dc->disk);

		/*
		 * Give an early chance for other kthreads and
		 * kworkers to stop themselves
		 */
		schedule();

		/* What's a condition variable? */
		while (1) {
			long timeout = start + 10 * HZ - jiffies;

			mutex_lock(&bch_register_lock);
			stopped = list_empty(&bch_cache_sets) &&
				  list_empty(&uncached_devices);

			if (timeout < 0 || stopped)
				break;

			prepare_to_wait(&unregister_wait, &wait,
					TASK_UNINTERRUPTIBLE);

			mutex_unlock(&bch_register_lock);
			schedule_timeout(timeout);
		}

		finish_wait(&unregister_wait, &wait);

		if (stopped)
			pr_info("All devices stopped\n");
		else
			pr_notice("Timeout waiting for devices to be closed\n");
out:
		mutex_unlock(&bch_register_lock);
	}

	return NOTIFY_DONE;
}
static struct notifier_block reboot = {
	.notifier_call	= bcache_reboot,
	.priority	= INT_MAX, /* before any real devices */
};
static void bcache_exit(void)
{
	bch_debug_exit();
	bch_request_exit();
	if (bcache_kobj)
		kobject_put(bcache_kobj);
	if (bcache_wq)
		destroy_workqueue(bcache_wq);
	if (bch_journal_wq)
		destroy_workqueue(bch_journal_wq);

	if (bcache_major)
		unregister_blkdev(bcache_major, "bcache");
	unregister_reboot_notifier(&reboot);
	mutex_destroy(&bch_register_lock);
}
/* Check and fixup module parameters */
static void check_module_parameters(void)
{
	if (bch_cutoff_writeback_sync == 0)
		bch_cutoff_writeback_sync = CUTOFF_WRITEBACK_SYNC;
	else if (bch_cutoff_writeback_sync > CUTOFF_WRITEBACK_SYNC_MAX) {
		pr_warn("set bch_cutoff_writeback_sync (%u) to max value %u\n",
			bch_cutoff_writeback_sync, CUTOFF_WRITEBACK_SYNC_MAX);
		bch_cutoff_writeback_sync = CUTOFF_WRITEBACK_SYNC_MAX;
	}

	if (bch_cutoff_writeback == 0)
		bch_cutoff_writeback = CUTOFF_WRITEBACK;
	else if (bch_cutoff_writeback > CUTOFF_WRITEBACK_MAX) {
		pr_warn("set bch_cutoff_writeback (%u) to max value %u\n",
			bch_cutoff_writeback, CUTOFF_WRITEBACK_MAX);
		bch_cutoff_writeback = CUTOFF_WRITEBACK_MAX;
	}

	if (bch_cutoff_writeback > bch_cutoff_writeback_sync) {
		pr_warn("set bch_cutoff_writeback (%u) to %u\n",
			bch_cutoff_writeback, bch_cutoff_writeback_sync);
		bch_cutoff_writeback = bch_cutoff_writeback_sync;
	}
}
static int __init bcache_init(void)
{
	static const struct attribute *files[] = {
		&ksysfs_register.attr,
		&ksysfs_register_quiet.attr,
#ifdef CONFIG_BCACHE_ASYNC_REGISTRATION
		&ksysfs_register_async.attr,
#endif
		&ksysfs_pendings_cleanup.attr,
		NULL
	};

	check_module_parameters();

	mutex_init(&bch_register_lock);
	init_waitqueue_head(&unregister_wait);
	register_reboot_notifier(&reboot);

	bcache_major = register_blkdev(0, "bcache");
	if (bcache_major < 0) {
		unregister_reboot_notifier(&reboot);
		mutex_destroy(&bch_register_lock);
		return bcache_major;
	}

	bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0);
	if (!bcache_wq)
		goto err;

	bch_journal_wq = alloc_workqueue("bch_journal", WQ_MEM_RECLAIM, 0);
	if (!bch_journal_wq)
		goto err;

	bcache_kobj = kobject_create_and_add("bcache", fs_kobj);
	if (!bcache_kobj)
		goto err;

	if (bch_request_init() ||
	    sysfs_create_files(bcache_kobj, files))
		goto err;

	bch_debug_init();
	closure_debug_init();

	bcache_is_reboot = false;

	return 0;
err:
	bcache_exit();
	return -ENOMEM;
}
module_exit(bcache_exit);
module_init(bcache_init);

module_param(bch_cutoff_writeback, uint, 0);
MODULE_PARM_DESC(bch_cutoff_writeback, "threshold to cutoff writeback");

module_param(bch_cutoff_writeback_sync, uint, 0);
MODULE_PARM_DESC(bch_cutoff_writeback_sync, "hard threshold to cutoff writeback");

MODULE_DESCRIPTION("Bcache: a Linux block layer cache");
MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");
MODULE_LICENSE("GPL");