// SPDX-License-Identifier: GPL-2.0
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "sysfs.h"
#include "btree.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>
/* Default is -1; we skip past it for struct cached_dev's cache mode */
static const char * const bch_cache_modes[] = {
	"writethrough",
	"writeback",
	"writearound",
	"none",
	NULL
};

/* Default is -1; we skip past it for stop_when_cache_set_failed */
static const char * const bch_stop_on_failure_modes[] = {
	"auto",
	"always",
	NULL
};

static const char * const cache_replacement_policies[] = {
	"lru",
	"fifo",
	"random",
	NULL
};

static const char * const error_actions[] = {
	"unregister",
	"panic",
	NULL
};
write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
write_attribute(stop);
write_attribute(clear_stats);
write_attribute(trigger_gc);
write_attribute(prune_cache);
write_attribute(flash_vol_create);

read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(nbuckets);
read_attribute(tree_depth);
read_attribute(root_usage_percent);
read_attribute(priority_stats);
read_attribute(btree_cache_size);
read_attribute(btree_cache_max_chain);
read_attribute(cache_available_percent);
read_attribute(written);
read_attribute(btree_written);
read_attribute(metadata_written);
read_attribute(active_journal_entries);
sysfs_time_stats_attribute(btree_gc,	sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort,	ms,  us);
sysfs_time_stats_attribute(btree_read,	ms,  us);
read_attribute(btree_nodes);
read_attribute(btree_used_percent);
read_attribute(average_key_size);
read_attribute(dirty_data);
read_attribute(bset_tree_stats);

read_attribute(state);
read_attribute(cache_read_races);
read_attribute(reclaim);
read_attribute(flush_write);
read_attribute(retry_flush_write);
read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
read_attribute(congested);
rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);

rw_attribute(sequential_cutoff);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(stop_when_cache_set_failed);
rw_attribute(writeback_metadata);
rw_attribute(writeback_running);
rw_attribute(writeback_percent);
rw_attribute(writeback_delay);
rw_attribute(writeback_rate);

rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_i_term_inverse);
rw_attribute(writeback_rate_p_term_inverse);
rw_attribute(writeback_rate_minimum);
read_attribute(writeback_rate_debug);

read_attribute(stripe_size);
read_attribute(partial_stripes_expensive);

rw_attribute(synchronous);
rw_attribute(journal_delay_ms);
rw_attribute(io_disable);
rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(readahead);
rw_attribute(errors);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
rw_attribute(bypass_torture_test);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
rw_attribute(expensive_debug_checks);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
rw_attribute(size);
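
/*
 * Each *_attribute() macro above (defined in this directory's sysfs.h)
 * declares a struct attribute named sysfs_<name>; the SHOW()/STORE()
 * handlers below match on those objects by address.
 */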
static ssize_t bch_snprint_string_list(char *buf,
				       size_t size,
				       const char * const list[],
				       size_t selected)
{
	char *out = buf;
	size_t i;

	for (i = 0; list[i]; i++)
		out += snprintf(out, buf + size - out,
				i == selected ? "[%s] " : "%s ", list[i]);

	out[-1] = '\n';
	return out - buf;
}
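
/*
 * SHOW()/STORE() and their _LOCKED() variants also come from sysfs.h;
 * the _LOCKED forms wrap the __-prefixed handler in bch_register_lock.
 */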
SHOW(__bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	char const *states[] = { "no cache", "clean", "dirty", "inconsistent" };
	int wb = dc->writeback_running;

#define var(stat)		(dc->stat)

	if (attr == &sysfs_cache_mode)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_cache_modes,
					       BDEV_CACHE_MODE(&dc->sb));

	if (attr == &sysfs_stop_when_cache_set_failed)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       bch_stop_on_failure_modes,
					       dc->stop_when_cache_set_failed);

	sysfs_printf(data_csum,		"%i", dc->disk.data_csum);
	var_printf(verify,		"%i");
	var_printf(bypass_torture_test,	"%i");
	var_printf(writeback_metadata,	"%i");
	var_printf(writeback_running,	"%i");
	var_print(writeback_delay);
	var_print(writeback_percent);
	sysfs_hprint(writeback_rate,
		     wb ? atomic_long_read(&dc->writeback_rate.rate) << 9 : 0);
	sysfs_hprint(io_errors,		atomic_read(&dc->io_errors));
	sysfs_printf(io_error_limit,	"%i", dc->error_limit);
	sysfs_printf(io_disable,	"%i", dc->io_disable);
	var_print(writeback_rate_update_seconds);
	var_print(writeback_rate_i_term_inverse);
	var_print(writeback_rate_p_term_inverse);
	var_print(writeback_rate_minimum);

	if (attr == &sysfs_writeback_rate_debug) {
		char rate[20];
		char dirty[20];
		char target[20];
		char proportional[20];
		char integral[20];
		char change[20];
		s64 next_io;

		/*
		 * Except for dirty and target, other values should
		 * be 0 if writeback is not running.
		 */
		bch_hprint(rate,
			   wb ? atomic_long_read(&dc->writeback_rate.rate) << 9
			      : 0);
		bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9);
		bch_hprint(target, dc->writeback_rate_target << 9);
		bch_hprint(proportional,
			   wb ? dc->writeback_rate_proportional << 9 : 0);
		bch_hprint(integral,
			   wb ? dc->writeback_rate_integral_scaled << 9 : 0);
		bch_hprint(change, wb ? dc->writeback_rate_change << 9 : 0);
		next_io = wb ? div64_s64(dc->writeback_rate.next - local_clock(),
					 NSEC_PER_MSEC) : 0;

		return sprintf(buf,
			       "rate:\t\t%s/sec\n"
			       "dirty:\t\t%s\n"
			       "target:\t\t%s\n"
			       "proportional:\t%s\n"
			       "integral:\t%s\n"
			       "change:\t\t%s/sec\n"
			       "next io:\t%llims\n",
			       rate, dirty, target, proportional,
			       integral, change, next_io);
	}

	sysfs_hprint(dirty_data,
		     bcache_dev_sectors_dirty(&dc->disk) << 9);

	sysfs_hprint(stripe_size, ((uint64_t)dc->disk.stripe_size) << 9);
	var_printf(partial_stripes_expensive,	"%u");

	var_hprint(sequential_cutoff);
	var_hprint(readahead);

	sysfs_print(running,		atomic_read(&dc->running));
	sysfs_print(state,		states[BDEV_STATE(&dc->sb)]);

	if (attr == &sysfs_label) {
		memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

#undef var
	return 0;
}
SHOW_LOCKED(bch_cached_dev)
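
/*
 * Note on the "<< 9" shifts throughout this file: bcache tracks sizes in
 * 512-byte sectors, so shifting left by 9 converts a sector count to bytes
 * before handing it to bch_hprint()/sysfs_hprint() for human-readable output.
 */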
STORE(__cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	ssize_t v;
	struct cache_set *c;
	struct kobj_uevent_env *env;

#define d_strtoul(var)		sysfs_strtoul(var, dc->var)
#define d_strtoul_nonzero(var)	sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
#define d_strtoi_h(var)		sysfs_hatoi(var, dc->var)

	sysfs_strtoul(data_csum,	dc->disk.data_csum);
	d_strtoul(verify);
	d_strtoul(bypass_torture_test);
	d_strtoul(writeback_metadata);
	d_strtoul(writeback_running);
	d_strtoul(writeback_delay);

	sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40);

	if (attr == &sysfs_writeback_rate) {
		ssize_t ret;
		long int v = atomic_long_read(&dc->writeback_rate.rate);

		ret = strtoul_safe_clamp(buf, v, 1, INT_MAX);

		if (!ret) {
			atomic_long_set(&dc->writeback_rate.rate, v);
			ret = size;
		}

		return ret;
	}

	sysfs_strtoul_clamp(writeback_rate_update_seconds,
			    dc->writeback_rate_update_seconds,
			    1, WRITEBACK_RATE_UPDATE_SECS_MAX);
	sysfs_strtoul_clamp(writeback_rate_i_term_inverse,
			    dc->writeback_rate_i_term_inverse,
			    1, UINT_MAX);
	sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
			    dc->writeback_rate_p_term_inverse,
			    1, UINT_MAX);
	d_strtoul_nonzero(writeback_rate_minimum);

	sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX);

	if (attr == &sysfs_io_disable) {
		int v = strtoul_or_return(buf);

		dc->io_disable = v ? 1 : 0;
	}

	sysfs_strtoul_clamp(sequential_cutoff,
			    dc->sequential_cutoff,
			    0, UINT_MAX);
	d_strtoi_h(readahead);

	if (attr == &sysfs_clear_stats)
		bch_cache_accounting_clear(&dc->accounting);

	if (attr == &sysfs_running &&
	    strtoul_or_return(buf))
		bch_cached_dev_run(dc);

	if (attr == &sysfs_cache_mode) {
		v = __sysfs_match_string(bch_cache_modes, -1, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != BDEV_CACHE_MODE(&dc->sb)) {
			SET_BDEV_CACHE_MODE(&dc->sb, v);
			bch_write_bdev_super(dc, NULL);
		}
	}

	if (attr == &sysfs_stop_when_cache_set_failed) {
		v = __sysfs_match_string(bch_stop_on_failure_modes, -1, buf);
		if (v < 0)
			return v;

		dc->stop_when_cache_set_failed = v;
	}

	if (attr == &sysfs_label) {
		if (size > SB_LABEL_SIZE)
			return -EINVAL;
		memcpy(dc->sb.label, buf, size);
		if (size < SB_LABEL_SIZE)
			dc->sb.label[size] = '\0';
		if (size && dc->sb.label[size - 1] == '\n')
			dc->sb.label[size - 1] = '\0';
		bch_write_bdev_super(dc, NULL);
		if (dc->disk.c) {
			memcpy(dc->disk.c->uuids[dc->disk.id].label,
			       buf, SB_LABEL_SIZE);
			bch_uuid_write(dc->disk.c);
		}
		env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
		if (!env)
			return -ENOMEM;
		add_uevent_var(env, "DRIVER=bcache");
		add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid);
		add_uevent_var(env, "CACHED_LABEL=%s", buf);
		kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj,
				   KOBJ_CHANGE, env->envp);
		kfree(env);
	}

	if (attr == &sysfs_attach) {
		uint8_t		set_uuid[16];

		if (bch_parse_uuid(buf, set_uuid) < 16)
			return -EINVAL;

		v = -ENOENT;
		list_for_each_entry(c, &bch_cache_sets, list) {
			v = bch_cached_dev_attach(dc, c, set_uuid);
			if (!v)
				return size;
		}

		pr_err("Can't attach %s: cache set not found", buf);
		return v;
	}

	if (attr == &sysfs_detach && dc->disk.c)
		bch_cached_dev_detach(dc);

	if (attr == &sysfs_stop)
		bcache_device_stop(&dc->disk);

	return size;
}
STORE(bch_cached_dev)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);

	mutex_lock(&bch_register_lock);
	size = __cached_dev_store(kobj, attr, buf, size);

	if (attr == &sysfs_writeback_running)
		bch_writeback_queue(dc);

	if (attr == &sysfs_writeback_percent)
		if (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
			schedule_delayed_work(&dc->writeback_rate_update,
				      dc->writeback_rate_update_seconds * HZ);

	mutex_unlock(&bch_register_lock);
	return size;
}
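
/*
 * Attributes exported for a backing device; these appear under the backing
 * block device's sysfs directory (path assumed: /sys/block/<bdev>/bcache/).
 */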
static struct attribute *bch_cached_dev_files[] = {
	&sysfs_attach,
	&sysfs_detach,
	&sysfs_stop,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_cache_mode,
	&sysfs_stop_when_cache_set_failed,
	&sysfs_writeback_metadata,
	&sysfs_writeback_running,
	&sysfs_writeback_delay,
	&sysfs_writeback_percent,
	&sysfs_writeback_rate,
	&sysfs_writeback_rate_update_seconds,
	&sysfs_writeback_rate_i_term_inverse,
	&sysfs_writeback_rate_p_term_inverse,
	&sysfs_writeback_rate_minimum,
	&sysfs_writeback_rate_debug,
	&sysfs_io_errors,
	&sysfs_io_error_limit,
	&sysfs_io_disable,
	&sysfs_dirty_data,
	&sysfs_stripe_size,
	&sysfs_partial_stripes_expensive,
	&sysfs_sequential_cutoff,
	&sysfs_clear_stats,
	&sysfs_running,
	&sysfs_state,
	&sysfs_label,
	&sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_bypass_torture_test,
#endif
	NULL
};
KTYPE(bch_cached_dev);
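
/*
 * KTYPE() (sysfs.h) packages the matching _show/_store callbacks and the
 * _files array above into a struct kobj_type for kobject registration.
 */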
SHOW(bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_printf(data_csum,	"%i", d->data_csum);
	sysfs_hprint(size,	u->sectors << 9);

	if (attr == &sysfs_label) {
		memcpy(buf, u->label, SB_LABEL_SIZE);
		buf[SB_LABEL_SIZE] = '\0';
		strcat(buf, "\n");
		return strlen(buf);
	}

	return 0;
}
STORE(__bch_flash_dev)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	struct uuid_entry *u = &d->c->uuids[d->id];

	sysfs_strtoul(data_csum,	d->data_csum);

	if (attr == &sysfs_size) {
		uint64_t v;

		strtoi_h_or_return(buf, v);

		u->sectors = v >> 9;
		bch_uuid_write(d->c);
		set_capacity(d->disk, u->sectors);
	}

	if (attr == &sysfs_label) {
		memcpy(u->label, buf, SB_LABEL_SIZE);
		bch_uuid_write(d->c);
	}

	if (attr == &sysfs_unregister) {
		set_bit(BCACHE_DEV_DETACHING, &d->flags);
		bcache_device_stop(d);
	}

	return size;
}
STORE_LOCKED(bch_flash_dev)
static struct attribute *bch_flash_dev_files[] = {
	&sysfs_unregister,
#if 0
	&sysfs_data_csum,
#endif
	&sysfs_label,
	&sysfs_size,
	NULL
};
KTYPE(bch_flash_dev);
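
/*
 * bset_tree_stats: walk every btree node via bch_btree_map_nodes() and
 * accumulate per-bset statistics for a single sysfs read.
 */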
struct bset_stats_op {
	struct btree_op op;
	size_t nodes;
	struct bset_stats stats;
};

static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b)
{
	struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);

	op->nodes++;
	bch_btree_keys_stats(&b->keys, &op->stats);

	return MAP_CONTINUE;
}

static int bch_bset_print_stats(struct cache_set *c, char *buf)
{
	struct bset_stats_op op;
	int ret;

	memset(&op, 0, sizeof(op));
	bch_btree_op_init(&op.op, -1);

	ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats);
	if (ret < 0)
		return ret;

	return snprintf(buf, PAGE_SIZE,
			"btree nodes:\t\t%zu\n"
			"written sets:\t\t%zu\n"
			"unwritten sets:\t\t%zu\n"
			"written key bytes:\t%zu\n"
			"unwritten key bytes:\t%zu\n"
			"floats:\t\t\t%zu\n"
			"failed:\t\t\t%zu\n",
			op.nodes,
			op.stats.sets_written, op.stats.sets_unwritten,
			op.stats.bytes_written, op.stats.bytes_unwritten,
			op.stats.floats, op.stats.failed);
}
static unsigned int bch_root_usage(struct cache_set *c)
{
	unsigned int bytes = 0;
	struct bkey *k;
	struct btree *b;
	struct btree_iter iter;

	goto lock_root;

	do {
		rw_unlock(false, b);
lock_root:
		b = c->root;
		rw_lock(false, b, b->level);
	} while (b != c->root);

	for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
		bytes += bkey_bytes(k);

	rw_unlock(false, b);

	return (bytes * 100) / btree_bytes(c);
}
static size_t bch_cache_size(struct cache_set *c)
{
	size_t ret = 0;
	struct btree *b;

	mutex_lock(&c->bucket_lock);
	list_for_each_entry(b, &c->btree_cache, list)
		ret += 1 << (b->keys.page_order + PAGE_SHIFT);

	mutex_unlock(&c->bucket_lock);
	return ret;
}
static unsigned int bch_cache_max_chain(struct cache_set *c)
{
	unsigned int ret = 0;
	struct hlist_head *h;

	mutex_lock(&c->bucket_lock);

	for (h = c->bucket_hash;
	     h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
	     h++) {
		unsigned int i = 0;
		struct hlist_node *p;

		hlist_for_each(p, h)
			i++;

		ret = max(ret, i);
	}

	mutex_unlock(&c->bucket_lock);
	return ret;
}
static unsigned int bch_btree_used(struct cache_set *c)
{
	return div64_u64(c->gc_stats.key_bytes * 100,
			 (c->gc_stats.nodes ?: 1) * btree_bytes(c));
}

static unsigned int bch_average_key_size(struct cache_set *c)
{
	return c->gc_stats.nkeys
		? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
		: 0;
}
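
/*
 * cache_set "show": most attributes are emitted via the sysfs_print*()
 * macros, which return from the function as soon as "attr" matches;
 * control only reaches the bottom for attributes needing custom output.
 */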
SHOW(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);

	sysfs_print(synchronous,		CACHE_SYNC(&c->sb));
	sysfs_print(journal_delay_ms,		c->journal_delay_ms);
	sysfs_hprint(bucket_size,		bucket_bytes(c));
	sysfs_hprint(block_size,		block_bytes(c));
	sysfs_print(tree_depth,			c->root->level);
	sysfs_print(root_usage_percent,		bch_root_usage(c));

	sysfs_hprint(btree_cache_size,		bch_cache_size(c));
	sysfs_print(btree_cache_max_chain,	bch_cache_max_chain(c));
	sysfs_print(cache_available_percent,	100 - c->gc_stats.in_use);

	sysfs_print_time_stats(&c->btree_gc_time,	btree_gc, sec, ms);
	sysfs_print_time_stats(&c->btree_split_time,	btree_split, sec, us);
	sysfs_print_time_stats(&c->sort.time,		btree_sort, ms, us);
	sysfs_print_time_stats(&c->btree_read_time,	btree_read, ms, us);

	sysfs_print(btree_used_percent,	bch_btree_used(c));
	sysfs_print(btree_nodes,	c->gc_stats.nodes);
	sysfs_hprint(average_key_size,	bch_average_key_size(c));

	sysfs_print(cache_read_races,
		    atomic_long_read(&c->cache_read_races));

	sysfs_print(reclaim,
		    atomic_long_read(&c->reclaim));

	sysfs_print(flush_write,
		    atomic_long_read(&c->flush_write));

	sysfs_print(retry_flush_write,
		    atomic_long_read(&c->retry_flush_write));

	sysfs_print(writeback_keys_done,
		    atomic_long_read(&c->writeback_keys_done));
	sysfs_print(writeback_keys_failed,
		    atomic_long_read(&c->writeback_keys_failed));

	if (attr == &sysfs_errors)
		return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
					       c->on_error);

	/* See count_io_errors for why 88 */
	sysfs_print(io_error_halflife,	c->error_decay * 88);
	sysfs_print(io_error_limit,	c->error_limit);

	sysfs_hprint(congested,
		     ((uint64_t) bch_get_congested(c)) << 9);
	sysfs_print(congested_read_threshold_us,
		    c->congested_read_threshold_us);
	sysfs_print(congested_write_threshold_us,
		    c->congested_write_threshold_us);

	sysfs_print(active_journal_entries,	fifo_used(&c->journal.pin));
	sysfs_printf(verify,			"%i", c->verify);
	sysfs_printf(key_merging_disabled,	"%i", c->key_merging_disabled);
	sysfs_printf(expensive_debug_checks,
		     "%i", c->expensive_debug_checks);
	sysfs_printf(gc_always_rewrite,		"%i", c->gc_always_rewrite);
	sysfs_printf(btree_shrinker_disabled,	"%i", c->shrinker_disabled);
	sysfs_printf(copy_gc_enabled,		"%i", c->copy_gc_enabled);
	sysfs_printf(io_disable,		"%i",
		     test_bit(CACHE_SET_IO_DISABLE, &c->flags));

	if (attr == &sysfs_bset_tree_stats)
		return bch_bset_print_stats(c, buf);

	return 0;
}
SHOW_LOCKED(bch_cache_set)
STORE(__bch_cache_set)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);
	ssize_t v;

	if (attr == &sysfs_unregister)
		bch_cache_set_unregister(c);

	if (attr == &sysfs_stop)
		bch_cache_set_stop(c);

	if (attr == &sysfs_synchronous) {
		bool sync = strtoul_or_return(buf);

		if (sync != CACHE_SYNC(&c->sb)) {
			SET_CACHE_SYNC(&c->sb, sync);
			bcache_write_super(c);
		}
	}

	if (attr == &sysfs_flash_vol_create) {
		int r;
		uint64_t v;

		strtoi_h_or_return(buf, v);

		r = bch_flash_dev_create(c, v);
		if (r)
			return r;
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&c->writeback_keys_done,	0);
		atomic_long_set(&c->writeback_keys_failed,	0);

		memset(&c->gc_stats, 0, sizeof(struct gc_stat));
		bch_cache_accounting_clear(&c->accounting);
	}

	if (attr == &sysfs_trigger_gc) {
		/*
		 * Garbage collection thread only works when sectors_to_gc < 0,
		 * when users write to sysfs entry trigger_gc, most of time
		 * they want to forcibly trigger garbage collection. Here -1 is
		 * set to c->sectors_to_gc, to make gc_should_run() give a
		 * chance to permit gc thread to run. "give a chance" means
		 * before going into gc_should_run(), there is still chance
		 * that c->sectors_to_gc being set to other positive value. So
		 * writing sysfs entry trigger_gc won't always make sure gc
		 * thread takes effect.
		 */
		atomic_set(&c->sectors_to_gc, -1);
		wake_up_gc(c);
	}

	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;

		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		c->shrink.scan_objects(&c->shrink, &sc);
	}

	sysfs_strtoul(congested_read_threshold_us,
		      c->congested_read_threshold_us);
	sysfs_strtoul(congested_write_threshold_us,
		      c->congested_write_threshold_us);

	if (attr == &sysfs_errors) {
		v = __sysfs_match_string(error_actions, -1, buf);
		if (v < 0)
			return v;

		c->on_error = v;
	}

	if (attr == &sysfs_io_error_limit)
		c->error_limit = strtoul_or_return(buf);

	/* See count_io_errors() for why 88 */
	if (attr == &sysfs_io_error_halflife) {
		unsigned long v = 0;
		ssize_t ret;

		ret = strtoul_safe_clamp(buf, v, 0, UINT_MAX);
		if (!ret) {
			c->error_decay = v / 88;
			ret = size;
		}
		return ret;
	}

	if (attr == &sysfs_io_disable) {
		v = strtoul_or_return(buf);
		if (v) {
			if (test_and_set_bit(CACHE_SET_IO_DISABLE,
					     &c->flags))
				pr_warn("CACHE_SET_IO_DISABLE already set");
		} else {
			if (!test_and_clear_bit(CACHE_SET_IO_DISABLE,
						&c->flags))
				pr_warn("CACHE_SET_IO_DISABLE already cleared");
		}
	}

	sysfs_strtoul(journal_delay_ms,		c->journal_delay_ms);
	sysfs_strtoul(verify,			c->verify);
	sysfs_strtoul(key_merging_disabled,	c->key_merging_disabled);
	sysfs_strtoul(expensive_debug_checks,	c->expensive_debug_checks);
	sysfs_strtoul(gc_always_rewrite,	c->gc_always_rewrite);
	sysfs_strtoul(btree_shrinker_disabled,	c->shrinker_disabled);
	sysfs_strtoul(copy_gc_enabled,		c->copy_gc_enabled);

	return size;
}
STORE_LOCKED(bch_cache_set)
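
/*
 * Example from userspace (path assumed, based on the attribute names
 * registered below):
 *   echo 1 > /sys/fs/bcache/<set-uuid>/internal/trigger_gc
 * asks the GC thread to run; as the trigger_gc comment above explains,
 * this is best-effort rather than guaranteed.
 */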
SHOW(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);

	return bch_cache_set_show(&c->kobj, attr, buf);
}

STORE(bch_cache_set_internal)
{
	struct cache_set *c = container_of(kobj, struct cache_set, internal);

	return bch_cache_set_store(&c->kobj, attr, buf, size);
}

static void bch_cache_set_internal_release(struct kobject *k)
{
}
static struct attribute *bch_cache_set_files[] = {
	&sysfs_unregister,
	&sysfs_stop,
	&sysfs_synchronous,
	&sysfs_journal_delay_ms,
	&sysfs_flash_vol_create,

	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_tree_depth,
	&sysfs_root_usage_percent,
	&sysfs_btree_cache_size,
	&sysfs_cache_available_percent,

	&sysfs_average_key_size,

	&sysfs_errors,
	&sysfs_io_error_limit,
	&sysfs_io_error_halflife,
	&sysfs_congested,
	&sysfs_congested_read_threshold_us,
	&sysfs_congested_write_threshold_us,
	&sysfs_clear_stats,
	NULL
};
KTYPE(bch_cache_set);
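
/*
 * The internal/ attributes hang off a second kobject embedded in struct
 * cache_set (the "internal" member used by container_of() above).
 */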
static struct attribute *bch_cache_set_internal_files[] = {
	&sysfs_active_journal_entries,

	sysfs_time_stats_attribute_list(btree_gc, sec, ms)
	sysfs_time_stats_attribute_list(btree_split, sec, us)
	sysfs_time_stats_attribute_list(btree_sort, ms, us)
	sysfs_time_stats_attribute_list(btree_read, ms, us)

	&sysfs_btree_nodes,
	&sysfs_btree_used_percent,
	&sysfs_btree_cache_max_chain,

	&sysfs_bset_tree_stats,
	&sysfs_cache_read_races,
	&sysfs_reclaim,
	&sysfs_flush_write,
	&sysfs_retry_flush_write,
	&sysfs_writeback_keys_done,
	&sysfs_writeback_keys_failed,

	&sysfs_trigger_gc,
	&sysfs_prune_cache,
#ifdef CONFIG_BCACHE_DEBUG
	&sysfs_verify,
	&sysfs_key_merging_disabled,
	&sysfs_expensive_debug_checks,
#endif
	&sysfs_gc_always_rewrite,
	&sysfs_btree_shrinker_disabled,
	&sysfs_copy_gc_enabled,
	&sysfs_io_disable,
	NULL
};
KTYPE(bch_cache_set_internal);
static int __bch_cache_cmp(const void *l, const void *r)
{
	return *((uint16_t *)r) - *((uint16_t *)l);
}
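
/*
 * Note: r and l are deliberately swapped, so sort() below orders bucket
 * priorities in descending order for the quantiles in priority_stats.
 */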
SHOW(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);

	sysfs_hprint(bucket_size,	bucket_bytes(ca));
	sysfs_hprint(block_size,	block_bytes(ca));
	sysfs_print(nbuckets,		ca->sb.nbuckets);
	sysfs_print(discard,		ca->discard);
	sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
	sysfs_hprint(btree_written,
		     atomic_long_read(&ca->btree_sectors_written) << 9);
	sysfs_hprint(metadata_written,
		     (atomic_long_read(&ca->meta_sectors_written) +
		      atomic_long_read(&ca->btree_sectors_written)) << 9);

	sysfs_print(io_errors,
		    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);

	if (attr == &sysfs_cache_replacement_policy)
		return bch_snprint_string_list(buf, PAGE_SIZE,
					       cache_replacement_policies,
					       CACHE_REPLACEMENT(&ca->sb));

	if (attr == &sysfs_priority_stats) {
		struct bucket *b;
		size_t n = ca->sb.nbuckets, i;
		size_t unused = 0, available = 0, dirty = 0, meta = 0;
		uint64_t sum = 0;
		/* Compute 31 quantiles */
		uint16_t q[31], *p, *cached;
		ssize_t ret;

		cached = p = vmalloc(array_size(sizeof(uint16_t),
						ca->sb.nbuckets));
		if (!p)
			return -ENOMEM;

		mutex_lock(&ca->set->bucket_lock);
		for_each_bucket(b, ca) {
			if (!GC_SECTORS_USED(b))
				unused++;
			if (GC_MARK(b) == GC_MARK_RECLAIMABLE)
				available++;
			if (GC_MARK(b) == GC_MARK_DIRTY)
				dirty++;
			if (GC_MARK(b) == GC_MARK_METADATA)
				meta++;
		}

		for (i = ca->sb.first_bucket; i < n; i++)
			p[i] = ca->buckets[i].prio;
		mutex_unlock(&ca->set->bucket_lock);

		sort(p, n, sizeof(uint16_t), __bch_cache_cmp, NULL);

		while (n &&
		       !cached[n - 1])
			--n;

		unused = ca->sb.nbuckets - n;

		while (cached < p + n &&
		       *cached == BTREE_PRIO)
			cached++, n--;

		for (i = 0; i < n; i++)
			sum += INITIAL_PRIO - cached[i];

		if (n)
			do_div(sum, n);

		for (i = 0; i < ARRAY_SIZE(q); i++)
			q[i] = INITIAL_PRIO - cached[n * (i + 1) /
				(ARRAY_SIZE(q) + 1)];

		vfree(p);

		ret = scnprintf(buf, PAGE_SIZE,
				"Unused:\t\t%zu%%\n"
				"Clean:\t\t%zu%%\n"
				"Dirty:\t\t%zu%%\n"
				"Metadata:\t%zu%%\n"
				"Average:\t%llu\n"
				"Sectors per Q:\t%zu\n"
				"Quantiles:\t[",
				unused * 100 / (size_t) ca->sb.nbuckets,
				available * 100 / (size_t) ca->sb.nbuckets,
				dirty * 100 / (size_t) ca->sb.nbuckets,
				meta * 100 / (size_t) ca->sb.nbuckets, sum,
				n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));

		for (i = 0; i < ARRAY_SIZE(q); i++)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret,
					 "%u ", q[i]);
		ret--;

		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");

		return ret;
	}

	return 0;
}
SHOW_LOCKED(bch_cache)
STORE(__bch_cache)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);
	ssize_t v;

	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		if (blk_queue_discard(bdev_get_queue(ca->bdev)))
			ca->discard = v;

		if (v != CACHE_DISCARD(&ca->sb)) {
			SET_CACHE_DISCARD(&ca->sb, v);
			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_cache_replacement_policy) {
		v = __sysfs_match_string(cache_replacement_policies, -1, buf);
		if (v < 0)
			return v;

		if ((unsigned int) v != CACHE_REPLACEMENT(&ca->sb)) {
			mutex_lock(&ca->set->bucket_lock);
			SET_CACHE_REPLACEMENT(&ca->sb, v);
			mutex_unlock(&ca->set->bucket_lock);

			bcache_write_super(ca->set);
		}
	}

	if (attr == &sysfs_clear_stats) {
		atomic_long_set(&ca->sectors_written, 0);
		atomic_long_set(&ca->btree_sectors_written, 0);
		atomic_long_set(&ca->meta_sectors_written, 0);
		atomic_set(&ca->io_count, 0);
		atomic_set(&ca->io_errors, 0);
	}

	return size;
}
STORE_LOCKED(bch_cache)
static struct attribute *bch_cache_files[] = {
	&sysfs_bucket_size,
	&sysfs_block_size,
	&sysfs_nbuckets,
	&sysfs_priority_stats,
	&sysfs_discard,
	&sysfs_written,
	&sysfs_btree_written,
	&sysfs_metadata_written,
	&sysfs_io_errors,
	&sysfs_clear_stats,
	&sysfs_cache_replacement_policy,
	NULL
};
KTYPE(bch_cache);