// SPDX-License-Identifier: GPL-2.0
/*
 * bcachefs sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#ifndef NO_BCACHEFS_SYSFS

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "sysfs.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_gc.h"
#include "buckets.h"
#include "clock.h"
#include "disk_groups.h"
#include "ec.h"
#include "inode.h"
#include "journal.h"
#include "keylist.h"
#include "move.h"
#include "movinggc.h"
#include "nocow_locking.h"
#include "opts.h"
#include "rebalance.h"
#include "replicas.h"
#include "super-io.h"
#include "tests.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>

#include "util.h"

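/*
 * Sysfs interface for a bcachefs filesystem: this file defines the
 * attributes for the top-level filesystem kobject and its
 * subdirectories (internal debug info, options, time stats, persistent
 * counters), plus the kobject for each member device.
 */
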
#define SYSFS_OPS(type)							\
const struct sysfs_ops type ## _sysfs_ops = {				\
	.show	= type ## _show,					\
	.store	= type ## _store					\
}

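/*
 * SHOW()/STORE() hide the sysfs boilerplate: SHOW() declares a
 * <fn>_to_text() helper that prints into a printbuf, and generates the
 * <fn>_show() wrapper that newline-terminates the output, turns an
 * allocation failure into -ENOMEM, and copies at most PAGE_SIZE - 1
 * bytes into the sysfs buffer. STORE() likewise wraps <fn>_store_inner()
 * so bcachefs-private error codes are mapped to standard errno values
 * via bch2_err_class().
 */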
#define SHOW(fn)							\
static ssize_t fn ## _to_text(struct printbuf *,			\
			      struct kobject *, struct attribute *);	\
									\
static ssize_t fn ## _show(struct kobject *kobj, struct attribute *attr,\
			   char *buf)					\
{									\
	struct printbuf out = PRINTBUF;					\
	ssize_t ret = fn ## _to_text(&out, kobj, attr);			\
									\
	if (out.pos && out.buf[out.pos - 1] != '\n')			\
		prt_newline(&out);					\
									\
	if (!ret && out.allocation_failure)				\
		ret = -ENOMEM;						\
									\
	if (!ret) {							\
		ret = min_t(size_t, out.pos, PAGE_SIZE - 1);		\
		memcpy(buf, out.buf, ret);				\
	}								\
	printbuf_exit(&out);						\
	return bch2_err_class(ret);					\
}									\
									\
static ssize_t fn ## _to_text(struct printbuf *out, struct kobject *kobj,\
			      struct attribute *attr)

#define STORE(fn)							\
static ssize_t fn ## _store_inner(struct kobject *, struct attribute *,\
				  const char *, size_t);		\
									\
static ssize_t fn ## _store(struct kobject *kobj, struct attribute *attr,\
			    const char *buf, size_t size)		\
{									\
	return bch2_err_class(fn##_store_inner(kobj, attr, buf, size));	\
}									\
									\
static ssize_t fn ## _store_inner(struct kobject *kobj, struct attribute *attr,\
				  const char *buf, size_t size)

#define __sysfs_attribute(_name, _mode)					\
	static struct attribute sysfs_##_name =				\
		{ .name = #_name, .mode = _mode }

#define write_attribute(n)	__sysfs_attribute(n, 0200)
#define read_attribute(n)	__sysfs_attribute(n, 0444)
#define rw_attribute(n)		__sysfs_attribute(n, 0644)

#define sysfs_printf(file, fmt, ...)					\
do {									\
	if (attr == &sysfs_ ## file)					\
		prt_printf(out, fmt "\n", __VA_ARGS__);			\
} while (0)

#define sysfs_print(file, var)						\
do {									\
	if (attr == &sysfs_ ## file)					\
		snprint(out, var);					\
} while (0)

#define sysfs_hprint(file, val)						\
do {									\
	if (attr == &sysfs_ ## file)					\
		prt_human_readable_s64(out, val);			\
} while (0)

#define sysfs_strtoul(file, var)					\
do {									\
	if (attr == &sysfs_ ## file)					\
		return strtoul_safe(buf, var) ?: (ssize_t) size;	\
} while (0)

#define sysfs_strtoul_clamp(file, var, min, max)			\
do {									\
	if (attr == &sysfs_ ## file)					\
		return strtoul_safe_clamp(buf, var, min, max)		\
			?: (ssize_t) size;				\
} while (0)

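/*
 * Statement-expression helper: on a parse error this returns the error
 * from the *calling* store function; otherwise it evaluates to the
 * parsed value.
 */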
#define strtoul_or_return(cp)						\
({									\
	unsigned long _v;						\
	int _r = kstrtoul(cp, 10, &_v);					\
	if (_r)								\
		return _r;						\
	_v;								\
})

write_attribute(trigger_gc);
write_attribute(trigger_discards);
write_attribute(trigger_invalidates);
write_attribute(prune_cache);
write_attribute(btree_wakeup);
rw_attribute(btree_gc_periodic);
rw_attribute(gc_gens_pos);

read_attribute(uuid);
read_attribute(minor);
read_attribute(bucket_size);
read_attribute(first_bucket);
read_attribute(nbuckets);
rw_attribute(durability);
read_attribute(io_done);
read_attribute(io_errors);
write_attribute(io_errors_reset);

read_attribute(io_latency_read);
read_attribute(io_latency_write);
read_attribute(io_latency_stats_read);
read_attribute(io_latency_stats_write);
read_attribute(congested);

read_attribute(btree_write_stats);

read_attribute(btree_cache_size);
read_attribute(compression_stats);
read_attribute(journal_debug);
read_attribute(btree_updates);
read_attribute(btree_cache);
read_attribute(btree_key_cache);
read_attribute(stripes_heap);
read_attribute(open_buckets);
read_attribute(open_buckets_partial);
read_attribute(write_points);
read_attribute(nocow_lock_table);

#ifdef BCH_WRITE_REF_DEBUG
read_attribute(write_refs);

static const char * const bch2_write_refs[] = {
#define x(n)	#n,
	BCH_WRITE_REFS()
#undef x
	NULL
};

static void bch2_write_refs_to_text(struct printbuf *out, struct bch_fs *c)
{
	bch2_printbuf_tabstop_push(out, 24);

	for (unsigned i = 0; i < ARRAY_SIZE(c->writes); i++) {
		prt_str(out, bch2_write_refs[i]);
		prt_tab(out);
		prt_printf(out, "%li", atomic_long_read(&c->writes[i]));
		prt_newline(out);
	}
}
#endif

read_attribute(internal_uuid);
read_attribute(disk_groups);

read_attribute(has_data);
read_attribute(alloc_debug);

#define x(t, n, ...) read_attribute(t);
BCH_PERSISTENT_COUNTERS()
#undef x

rw_attribute(discard);
rw_attribute(label);

rw_attribute(copy_gc_enabled);
read_attribute(copy_gc_wait);

rw_attribute(rebalance_enabled);
sysfs_pd_controller_attribute(rebalance);
read_attribute(rebalance_status);
rw_attribute(promote_whole_extents);

read_attribute(new_stripes);

read_attribute(io_timers_read);
read_attribute(io_timers_write);

read_attribute(moving_ctxts);

#ifdef CONFIG_BCACHEFS_TESTS
write_attribute(perf_test);
#endif /* CONFIG_BCACHEFS_TESTS */

#define x(_name)						\
	static struct attribute sysfs_time_stat_##_name =	\
		{ .name = #_name, .mode = 0444 };
BCH_TIME_STATS()
#undef x

static struct attribute sysfs_state_rw = {
	.name = "state",
	.mode = 0444,
};

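/*
 * Every node in the btree node cache is a full btree_bytes(c)
 * allocation, so the cache footprint is simply the live node count
 * times the node size.
 */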
static size_t bch2_btree_cache_size(struct bch_fs *c)
{
	size_t ret = 0;
	struct btree *b;

	mutex_lock(&c->btree_cache.lock);
	list_for_each_entry(b, &c->btree_cache.live, list)
		ret += btree_bytes(c);

	mutex_unlock(&c->btree_cache.lock);
	return ret;
}

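/*
 * Walks every extent in all btrees that carry pointers and tallies, per
 * compression state, the extent counts and sector totals reported by
 * the compression_stats file. Requires a started filesystem.
 */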
static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct btree_trans *trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	enum btree_id id;
	u64 nr_uncompressed_extents = 0,
	    nr_compressed_extents = 0,
	    nr_incompressible_extents = 0,
	    uncompressed_sectors = 0,
	    incompressible_sectors = 0,
	    compressed_sectors_compressed = 0,
	    compressed_sectors_uncompressed = 0;
	int ret = 0;

	if (!test_bit(BCH_FS_STARTED, &c->flags))
		return -EPERM;

	trans = bch2_trans_get(c);

	for (id = 0; id < BTREE_ID_NR; id++) {
		if (!btree_type_has_ptrs(id))
			continue;

		ret = for_each_btree_key2(trans, iter, id, POS_MIN,
					  BTREE_ITER_ALL_SNAPSHOTS, k, ({
			struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
			const union bch_extent_entry *entry;
			struct extent_ptr_decoded p;
			bool compressed = false, uncompressed = false, incompressible = false;

			bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
				switch (p.crc.compression_type) {
				case BCH_COMPRESSION_TYPE_none:
					uncompressed = true;
					uncompressed_sectors += k.k->size;
					break;
				case BCH_COMPRESSION_TYPE_incompressible:
					incompressible = true;
					incompressible_sectors += k.k->size;
					break;
				default:
					compressed_sectors_compressed +=
						p.crc.compressed_size;
					compressed_sectors_uncompressed +=
						p.crc.uncompressed_size;
					compressed = true;
					break;
				}
			}

			if (incompressible)
				nr_incompressible_extents++;
			else if (uncompressed)
				nr_uncompressed_extents++;
			else if (compressed)
				nr_compressed_extents++;
			0;
		}));
	}

	bch2_trans_put(trans);

	if (ret)
		return ret;

	prt_printf(out, "uncompressed:\n");
	prt_printf(out, "	nr extents:		%llu\n", nr_uncompressed_extents);
	prt_printf(out, "	size:			");
	prt_human_readable_u64(out, uncompressed_sectors << 9);
	prt_printf(out, "\n");

	prt_printf(out, "compressed:\n");
	prt_printf(out, "	nr extents:		%llu\n", nr_compressed_extents);
	prt_printf(out, "	compressed size:	");
	prt_human_readable_u64(out, compressed_sectors_compressed << 9);
	prt_printf(out, "\n");
	prt_printf(out, "	uncompressed size:	");
	prt_human_readable_u64(out, compressed_sectors_uncompressed << 9);
	prt_printf(out, "\n");

	prt_printf(out, "incompressible:\n");
	prt_printf(out, "	nr extents:		%llu\n", nr_incompressible_extents);
	prt_printf(out, "	size:			");
	prt_human_readable_u64(out, incompressible_sectors << 9);
	prt_printf(out, "\n");
	return 0;
}

static void bch2_gc_gens_pos_to_text(struct printbuf *out, struct bch_fs *c)
{
	prt_printf(out, "%s: ", bch2_btree_id_str(c->gc_gens_btree));
	bch2_bpos_to_text(out, c->gc_gens_pos);
	prt_printf(out, "\n");
}

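/*
 * Debugging aid, driven by the btree_wakeup attribute: wakes every
 * btree_trans currently blocked on a btree node lock, useful when
 * diagnosing stuck lock waits.
 */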
static void bch2_btree_wakeup_all(struct bch_fs *c)
{
	struct btree_trans *trans;

	seqmutex_lock(&c->btree_trans_lock);
	list_for_each_entry(trans, &c->btree_trans_list, list) {
		struct btree_bkey_cached_common *b = READ_ONCE(trans->locking);

		if (b)
			six_lock_wakeup_all(&b->lock);
	}
	seqmutex_unlock(&c->btree_trans_lock);
}

SHOW(bch2_fs)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);

	sysfs_print(minor, c->minor);
	sysfs_printf(internal_uuid, "%pU", c->sb.uuid.b);

	sysfs_hprint(btree_cache_size, bch2_btree_cache_size(c));

	if (attr == &sysfs_btree_write_stats)
		bch2_btree_write_stats_to_text(out, c);

	sysfs_printf(btree_gc_periodic, "%u", (int) c->btree_gc_periodic);

	if (attr == &sysfs_gc_gens_pos)
		bch2_gc_gens_pos_to_text(out, c);

	sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled);

	sysfs_printf(rebalance_enabled, "%i", c->rebalance.enabled);
	sysfs_pd_controller_show(rebalance, &c->rebalance.pd); /* XXX */

	if (attr == &sysfs_copy_gc_wait)
		bch2_copygc_wait_to_text(out, c);

	if (attr == &sysfs_rebalance_status)
		bch2_rebalance_status_to_text(out, c);

	sysfs_print(promote_whole_extents, c->promote_whole_extents);

	/* Debugging: */

	if (attr == &sysfs_journal_debug)
		bch2_journal_debug_to_text(out, &c->journal);

	if (attr == &sysfs_btree_updates)
		bch2_btree_updates_to_text(out, c);

	if (attr == &sysfs_btree_cache)
		bch2_btree_cache_to_text(out, c);

	if (attr == &sysfs_btree_key_cache)
		bch2_btree_key_cache_to_text(out, &c->btree_key_cache);

	if (attr == &sysfs_stripes_heap)
		bch2_stripes_heap_to_text(out, c);

	if (attr == &sysfs_open_buckets)
		bch2_open_buckets_to_text(out, c);

	if (attr == &sysfs_open_buckets_partial)
		bch2_open_buckets_partial_to_text(out, c);

	if (attr == &sysfs_write_points)
		bch2_write_points_to_text(out, c);

	if (attr == &sysfs_compression_stats)
		bch2_compression_stats_to_text(out, c);

	if (attr == &sysfs_new_stripes)
		bch2_new_stripes_to_text(out, c);

	if (attr == &sysfs_io_timers_read)
		bch2_io_timers_to_text(out, &c->io_clock[READ]);

	if (attr == &sysfs_io_timers_write)
		bch2_io_timers_to_text(out, &c->io_clock[WRITE]);

	if (attr == &sysfs_moving_ctxts)
		bch2_fs_moving_ctxts_to_text(out, c);

#ifdef BCH_WRITE_REF_DEBUG
	if (attr == &sysfs_write_refs)
		bch2_write_refs_to_text(out, c);
#endif

	if (attr == &sysfs_nocow_lock_table)
		bch2_nocow_locks_to_text(out, &c->nocow_locks);

	if (attr == &sysfs_disk_groups)
		bch2_disk_groups_to_text(out, c);

	return 0;
}

STORE(bch2_fs)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);

	if (attr == &sysfs_btree_gc_periodic) {
		ssize_t ret = strtoul_safe(buf, c->btree_gc_periodic)
			?: (ssize_t) size;

		wake_up_process(c->gc_thread);
		return ret;
	}

	if (attr == &sysfs_copy_gc_enabled) {
		ssize_t ret = strtoul_safe(buf, c->copy_gc_enabled)
			?: (ssize_t) size;

		if (c->copygc_thread)
			wake_up_process(c->copygc_thread);
		return ret;
	}

	if (attr == &sysfs_rebalance_enabled) {
		ssize_t ret = strtoul_safe(buf, c->rebalance.enabled)
			?: (ssize_t) size;

		rebalance_wakeup(c);
		return ret;
	}

	sysfs_pd_controller_store(rebalance, &c->rebalance.pd);

	sysfs_strtoul(promote_whole_extents, c->promote_whole_extents);

	/* Debugging: */

	if (!test_bit(BCH_FS_STARTED, &c->flags))
		return -EPERM;

	if (!test_bit(BCH_FS_RW, &c->flags))
		return -EROFS;

	if (attr == &sysfs_prune_cache) {
		struct shrink_control sc;

		sc.gfp_mask = GFP_KERNEL;
		sc.nr_to_scan = strtoul_or_return(buf);
		c->btree_cache.shrink->scan_objects(c->btree_cache.shrink, &sc);
	}

	if (attr == &sysfs_btree_wakeup)
		bch2_btree_wakeup_all(c);

	if (attr == &sysfs_trigger_gc) {
		/*
		 * Full gc is currently incompatible with btree key cache:
		 */
#if 0
		down_read(&c->state_lock);
		bch2_gc(c, false, false);
		up_read(&c->state_lock);
#else
		bch2_gc_gens(c);
#endif
	}

	if (attr == &sysfs_trigger_discards)
		bch2_do_discards(c);

	if (attr == &sysfs_trigger_invalidates)
		bch2_do_invalidates(c);

#ifdef CONFIG_BCACHEFS_TESTS
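	/*
	 * perf_test expects "<test> <nr> <threads>"; <nr> is parsed with
	 * bch2_strtoull_h() and so accepts human-readable suffixes
	 * (e.g., illustratively, "rand_insert 1M 8").
	 */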
	if (attr == &sysfs_perf_test) {
		char *tmp = kstrdup(buf, GFP_KERNEL), *p = tmp;
		char *test, *nr_str, *threads_str;
		unsigned threads;
		u64 nr;
		int ret = -EINVAL;

		if (!tmp)
			return -ENOMEM;

		test		= strsep(&p, " \t\n");
		nr_str		= strsep(&p, " \t\n");
		threads_str	= strsep(&p, " \t\n");

		if (threads_str &&
		    !(ret = kstrtouint(threads_str, 10, &threads)) &&
		    !(ret = bch2_strtoull_h(nr_str, &nr)))
			ret = bch2_btree_perf_test(c, test, nr, threads);
		kfree(tmp);

		/* on failure, report the error through the return value: */
		if (ret)
			size = ret;
	}
#endif
	return size;
}
SYSFS_OPS(bch2_fs);

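/*
 * Top-level per-filesystem attributes; the rest live in the
 * subdirectories defined below. Illustrative shell usage (the directory
 * name is the filesystem's external UUID):
 *
 *	cat /sys/fs/bcachefs/<uuid>/btree_cache_size
 *	echo 1 > /sys/fs/bcachefs/<uuid>/internal/trigger_gc
 */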
struct attribute *bch2_fs_files[] = {
	&sysfs_minor,
	&sysfs_btree_cache_size,
	&sysfs_btree_write_stats,

	&sysfs_promote_whole_extents,

	&sysfs_compression_stats,

#ifdef CONFIG_BCACHEFS_TESTS
	&sysfs_perf_test,
#endif
	NULL
};

/* counters dir */

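/*
 * One file per counter in BCH_PERSISTENT_COUNTERS(); each shows the
 * value accumulated since mount and since filesystem creation, in
 * human-readable units.
 */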
SHOW(bch2_fs_counters)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, counters_kobj);
	u64 counter = 0;
	u64 counter_since_mount = 0;

	printbuf_tabstop_push(out, 32);

#define x(t, ...) \
	if (attr == &sysfs_##t) {					\
		counter = percpu_u64_get(&c->counters[BCH_COUNTER_##t]);\
		counter_since_mount = counter - c->counters_on_mount[BCH_COUNTER_##t];\
		prt_printf(out, "since mount:");			\
		prt_tab(out);						\
		prt_human_readable_u64(out, counter_since_mount);	\
		prt_newline(out);					\
									\
		prt_printf(out, "since filesystem creation:");		\
		prt_tab(out);						\
		prt_human_readable_u64(out, counter);			\
		prt_newline(out);					\
	}
	BCH_PERSISTENT_COUNTERS()
#undef x
	return 0;
}

STORE(bch2_fs_counters)
{
	return 0;
}

SYSFS_OPS(bch2_fs_counters);

struct attribute *bch2_fs_counters_files[] = {
#define x(t, ...) \
	&sysfs_##t,
	BCH_PERSISTENT_COUNTERS()
#undef x
	NULL
};

/* internal dir - just a wrapper */

SHOW(bch2_fs_internal)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, internal);

	return bch2_fs_to_text(out, &c->kobj, attr);
}

STORE(bch2_fs_internal)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, internal);

	return bch2_fs_store(&c->kobj, attr, buf, size);
}
SYSFS_OPS(bch2_fs_internal);

struct attribute *bch2_fs_internal_files[] = {
	&sysfs_journal_debug,
	&sysfs_btree_updates,
	&sysfs_btree_cache,
	&sysfs_btree_key_cache,
	&sysfs_new_stripes,
	&sysfs_stripes_heap,
	&sysfs_open_buckets,
	&sysfs_open_buckets_partial,
	&sysfs_write_points,
#ifdef BCH_WRITE_REF_DEBUG
	&sysfs_write_refs,
#endif
	&sysfs_nocow_lock_table,
	&sysfs_io_timers_read,
	&sysfs_io_timers_write,

	&sysfs_trigger_gc,
	&sysfs_trigger_discards,
	&sysfs_trigger_invalidates,
	&sysfs_prune_cache,
	&sysfs_btree_wakeup,

	&sysfs_gc_gens_pos,

	&sysfs_copy_gc_enabled,
	&sysfs_copy_gc_wait,

	&sysfs_rebalance_enabled,
	&sysfs_rebalance_status,
	sysfs_pd_controller_files(rebalance),

	&sysfs_moving_ctxts,

	&sysfs_internal_uuid,

	&sysfs_disk_groups,
	NULL
};

/* options */

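/*
 * Each filesystem option flagged OPT_FS gets its own file here; a store
 * parses and validates the new value, then updates both the superblock
 * and the in-memory options.
 */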
SHOW(bch2_fs_opts_dir)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
	const struct bch_option *opt = container_of(attr, struct bch_option, attr);
	int id = opt - bch2_opt_table;
	u64 v = bch2_opt_get_by_id(&c->opts, id);

	bch2_opt_to_text(out, c, c->disk_sb.sb, opt, v, OPT_SHOW_FULL_LIST);
	prt_char(out, '\n');

	return 0;
}

STORE(bch2_fs_opts_dir)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, opts_dir);
	const struct bch_option *opt = container_of(attr, struct bch_option, attr);
	int ret, id = opt - bch2_opt_table;
	char *tmp;
	u64 v;

	/*
	 * We don't need to take c->writes for correctness, but it eliminates an
	 * unsightly error message in the dmesg log when we're RO:
	 */
	if (unlikely(!bch2_write_ref_tryget(c, BCH_WRITE_REF_sysfs)))
		return -EROFS;

	tmp = kstrdup(buf, GFP_KERNEL);
	if (!tmp) {
		ret = -ENOMEM;
		goto err;
	}

	ret = bch2_opt_parse(c, opt, strim(tmp), &v, NULL);
	kfree(tmp);

	if (ret < 0)
		goto err;

	ret = bch2_opt_check_may_set(c, id, v);
	if (ret < 0)
		goto err;

	bch2_opt_set_sb(c, opt, v);
	bch2_opt_set_by_id(&c->opts, id, v);

	if ((id == Opt_background_target ||
	     id == Opt_background_compression) && v)
		bch2_set_rebalance_needs_scan(c, 0);

	ret = size;
err:
	bch2_write_ref_put(c, BCH_WRITE_REF_sysfs);
	return ret;
}
SYSFS_OPS(bch2_fs_opts_dir);

struct attribute *bch2_fs_opts_dir_files[] = { NULL };

int bch2_opts_create_sysfs_files(struct kobject *kobj)
{
	const struct bch_option *i;
	int ret;

	for (i = bch2_opt_table;
	     i < bch2_opt_table + bch2_opts_nr;
	     i++) {
		if (!(i->flags & OPT_FS))
			continue;

		ret = sysfs_create_file(kobj, &i->attr);
		if (ret)
			return ret;
	}

	return 0;
}

/* time stats */

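/*
 * Read-only latency/duration statistics for each event in
 * BCH_TIME_STATS(); writes to these files are accepted but ignored.
 */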
SHOW(bch2_fs_time_stats)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, time_stats);

#define x(name)								\
	if (attr == &sysfs_time_stat_##name)				\
		bch2_time_stats_to_text(out, &c->times[BCH_TIME_##name]);
	BCH_TIME_STATS()
#undef x

	return 0;
}

STORE(bch2_fs_time_stats)
{
	return size;
}
SYSFS_OPS(bch2_fs_time_stats);

struct attribute *bch2_fs_time_stats_files[] = {
#define x(name)						\
	&sysfs_time_stat_##name,
	BCH_TIME_STATS()
#undef x
	NULL
};

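/*
 * The alloc_debug file: a per-data-type table of bucket, sector and
 * fragmentation counts, followed by reserve watermarks and open-bucket
 * state for this device.
 */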
static void dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
{
	struct bch_fs *c = ca->fs;
	struct bch_dev_usage stats = bch2_dev_usage_read(ca);
	unsigned i, nr[BCH_DATA_NR];

	memset(nr, 0, sizeof(nr));

	for (i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
		nr[c->open_buckets[i].data_type]++;

	printbuf_tabstop_push(out, 8);
	printbuf_tabstop_push(out, 16);
	printbuf_tabstop_push(out, 16);
	printbuf_tabstop_push(out, 16);
	printbuf_tabstop_push(out, 16);

	prt_tab(out);
	prt_str(out, "buckets");
	prt_tab_rjust(out);
	prt_str(out, "sectors");
	prt_tab_rjust(out);
	prt_str(out, "fragmented");
	prt_tab_rjust(out);
	prt_newline(out);

	for (i = 0; i < BCH_DATA_NR; i++) {
		prt_str(out, bch2_data_types[i]);
		prt_tab(out);
		prt_u64(out, stats.d[i].buckets);
		prt_tab_rjust(out);
		prt_u64(out, stats.d[i].sectors);
		prt_tab_rjust(out);
		prt_u64(out, stats.d[i].fragmented);
		prt_tab_rjust(out);
		prt_newline(out);
	}

	prt_str(out, "ec");
	prt_tab(out);
	prt_u64(out, stats.buckets_ec);
	prt_tab_rjust(out);
	prt_newline(out);

	prt_newline(out);

	prt_printf(out, "reserves:");
	prt_newline(out);
	for (i = 0; i < BCH_WATERMARK_NR; i++) {
		prt_str(out, bch2_watermarks[i]);
		prt_tab(out);
		prt_u64(out, bch2_dev_buckets_reserved(ca, i));
		prt_tab_rjust(out);
		prt_newline(out);
	}

	prt_newline(out);

	printbuf_tabstops_reset(out);
	printbuf_tabstop_push(out, 24);

	prt_str(out, "freelist_wait");
	prt_tab(out);
	prt_str(out, c->freelist_wait.list.first ? "waiting" : "empty");
	prt_newline(out);

	prt_str(out, "open buckets allocated");
	prt_tab(out);
	prt_u64(out, OPEN_BUCKETS_COUNT - c->open_buckets_nr_free);
	prt_newline(out);

	prt_str(out, "open buckets this dev");
	prt_tab(out);
	prt_u64(out, ca->nr_open_buckets);
	prt_newline(out);

	prt_str(out, "open buckets total");
	prt_tab(out);
	prt_u64(out, OPEN_BUCKETS_COUNT);
	prt_newline(out);

	prt_str(out, "open_buckets_wait");
	prt_tab(out);
	prt_str(out, c->open_buckets_wait.list.first ? "waiting" : "empty");
	prt_newline(out);

	prt_str(out, "open_buckets_btree");
	prt_tab(out);
	prt_u64(out, nr[BCH_DATA_btree]);
	prt_newline(out);

	prt_str(out, "open_buckets_user");
	prt_tab(out);
	prt_u64(out, nr[BCH_DATA_user]);
	prt_newline(out);

	prt_str(out, "buckets_to_invalidate");
	prt_tab(out);
	prt_u64(out, should_invalidate_buckets(ca, stats));
	prt_newline(out);

	prt_str(out, "btree reserve cache");
	prt_tab(out);
	prt_u64(out, c->btree_reserve_cache_nr);
	prt_newline(out);
}

static const char * const bch2_rw[] = {
	"read",
	"write",
	NULL
};

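/*
 * io_done: bytes read/written on this device so far, broken out by data
 * type (the per-type counters are kept in sectors, hence the << 9).
 */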
static void dev_io_done_to_text(struct printbuf *out, struct bch_dev *ca)
{
	int rw, i;

	for (rw = 0; rw < 2; rw++) {
		prt_printf(out, "%s:\n", bch2_rw[rw]);

		for (i = 1; i < BCH_DATA_NR; i++)
			prt_printf(out, "%-12s:%12llu\n",
				   bch2_data_types[i],
				   percpu_u64_get(&ca->io_done->sectors[rw][i]) << 9);
	}
}

SHOW(bch2_dev)
{
	struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
	struct bch_fs *c = ca->fs;

	sysfs_printf(uuid, "%pU\n", ca->uuid.b);

	sysfs_print(bucket_size, bucket_bytes(ca));
	sysfs_print(first_bucket, ca->mi.first_bucket);
	sysfs_print(nbuckets, ca->mi.nbuckets);
	sysfs_print(durability, ca->mi.durability);
	sysfs_print(discard, ca->mi.discard);

	if (attr == &sysfs_label) {
		if (ca->mi.group)
			bch2_disk_path_to_text(out, c, ca->mi.group - 1);
		prt_char(out, '\n');
	}

	if (attr == &sysfs_has_data) {
		prt_bitflags(out, bch2_data_types, bch2_dev_has_data(c, ca));
		prt_char(out, '\n');
	}

	if (attr == &sysfs_state_rw) {
		prt_string_option(out, bch2_member_states, ca->mi.state);
		prt_char(out, '\n');
	}

	if (attr == &sysfs_io_done)
		dev_io_done_to_text(out, ca);

	if (attr == &sysfs_io_errors)
		bch2_dev_io_errors_to_text(out, ca);

	sysfs_print(io_latency_read, atomic64_read(&ca->cur_latency[READ]));
	sysfs_print(io_latency_write, atomic64_read(&ca->cur_latency[WRITE]));

	if (attr == &sysfs_io_latency_stats_read)
		bch2_time_stats_to_text(out, &ca->io_latency[READ]);

	if (attr == &sysfs_io_latency_stats_write)
		bch2_time_stats_to_text(out, &ca->io_latency[WRITE]);

	sysfs_printf(congested, "%u%%",
		     clamp(atomic_read(&ca->congested), 0, CONGESTED_MAX)
		     * 100 / CONGESTED_MAX);

	if (attr == &sysfs_alloc_debug)
		dev_alloc_debug_to_text(out, ca);

	return 0;
}

STORE(bch2_dev)
{
	struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
	struct bch_fs *c = ca->fs;
	struct bch_member *mi;

	if (attr == &sysfs_discard) {
		bool v = strtoul_or_return(buf);

		mutex_lock(&c->sb_lock);
		mi = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);

		if (v != BCH_MEMBER_DISCARD(mi)) {
			SET_BCH_MEMBER_DISCARD(mi, v);
			bch2_write_super(c);
		}
		mutex_unlock(&c->sb_lock);
	}

	if (attr == &sysfs_durability) {
		u64 v = strtoul_or_return(buf);

		mutex_lock(&c->sb_lock);
		mi = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);

		/*
		 * BCH_MEMBER_DURABILITY() is stored biased by one, so that
		 * zero can mean "unset":
		 */
		if (v + 1 != BCH_MEMBER_DURABILITY(mi)) {
			SET_BCH_MEMBER_DURABILITY(mi, v + 1);
			bch2_write_super(c);
		}
		mutex_unlock(&c->sb_lock);
	}

	if (attr == &sysfs_label) {
		char *tmp;
		int ret;

		tmp = kstrdup(buf, GFP_KERNEL);
		if (!tmp)
			return -ENOMEM;

		ret = bch2_dev_group_set(c, ca, strim(tmp));
		kfree(tmp);
		if (ret)
			return ret;
	}

	if (attr == &sysfs_io_errors_reset)
		bch2_dev_errors_reset(ca);

	return size;
}
SYSFS_OPS(bch2_dev);

struct attribute *bch2_dev_files[] = {
	&sysfs_uuid,
	&sysfs_bucket_size,
	&sysfs_first_bucket,
	&sysfs_nbuckets,
	&sysfs_durability,

	/* settings: */
	&sysfs_discard,
	&sysfs_state_rw,
	&sysfs_label,

	&sysfs_has_data,
	&sysfs_io_done,
	&sysfs_io_errors,
	&sysfs_io_errors_reset,

	&sysfs_io_latency_read,
	&sysfs_io_latency_write,
	&sysfs_io_latency_stats_read,
	&sysfs_io_latency_stats_write,
	&sysfs_congested,

	/* debug: */
	&sysfs_alloc_debug,
	NULL
};

#endif /* NO_BCACHEFS_SYSFS */