// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2023 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_sysfs.h"
#include "xfs_btree.h"
#include "xfs_super.h"
#include "scrub/scrub.h"
#include "scrub/stats.h"
#include "scrub/trace.h"

struct xchk_scrub_stats {
	/* all 32-bit counters here */

	/* checking stats */
	uint32_t		invocations;
	uint32_t		clean;
	uint32_t		corrupt;
	uint32_t		preen;
	uint32_t		xfail;
	uint32_t		xcorrupt;
	uint32_t		incomplete;
	uint32_t		warning;
	uint32_t		retries;

	/* repair stats */
	uint32_t		repair_invocations;
	uint32_t		repair_success;

	/* all 64-bit items here */

	/* runtimes */
	uint64_t		checktime_us;
	uint64_t		repairtime_us;

	/* non-counter state must go at the end for clearall */
	spinlock_t		css_lock;
};

struct xchk_stats {
	struct dentry		*cs_debugfs;
	struct xchk_scrub_stats	cs_stats[XFS_SCRUB_TYPE_NR];
};

static struct xchk_stats	global_stats;

static const char *name_map[XFS_SCRUB_TYPE_NR] = {
	[XFS_SCRUB_TYPE_SB]		= "sb",
	[XFS_SCRUB_TYPE_AGF]		= "agf",
	[XFS_SCRUB_TYPE_AGFL]		= "agfl",
	[XFS_SCRUB_TYPE_AGI]		= "agi",
	[XFS_SCRUB_TYPE_BNOBT]		= "bnobt",
	[XFS_SCRUB_TYPE_CNTBT]		= "cntbt",
	[XFS_SCRUB_TYPE_INOBT]		= "inobt",
	[XFS_SCRUB_TYPE_FINOBT]		= "finobt",
	[XFS_SCRUB_TYPE_RMAPBT]		= "rmapbt",
	[XFS_SCRUB_TYPE_REFCNTBT]	= "refcountbt",
	[XFS_SCRUB_TYPE_INODE]		= "inode",
	[XFS_SCRUB_TYPE_BMBTD]		= "bmapbtd",
	[XFS_SCRUB_TYPE_BMBTA]		= "bmapbta",
	[XFS_SCRUB_TYPE_BMBTC]		= "bmapbtc",
	[XFS_SCRUB_TYPE_DIR]		= "directory",
	[XFS_SCRUB_TYPE_XATTR]		= "xattr",
	[XFS_SCRUB_TYPE_SYMLINK]	= "symlink",
	[XFS_SCRUB_TYPE_PARENT]		= "parent",
	[XFS_SCRUB_TYPE_RTBITMAP]	= "rtbitmap",
	[XFS_SCRUB_TYPE_RTSUM]		= "rtsummary",
	[XFS_SCRUB_TYPE_UQUOTA]		= "usrquota",
	[XFS_SCRUB_TYPE_GQUOTA]		= "grpquota",
	[XFS_SCRUB_TYPE_PQUOTA]		= "prjquota",
	[XFS_SCRUB_TYPE_FSCOUNTERS]	= "fscounters",
};

/* Format the scrub stats into a text buffer, similar to pcp style. */
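/*
 * Each output line is the scrub type name followed by, in order:
 * invocations, clean, corrupt, preen, xfail, xcorrupt, incomplete,
 * warning, retries, checktime_us, repair_invocations, repair_success,
 * and repairtime_us, matching the scnprintf() format below.
 */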
STATIC ssize_t
xchk_stats_format(
	struct xchk_stats	*cs,
	char			*buf,
	size_t			remaining)
{
	struct xchk_scrub_stats	*css = &cs->cs_stats[0];
	unsigned int		i;
	ssize_t			copied = 0;
	int			ret = 0;

	for (i = 0; i < XFS_SCRUB_TYPE_NR; i++, css++) {
		if (!name_map[i])
			continue;

		ret = scnprintf(buf, remaining,
				"%s %u %u %u %u %u %u %u %u %u %llu %u %u %llu\n",
				name_map[i],
				(unsigned int)css->invocations,
				(unsigned int)css->clean,
				(unsigned int)css->corrupt,
				(unsigned int)css->preen,
				(unsigned int)css->xfail,
				(unsigned int)css->xcorrupt,
				(unsigned int)css->incomplete,
				(unsigned int)css->warning,
				(unsigned int)css->retries,
				(unsigned long long)css->checktime_us,
				(unsigned int)css->repair_invocations,
				(unsigned int)css->repair_success,
				(unsigned long long)css->repairtime_us);
		if (ret <= 0)
			break;

		remaining -= ret;
		copied += ret;
		buf += ret;
	}

	return copied > 0 ? copied : ret;
}

/* Estimate the worst case buffer size required to hold the whole report. */
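/*
 * For example: the struct currently has eleven 32-bit counters and two
 * 64-bit counters, so the digits and separators alone are budgeted at
 * roughly 11 * 11 + 2 * 21 bytes per line; any alignment padding before
 * checktime_us only inflates the 32-bit term, which is harmless for a
 * worst-case estimate.
 */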
STATIC size_t
xchk_stats_estimate_bufsize(
	struct xchk_stats	*cs)
{
	struct xchk_scrub_stats	*css = &cs->cs_stats[0];
	unsigned int		i;
	size_t			field_width;
	size_t			ret = 0;

	/* 4294967295 plus one space for each u32 field */
	field_width = 11 * (offsetof(struct xchk_scrub_stats, checktime_us) /
			    sizeof(uint32_t));

	/* 18446744073709551615 plus one space for each u64 field */
	field_width += 21 * ((offsetof(struct xchk_scrub_stats, css_lock) -
			      offsetof(struct xchk_scrub_stats, checktime_us)) /
			     sizeof(uint64_t));

	for (i = 0; i < XFS_SCRUB_TYPE_NR; i++, css++) {
		if (!name_map[i])
			continue;

		/* name plus one space */
		ret += 1 + strlen(name_map[i]);

		/* all fields, plus newline */
		ret += field_width + 1;
	}

	return ret;
}

/* Clear all counters. */
STATIC void
xchk_stats_clearall(
	struct xchk_stats	*cs)
{
	struct xchk_scrub_stats	*css = &cs->cs_stats[0];
	unsigned int		i;

	for (i = 0; i < XFS_SCRUB_TYPE_NR; i++, css++) {
		spin_lock(&css->css_lock);
		memset(css, 0, offsetof(struct xchk_scrub_stats, css_lock));
		spin_unlock(&css->css_lock);
	}
}

#define XFS_SCRUB_OFLAG_UNCLEAN	(XFS_SCRUB_OFLAG_CORRUPT | \
				 XFS_SCRUB_OFLAG_PREEN | \
				 XFS_SCRUB_OFLAG_XFAIL | \
				 XFS_SCRUB_OFLAG_XCORRUPT | \
				 XFS_SCRUB_OFLAG_INCOMPLETE | \
				 XFS_SCRUB_OFLAG_WARNING)

STATIC void
xchk_stats_merge_one(
	struct xchk_stats		*cs,
	const struct xfs_scrub_metadata	*sm,
	const struct xchk_stats_run	*run)
{
	struct xchk_scrub_stats		*css;

	if (sm->sm_type >= XFS_SCRUB_TYPE_NR) {
		ASSERT(sm->sm_type < XFS_SCRUB_TYPE_NR);
		return;
	}

	css = &cs->cs_stats[sm->sm_type];
	spin_lock(&css->css_lock);
	css->invocations++;
	if (!(sm->sm_flags & XFS_SCRUB_OFLAG_UNCLEAN))
		css->clean++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		css->corrupt++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_PREEN)
		css->preen++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_XFAIL)
		css->xfail++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_XCORRUPT)
		css->xcorrupt++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_INCOMPLETE)
		css->incomplete++;
	if (sm->sm_flags & XFS_SCRUB_OFLAG_WARNING)
		css->warning++;
	css->retries += run->retries;
	css->checktime_us += howmany_64(run->scrub_ns, NSEC_PER_USEC);

	if (run->repair_attempted)
		css->repair_invocations++;
	if (run->repair_succeeded)
		css->repair_success++;
	css->repairtime_us += howmany_64(run->repair_ns, NSEC_PER_USEC);
	spin_unlock(&css->css_lock);
}

/* Merge these scrub-run stats into the global and mount stat data. */
void
xchk_stats_merge(
	struct xfs_mount		*mp,
	const struct xfs_scrub_metadata	*sm,
	const struct xchk_stats_run	*run)
{
	xchk_stats_merge_one(&global_stats, sm, run);
	xchk_stats_merge_one(mp->m_scrub_stats, sm, run);
}

/* debugfs boilerplate */

static ssize_t
xchk_scrub_stats_read(
	struct file		*file,
	char __user		*ubuf,
	size_t			count,
	loff_t			*ppos)
{
	struct xchk_stats	*cs = file->private_data;
	char			*buf;
	size_t			bufsize;
	ssize_t			avail, ret;

	/*
	 * This generates a textual snapshot of all the scrub counters, so
	 * we do not want userspace to receive garbled text from multiple
	 * calls.  If the file position is greater than 0, return a short
	 * read.
	 */
	if (*ppos > 0)
		return 0;

	bufsize = xchk_stats_estimate_bufsize(cs);

	buf = kvmalloc(bufsize, XCHK_GFP_FLAGS);
	if (!buf)
		return -ENOMEM;

	avail = xchk_stats_format(cs, buf, bufsize);
	if (avail < 0) {
		ret = avail;
		goto out;
	}

	ret = simple_read_from_buffer(ubuf, count, ppos, buf, avail);
out:
	kvfree(buf);
	return ret;
}

static const struct file_operations scrub_stats_fops = {
	.open			= simple_open,
	.read			= xchk_scrub_stats_read,
};

static ssize_t
xchk_clear_scrub_stats_write(
	struct file		*file,
	const char __user	*ubuf,
	size_t			count,
	loff_t			*ppos)
{
	struct xchk_stats	*cs = file->private_data;
	unsigned int		val;
	int			ret;

	ret = kstrtouint_from_user(ubuf, count, 0, &val);
	if (ret)
		return ret;

	if (val != 1)
		return -EINVAL;

	xchk_stats_clearall(cs);
	return count;
}

static const struct file_operations clear_scrub_stats_fops = {
	.open			= simple_open,
	.write			= xchk_clear_scrub_stats_write,
};

/* Initialize the stats object. */
STATIC int
xchk_stats_init(
	struct xchk_stats	*cs,
	struct xfs_mount	*mp)
{
	struct xchk_scrub_stats	*css = &cs->cs_stats[0];
	unsigned int		i;

	for (i = 0; i < XFS_SCRUB_TYPE_NR; i++, css++)
		spin_lock_init(&css->css_lock);

	return 0;
}

/* Connect the stats object to debugfs. */
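/*
 * The "stats" file renders the counters as text via xchk_scrub_stats_read;
 * writing "1" to "clear_stats" resets them via xchk_stats_clearall.  With
 * debugfs mounted in the usual place, the global copy would be read with
 * something like (paths assumed; they depend on the parent dentry passed in):
 *
 *	cat /sys/kernel/debug/xfs/scrub/stats
 *	echo 1 > /sys/kernel/debug/xfs/scrub/clear_stats
 */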
void
xchk_stats_register(
	struct xchk_stats	*cs,
	struct dentry		*parent)
{
	if (!parent)
		return;

	cs->cs_debugfs = xfs_debugfs_mkdir("scrub", parent);
	if (!cs->cs_debugfs)
		return;

	debugfs_create_file("stats", 0444, cs->cs_debugfs, cs,
			&scrub_stats_fops);
	debugfs_create_file("clear_stats", 0200, cs->cs_debugfs, cs,
			&clear_scrub_stats_fops);
}

/* Free all resources related to the stats object. */
STATIC int
xchk_stats_teardown(
	struct xchk_stats	*cs)
{
	return 0;
}

/* Disconnect the stats object from debugfs. */
void
xchk_stats_unregister(
	struct xchk_stats	*cs)
{
	debugfs_remove(cs->cs_debugfs);
}

/* Initialize global stats and register them */
int __init
xchk_global_stats_setup(
	struct dentry		*parent)
{
	int			error;

	error = xchk_stats_init(&global_stats, NULL);
	if (error)
		return error;

	xchk_stats_register(&global_stats, parent);
	return 0;
}

/* Unregister global stats and tear them down */
void
xchk_global_stats_teardown(void)
{
	xchk_stats_unregister(&global_stats);
	xchk_stats_teardown(&global_stats);
}

/* Allocate per-mount stats */
int
xchk_mount_stats_alloc(
	struct xfs_mount	*mp)
{
	struct xchk_stats	*cs;
	int			error;

	cs = kvzalloc(sizeof(struct xchk_stats), GFP_KERNEL);
	if (!cs)
		return -ENOMEM;

	error = xchk_stats_init(cs, mp);
	if (error)
		goto out_free;

	mp->m_scrub_stats = cs;
	return 0;
out_free:
	kvfree(cs);
	return error;
}

/* Free per-mount stats */
void
xchk_mount_stats_free(
	struct xfs_mount	*mp)
{
	xchk_stats_teardown(mp->m_scrub_stats);
	kvfree(mp->m_scrub_stats);
	mp->m_scrub_stats = NULL;
}