/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
#define DEBUG_SUBSYSTEM S_LLITE

#include <linux/seq_file.h>

#include "../include/lustre_lite.h"
#include "../include/lprocfs_status.h"
#include "../include/obd_support.h"

#include "llite_internal.h"
#include "vvp_internal.h"
46 /* debugfs llite mount point registration */
47 static struct file_operations ll_rw_extents_stats_fops
;
48 static struct file_operations ll_rw_extents_stats_pp_fops
;
49 static struct file_operations ll_rw_offset_stats_fops
;
51 static ssize_t
blocksize_show(struct kobject
*kobj
, struct attribute
*attr
,
54 struct ll_sb_info
*sbi
= container_of(kobj
, struct ll_sb_info
,
56 struct obd_statfs osfs
;
59 rc
= ll_statfs_internal(sbi
->ll_sb
, &osfs
,
60 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS
),
63 return sprintf(buf
, "%u\n", osfs
.os_bsize
);
67 LUSTRE_RO_ATTR(blocksize
);
69 static ssize_t
kbytestotal_show(struct kobject
*kobj
, struct attribute
*attr
,
72 struct ll_sb_info
*sbi
= container_of(kobj
, struct ll_sb_info
,
74 struct obd_statfs osfs
;
77 rc
= ll_statfs_internal(sbi
->ll_sb
, &osfs
,
78 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS
),
81 __u32 blk_size
= osfs
.os_bsize
>> 10;
82 __u64 result
= osfs
.os_blocks
;
84 while (blk_size
>>= 1)
87 rc
= sprintf(buf
, "%llu\n", result
);
92 LUSTRE_RO_ATTR(kbytestotal
);
94 static ssize_t
kbytesfree_show(struct kobject
*kobj
, struct attribute
*attr
,
97 struct ll_sb_info
*sbi
= container_of(kobj
, struct ll_sb_info
,
99 struct obd_statfs osfs
;
102 rc
= ll_statfs_internal(sbi
->ll_sb
, &osfs
,
103 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS
),
106 __u32 blk_size
= osfs
.os_bsize
>> 10;
107 __u64 result
= osfs
.os_bfree
;
109 while (blk_size
>>= 1)
112 rc
= sprintf(buf
, "%llu\n", result
);
117 LUSTRE_RO_ATTR(kbytesfree
);
119 static ssize_t
kbytesavail_show(struct kobject
*kobj
, struct attribute
*attr
,
122 struct ll_sb_info
*sbi
= container_of(kobj
, struct ll_sb_info
,
124 struct obd_statfs osfs
;
127 rc
= ll_statfs_internal(sbi
->ll_sb
, &osfs
,
128 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS
),
131 __u32 blk_size
= osfs
.os_bsize
>> 10;
132 __u64 result
= osfs
.os_bavail
;
134 while (blk_size
>>= 1)
137 rc
= sprintf(buf
, "%llu\n", result
);
142 LUSTRE_RO_ATTR(kbytesavail
);
144 static ssize_t
filestotal_show(struct kobject
*kobj
, struct attribute
*attr
,
147 struct ll_sb_info
*sbi
= container_of(kobj
, struct ll_sb_info
,
149 struct obd_statfs osfs
;
152 rc
= ll_statfs_internal(sbi
->ll_sb
, &osfs
,
153 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS
),
156 return sprintf(buf
, "%llu\n", osfs
.os_files
);
160 LUSTRE_RO_ATTR(filestotal
);
162 static ssize_t
filesfree_show(struct kobject
*kobj
, struct attribute
*attr
,
165 struct ll_sb_info
*sbi
= container_of(kobj
, struct ll_sb_info
,
167 struct obd_statfs osfs
;
170 rc
= ll_statfs_internal(sbi
->ll_sb
, &osfs
,
171 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS
),
174 return sprintf(buf
, "%llu\n", osfs
.os_ffree
);
178 LUSTRE_RO_ATTR(filesfree
);
180 static ssize_t
client_type_show(struct kobject
*kobj
, struct attribute
*attr
,
183 struct ll_sb_info
*sbi
= container_of(kobj
, struct ll_sb_info
,
186 return sprintf(buf
, "%s client\n",
187 sbi
->ll_flags
& LL_SBI_RMT_CLIENT
? "remote" : "local");
189 LUSTRE_RO_ATTR(client_type
);
191 static ssize_t
fstype_show(struct kobject
*kobj
, struct attribute
*attr
,
194 struct ll_sb_info
*sbi
= container_of(kobj
, struct ll_sb_info
,
197 return sprintf(buf
, "%s\n", sbi
->ll_sb
->s_type
->name
);
199 LUSTRE_RO_ATTR(fstype
);
201 static ssize_t
uuid_show(struct kobject
*kobj
, struct attribute
*attr
,
204 struct ll_sb_info
*sbi
= container_of(kobj
, struct ll_sb_info
,
207 return sprintf(buf
, "%s\n", sbi
->ll_sb_uuid
.uuid
);
209 LUSTRE_RO_ATTR(uuid
);
211 static int ll_site_stats_seq_show(struct seq_file
*m
, void *v
)
213 struct super_block
*sb
= m
->private;
216 * See description of statistical counters in struct cl_site, and
219 return cl_site_stats_print(lu2cl_site(ll_s2sbi(sb
)->ll_site
), m
);
222 LPROC_SEQ_FOPS_RO(ll_site_stats
);
224 static ssize_t
max_read_ahead_mb_show(struct kobject
*kobj
,
225 struct attribute
*attr
, char *buf
)
227 struct ll_sb_info
*sbi
= container_of(kobj
, struct ll_sb_info
,
232 spin_lock(&sbi
->ll_lock
);
233 pages_number
= sbi
->ll_ra_info
.ra_max_pages
;
234 spin_unlock(&sbi
->ll_lock
);
236 mult
= 1 << (20 - PAGE_SHIFT
);
237 return lprocfs_read_frac_helper(buf
, PAGE_SIZE
, pages_number
, mult
);
240 static ssize_t
max_read_ahead_mb_store(struct kobject
*kobj
,
241 struct attribute
*attr
,
245 struct ll_sb_info
*sbi
= container_of(kobj
, struct ll_sb_info
,
248 unsigned long pages_number
;
250 rc
= kstrtoul(buffer
, 10, &pages_number
);
254 pages_number
*= 1 << (20 - PAGE_SHIFT
); /* MB -> pages */
256 if (pages_number
> totalram_pages
/ 2) {
258 CERROR("can't set file readahead more than %lu MB\n",
259 totalram_pages
>> (20 - PAGE_SHIFT
+ 1)); /*1/2 of RAM*/
263 spin_lock(&sbi
->ll_lock
);
264 sbi
->ll_ra_info
.ra_max_pages
= pages_number
;
265 spin_unlock(&sbi
->ll_lock
);
269 LUSTRE_RW_ATTR(max_read_ahead_mb
);
271 static ssize_t
max_read_ahead_per_file_mb_show(struct kobject
*kobj
,
272 struct attribute
*attr
,
275 struct ll_sb_info
*sbi
= container_of(kobj
, struct ll_sb_info
,
280 spin_lock(&sbi
->ll_lock
);
281 pages_number
= sbi
->ll_ra_info
.ra_max_pages_per_file
;
282 spin_unlock(&sbi
->ll_lock
);
284 mult
= 1 << (20 - PAGE_SHIFT
);
285 return lprocfs_read_frac_helper(buf
, PAGE_SIZE
, pages_number
, mult
);
288 static ssize_t
max_read_ahead_per_file_mb_store(struct kobject
*kobj
,
289 struct attribute
*attr
,
293 struct ll_sb_info
*sbi
= container_of(kobj
, struct ll_sb_info
,
296 unsigned long pages_number
;
298 rc
= kstrtoul(buffer
, 10, &pages_number
);
302 if (pages_number
> sbi
->ll_ra_info
.ra_max_pages
) {
303 CERROR("can't set file readahead more than max_read_ahead_mb %lu MB\n",
304 sbi
->ll_ra_info
.ra_max_pages
);
308 spin_lock(&sbi
->ll_lock
);
309 sbi
->ll_ra_info
.ra_max_pages_per_file
= pages_number
;
310 spin_unlock(&sbi
->ll_lock
);
314 LUSTRE_RW_ATTR(max_read_ahead_per_file_mb
);
316 static ssize_t
max_read_ahead_whole_mb_show(struct kobject
*kobj
,
317 struct attribute
*attr
,
320 struct ll_sb_info
*sbi
= container_of(kobj
, struct ll_sb_info
,
325 spin_lock(&sbi
->ll_lock
);
326 pages_number
= sbi
->ll_ra_info
.ra_max_read_ahead_whole_pages
;
327 spin_unlock(&sbi
->ll_lock
);
329 mult
= 1 << (20 - PAGE_SHIFT
);
330 return lprocfs_read_frac_helper(buf
, PAGE_SIZE
, pages_number
, mult
);
333 static ssize_t
max_read_ahead_whole_mb_store(struct kobject
*kobj
,
334 struct attribute
*attr
,
338 struct ll_sb_info
*sbi
= container_of(kobj
, struct ll_sb_info
,
341 unsigned long pages_number
;
343 rc
= kstrtoul(buffer
, 10, &pages_number
);
347 /* Cap this at the current max readahead window size, the readahead
348 * algorithm does this anyway so it's pointless to set it larger.
350 if (pages_number
> sbi
->ll_ra_info
.ra_max_pages_per_file
) {
351 CERROR("can't set max_read_ahead_whole_mb more than max_read_ahead_per_file_mb: %lu\n",
352 sbi
->ll_ra_info
.ra_max_pages_per_file
>> (20 - PAGE_SHIFT
));
356 spin_lock(&sbi
->ll_lock
);
357 sbi
->ll_ra_info
.ra_max_read_ahead_whole_pages
= pages_number
;
358 spin_unlock(&sbi
->ll_lock
);
362 LUSTRE_RW_ATTR(max_read_ahead_whole_mb
);
364 static int ll_max_cached_mb_seq_show(struct seq_file
*m
, void *v
)
366 struct super_block
*sb
= m
->private;
367 struct ll_sb_info
*sbi
= ll_s2sbi(sb
);
368 struct cl_client_cache
*cache
= &sbi
->ll_cache
;
369 int shift
= 20 - PAGE_SHIFT
;
373 max_cached_mb
= cache
->ccc_lru_max
>> shift
;
374 unused_mb
= atomic_read(&cache
->ccc_lru_left
) >> shift
;
377 "max_cached_mb: %d\n"
380 "reclaim_count: %u\n",
381 atomic_read(&cache
->ccc_users
),
383 max_cached_mb
- unused_mb
,
385 cache
->ccc_lru_shrinkers
);
389 static ssize_t
ll_max_cached_mb_seq_write(struct file
*file
,
390 const char __user
*buffer
,
391 size_t count
, loff_t
*off
)
393 struct super_block
*sb
= ((struct seq_file
*)file
->private_data
)->private;
394 struct ll_sb_info
*sbi
= ll_s2sbi(sb
);
395 struct cl_client_cache
*cache
= &sbi
->ll_cache
;
396 int mult
, rc
, pages_number
;
401 if (count
>= sizeof(kernbuf
))
404 if (copy_from_user(kernbuf
, buffer
, count
))
408 mult
= 1 << (20 - PAGE_SHIFT
);
409 buffer
+= lprocfs_find_named_value(kernbuf
, "max_cached_mb:", &count
) -
411 rc
= lprocfs_write_frac_helper(buffer
, count
, &pages_number
, mult
);
415 if (pages_number
< 0 || pages_number
> totalram_pages
) {
416 CERROR("%s: can't set max cache more than %lu MB\n",
417 ll_get_fsname(sb
, NULL
, 0),
418 totalram_pages
>> (20 - PAGE_SHIFT
));
422 spin_lock(&sbi
->ll_lock
);
423 diff
= pages_number
- cache
->ccc_lru_max
;
424 spin_unlock(&sbi
->ll_lock
);
426 /* easy - add more LRU slots. */
428 atomic_add(diff
, &cache
->ccc_lru_left
);
437 /* reduce LRU budget from free slots. */
441 ov
= atomic_read(&cache
->ccc_lru_left
);
445 nv
= ov
> diff
? ov
- diff
: 0;
446 rc
= atomic_cmpxchg(&cache
->ccc_lru_left
, ov
, nv
);
447 if (likely(ov
== rc
)) {
457 if (!sbi
->ll_dt_exp
) { /* being initialized */
462 /* difficult - have to ask OSCs to drop LRU slots. */
464 rc
= obd_set_info_async(NULL
, sbi
->ll_dt_exp
,
465 sizeof(KEY_CACHE_LRU_SHRINK
),
466 KEY_CACHE_LRU_SHRINK
,
467 sizeof(tmp
), &tmp
, NULL
);
474 spin_lock(&sbi
->ll_lock
);
475 cache
->ccc_lru_max
= pages_number
;
476 spin_unlock(&sbi
->ll_lock
);
479 atomic_add(nrpages
, &cache
->ccc_lru_left
);
484 LPROC_SEQ_FOPS(ll_max_cached_mb
);
486 static ssize_t
checksum_pages_show(struct kobject
*kobj
, struct attribute
*attr
,
489 struct ll_sb_info
*sbi
= container_of(kobj
, struct ll_sb_info
,
492 return sprintf(buf
, "%u\n", (sbi
->ll_flags
& LL_SBI_CHECKSUM
) ? 1 : 0);
495 static ssize_t
checksum_pages_store(struct kobject
*kobj
,
496 struct attribute
*attr
,
500 struct ll_sb_info
*sbi
= container_of(kobj
, struct ll_sb_info
,
509 rc
= kstrtoul(buffer
, 10, &val
);
513 sbi
->ll_flags
|= LL_SBI_CHECKSUM
;
515 sbi
->ll_flags
&= ~LL_SBI_CHECKSUM
;
517 rc
= obd_set_info_async(NULL
, sbi
->ll_dt_exp
, sizeof(KEY_CHECKSUM
),
518 KEY_CHECKSUM
, sizeof(val
), &val
, NULL
);
520 CWARN("Failed to set OSC checksum flags: %d\n", rc
);
524 LUSTRE_RW_ATTR(checksum_pages
);
526 static ssize_t
ll_rd_track_id(struct kobject
*kobj
, char *buf
,
527 enum stats_track_type type
)
529 struct ll_sb_info
*sbi
= container_of(kobj
, struct ll_sb_info
,
532 if (sbi
->ll_stats_track_type
== type
)
533 return sprintf(buf
, "%d\n", sbi
->ll_stats_track_id
);
534 else if (sbi
->ll_stats_track_type
== STATS_TRACK_ALL
)
535 return sprintf(buf
, "0 (all)\n");
537 return sprintf(buf
, "untracked\n");
540 static ssize_t
ll_wr_track_id(struct kobject
*kobj
, const char *buffer
,
542 enum stats_track_type type
)
544 struct ll_sb_info
*sbi
= container_of(kobj
, struct ll_sb_info
,
549 rc
= kstrtoul(buffer
, 10, &pid
);
552 sbi
->ll_stats_track_id
= pid
;
554 sbi
->ll_stats_track_type
= STATS_TRACK_ALL
;
556 sbi
->ll_stats_track_type
= type
;
557 lprocfs_clear_stats(sbi
->ll_stats
);
561 static ssize_t
stats_track_pid_show(struct kobject
*kobj
,
562 struct attribute
*attr
,
565 return ll_rd_track_id(kobj
, buf
, STATS_TRACK_PID
);
568 static ssize_t
stats_track_pid_store(struct kobject
*kobj
,
569 struct attribute
*attr
,
573 return ll_wr_track_id(kobj
, buffer
, count
, STATS_TRACK_PID
);
575 LUSTRE_RW_ATTR(stats_track_pid
);
577 static ssize_t
stats_track_ppid_show(struct kobject
*kobj
,
578 struct attribute
*attr
,
581 return ll_rd_track_id(kobj
, buf
, STATS_TRACK_PPID
);
584 static ssize_t
stats_track_ppid_store(struct kobject
*kobj
,
585 struct attribute
*attr
,
589 return ll_wr_track_id(kobj
, buffer
, count
, STATS_TRACK_PPID
);
591 LUSTRE_RW_ATTR(stats_track_ppid
);
593 static ssize_t
stats_track_gid_show(struct kobject
*kobj
,
594 struct attribute
*attr
,
597 return ll_rd_track_id(kobj
, buf
, STATS_TRACK_GID
);
600 static ssize_t
stats_track_gid_store(struct kobject
*kobj
,
601 struct attribute
*attr
,
605 return ll_wr_track_id(kobj
, buffer
, count
, STATS_TRACK_GID
);
607 LUSTRE_RW_ATTR(stats_track_gid
);
609 static ssize_t
statahead_max_show(struct kobject
*kobj
,
610 struct attribute
*attr
,
613 struct ll_sb_info
*sbi
= container_of(kobj
, struct ll_sb_info
,
616 return sprintf(buf
, "%u\n", sbi
->ll_sa_max
);
619 static ssize_t
statahead_max_store(struct kobject
*kobj
,
620 struct attribute
*attr
,
624 struct ll_sb_info
*sbi
= container_of(kobj
, struct ll_sb_info
,
629 rc
= kstrtoul(buffer
, 10, &val
);
633 if (val
<= LL_SA_RPC_MAX
)
634 sbi
->ll_sa_max
= val
;
636 CERROR("Bad statahead_max value %lu. Valid values are in the range [0, %d]\n",
641 LUSTRE_RW_ATTR(statahead_max
);
643 static ssize_t
statahead_agl_show(struct kobject
*kobj
,
644 struct attribute
*attr
,
647 struct ll_sb_info
*sbi
= container_of(kobj
, struct ll_sb_info
,
650 return sprintf(buf
, "%u\n", sbi
->ll_flags
& LL_SBI_AGL_ENABLED
? 1 : 0);
653 static ssize_t
statahead_agl_store(struct kobject
*kobj
,
654 struct attribute
*attr
,
658 struct ll_sb_info
*sbi
= container_of(kobj
, struct ll_sb_info
,
663 rc
= kstrtoul(buffer
, 10, &val
);
668 sbi
->ll_flags
|= LL_SBI_AGL_ENABLED
;
670 sbi
->ll_flags
&= ~LL_SBI_AGL_ENABLED
;
674 LUSTRE_RW_ATTR(statahead_agl
);
676 static int ll_statahead_stats_seq_show(struct seq_file
*m
, void *v
)
678 struct super_block
*sb
= m
->private;
679 struct ll_sb_info
*sbi
= ll_s2sbi(sb
);
682 "statahead total: %u\n"
683 "statahead wrong: %u\n"
685 atomic_read(&sbi
->ll_sa_total
),
686 atomic_read(&sbi
->ll_sa_wrong
),
687 atomic_read(&sbi
->ll_agl_total
));
691 LPROC_SEQ_FOPS_RO(ll_statahead_stats
);
693 static ssize_t
lazystatfs_show(struct kobject
*kobj
,
694 struct attribute
*attr
,
697 struct ll_sb_info
*sbi
= container_of(kobj
, struct ll_sb_info
,
700 return sprintf(buf
, "%u\n", sbi
->ll_flags
& LL_SBI_LAZYSTATFS
? 1 : 0);
703 static ssize_t
lazystatfs_store(struct kobject
*kobj
,
704 struct attribute
*attr
,
708 struct ll_sb_info
*sbi
= container_of(kobj
, struct ll_sb_info
,
713 rc
= kstrtoul(buffer
, 10, &val
);
718 sbi
->ll_flags
|= LL_SBI_LAZYSTATFS
;
720 sbi
->ll_flags
&= ~LL_SBI_LAZYSTATFS
;
724 LUSTRE_RW_ATTR(lazystatfs
);
726 static ssize_t
max_easize_show(struct kobject
*kobj
,
727 struct attribute
*attr
,
730 struct ll_sb_info
*sbi
= container_of(kobj
, struct ll_sb_info
,
735 rc
= ll_get_max_mdsize(sbi
, &ealen
);
739 return sprintf(buf
, "%u\n", ealen
);
741 LUSTRE_RO_ATTR(max_easize
);
743 static ssize_t
default_easize_show(struct kobject
*kobj
,
744 struct attribute
*attr
,
747 struct ll_sb_info
*sbi
= container_of(kobj
, struct ll_sb_info
,
752 rc
= ll_get_default_mdsize(sbi
, &ealen
);
756 return sprintf(buf
, "%u\n", ealen
);
758 LUSTRE_RO_ATTR(default_easize
);
760 static int ll_sbi_flags_seq_show(struct seq_file
*m
, void *v
)
762 const char *str
[] = LL_SBI_FLAGS
;
763 struct super_block
*sb
= m
->private;
764 int flags
= ll_s2sbi(sb
)->ll_flags
;
768 if (ARRAY_SIZE(str
) <= i
) {
769 CERROR("%s: Revise array LL_SBI_FLAGS to match sbi flags please.\n",
770 ll_get_fsname(sb
, NULL
, 0));
775 seq_printf(m
, "%s ", str
[i
]);
779 seq_printf(m
, "\b\n");
783 LPROC_SEQ_FOPS_RO(ll_sbi_flags
);
785 static ssize_t
xattr_cache_show(struct kobject
*kobj
,
786 struct attribute
*attr
,
789 struct ll_sb_info
*sbi
= container_of(kobj
, struct ll_sb_info
,
792 return sprintf(buf
, "%u\n", sbi
->ll_xattr_cache_enabled
);
795 static ssize_t
xattr_cache_store(struct kobject
*kobj
,
796 struct attribute
*attr
,
800 struct ll_sb_info
*sbi
= container_of(kobj
, struct ll_sb_info
,
805 rc
= kstrtoul(buffer
, 10, &val
);
809 if (val
!= 0 && val
!= 1)
812 if (val
== 1 && !(sbi
->ll_flags
& LL_SBI_XATTR_CACHE
))
815 sbi
->ll_xattr_cache_enabled
= val
;
819 LUSTRE_RW_ATTR(xattr_cache
);
821 static struct lprocfs_vars lprocfs_llite_obd_vars
[] = {
822 /* { "mntpt_path", ll_rd_path, 0, 0 }, */
823 { "site", &ll_site_stats_fops
, NULL
, 0 },
824 /* { "filegroups", lprocfs_rd_filegroups, 0, 0 }, */
825 { "max_cached_mb", &ll_max_cached_mb_fops
, NULL
},
826 { "statahead_stats", &ll_statahead_stats_fops
, NULL
, 0 },
827 { "sbi_flags", &ll_sbi_flags_fops
, NULL
, 0 },
831 #define MAX_STRING_SIZE 128
833 static struct attribute
*llite_attrs
[] = {
834 &lustre_attr_blocksize
.attr
,
835 &lustre_attr_kbytestotal
.attr
,
836 &lustre_attr_kbytesfree
.attr
,
837 &lustre_attr_kbytesavail
.attr
,
838 &lustre_attr_filestotal
.attr
,
839 &lustre_attr_filesfree
.attr
,
840 &lustre_attr_client_type
.attr
,
841 &lustre_attr_fstype
.attr
,
842 &lustre_attr_uuid
.attr
,
843 &lustre_attr_max_read_ahead_mb
.attr
,
844 &lustre_attr_max_read_ahead_per_file_mb
.attr
,
845 &lustre_attr_max_read_ahead_whole_mb
.attr
,
846 &lustre_attr_checksum_pages
.attr
,
847 &lustre_attr_stats_track_pid
.attr
,
848 &lustre_attr_stats_track_ppid
.attr
,
849 &lustre_attr_stats_track_gid
.attr
,
850 &lustre_attr_statahead_max
.attr
,
851 &lustre_attr_statahead_agl
.attr
,
852 &lustre_attr_lazystatfs
.attr
,
853 &lustre_attr_max_easize
.attr
,
854 &lustre_attr_default_easize
.attr
,
855 &lustre_attr_xattr_cache
.attr
,
859 static void llite_sb_release(struct kobject
*kobj
)
861 struct ll_sb_info
*sbi
= container_of(kobj
, struct ll_sb_info
,
863 complete(&sbi
->ll_kobj_unregister
);
866 static struct kobj_type llite_ktype
= {
867 .default_attrs
= llite_attrs
,
868 .sysfs_ops
= &lustre_sysfs_ops
,
869 .release
= llite_sb_release
,
872 static const struct llite_file_opcode
{
876 } llite_opcode_table
[LPROC_LL_FILE_OPCODES
] = {
878 { LPROC_LL_DIRTY_HITS
, LPROCFS_TYPE_REGS
, "dirty_pages_hits" },
879 { LPROC_LL_DIRTY_MISSES
, LPROCFS_TYPE_REGS
, "dirty_pages_misses" },
880 { LPROC_LL_READ_BYTES
, LPROCFS_CNTR_AVGMINMAX
|LPROCFS_TYPE_BYTES
,
882 { LPROC_LL_WRITE_BYTES
, LPROCFS_CNTR_AVGMINMAX
|LPROCFS_TYPE_BYTES
,
884 { LPROC_LL_BRW_READ
, LPROCFS_CNTR_AVGMINMAX
|LPROCFS_TYPE_PAGES
,
886 { LPROC_LL_BRW_WRITE
, LPROCFS_CNTR_AVGMINMAX
|LPROCFS_TYPE_PAGES
,
888 { LPROC_LL_OSC_READ
, LPROCFS_CNTR_AVGMINMAX
|LPROCFS_TYPE_BYTES
,
890 { LPROC_LL_OSC_WRITE
, LPROCFS_CNTR_AVGMINMAX
|LPROCFS_TYPE_BYTES
,
892 { LPROC_LL_IOCTL
, LPROCFS_TYPE_REGS
, "ioctl" },
893 { LPROC_LL_OPEN
, LPROCFS_TYPE_REGS
, "open" },
894 { LPROC_LL_RELEASE
, LPROCFS_TYPE_REGS
, "close" },
895 { LPROC_LL_MAP
, LPROCFS_TYPE_REGS
, "mmap" },
896 { LPROC_LL_LLSEEK
, LPROCFS_TYPE_REGS
, "seek" },
897 { LPROC_LL_FSYNC
, LPROCFS_TYPE_REGS
, "fsync" },
898 { LPROC_LL_READDIR
, LPROCFS_TYPE_REGS
, "readdir" },
899 /* inode operation */
900 { LPROC_LL_SETATTR
, LPROCFS_TYPE_REGS
, "setattr" },
901 { LPROC_LL_TRUNC
, LPROCFS_TYPE_REGS
, "truncate" },
902 { LPROC_LL_FLOCK
, LPROCFS_TYPE_REGS
, "flock" },
903 { LPROC_LL_GETATTR
, LPROCFS_TYPE_REGS
, "getattr" },
904 /* dir inode operation */
905 { LPROC_LL_CREATE
, LPROCFS_TYPE_REGS
, "create" },
906 { LPROC_LL_LINK
, LPROCFS_TYPE_REGS
, "link" },
907 { LPROC_LL_UNLINK
, LPROCFS_TYPE_REGS
, "unlink" },
908 { LPROC_LL_SYMLINK
, LPROCFS_TYPE_REGS
, "symlink" },
909 { LPROC_LL_MKDIR
, LPROCFS_TYPE_REGS
, "mkdir" },
910 { LPROC_LL_RMDIR
, LPROCFS_TYPE_REGS
, "rmdir" },
911 { LPROC_LL_MKNOD
, LPROCFS_TYPE_REGS
, "mknod" },
912 { LPROC_LL_RENAME
, LPROCFS_TYPE_REGS
, "rename" },
913 /* special inode operation */
914 { LPROC_LL_STAFS
, LPROCFS_TYPE_REGS
, "statfs" },
915 { LPROC_LL_ALLOC_INODE
, LPROCFS_TYPE_REGS
, "alloc_inode" },
916 { LPROC_LL_SETXATTR
, LPROCFS_TYPE_REGS
, "setxattr" },
917 { LPROC_LL_GETXATTR
, LPROCFS_TYPE_REGS
, "getxattr" },
918 { LPROC_LL_GETXATTR_HITS
, LPROCFS_TYPE_REGS
, "getxattr_hits" },
919 { LPROC_LL_LISTXATTR
, LPROCFS_TYPE_REGS
, "listxattr" },
920 { LPROC_LL_REMOVEXATTR
, LPROCFS_TYPE_REGS
, "removexattr" },
921 { LPROC_LL_INODE_PERM
, LPROCFS_TYPE_REGS
, "inode_permission" },
924 void ll_stats_ops_tally(struct ll_sb_info
*sbi
, int op
, int count
)
928 if (sbi
->ll_stats_track_type
== STATS_TRACK_ALL
)
929 lprocfs_counter_add(sbi
->ll_stats
, op
, count
);
930 else if (sbi
->ll_stats_track_type
== STATS_TRACK_PID
&&
931 sbi
->ll_stats_track_id
== current
->pid
)
932 lprocfs_counter_add(sbi
->ll_stats
, op
, count
);
933 else if (sbi
->ll_stats_track_type
== STATS_TRACK_PPID
&&
934 sbi
->ll_stats_track_id
== current
->real_parent
->pid
)
935 lprocfs_counter_add(sbi
->ll_stats
, op
, count
);
936 else if (sbi
->ll_stats_track_type
== STATS_TRACK_GID
&&
937 sbi
->ll_stats_track_id
==
938 from_kgid(&init_user_ns
, current_gid()))
939 lprocfs_counter_add(sbi
->ll_stats
, op
, count
);
941 EXPORT_SYMBOL(ll_stats_ops_tally
);
943 static const char *ra_stat_string
[] = {
944 [RA_STAT_HIT
] = "hits",
945 [RA_STAT_MISS
] = "misses",
946 [RA_STAT_DISTANT_READPAGE
] = "readpage not consecutive",
947 [RA_STAT_MISS_IN_WINDOW
] = "miss inside window",
948 [RA_STAT_FAILED_GRAB_PAGE
] = "failed grab_cache_page",
949 [RA_STAT_FAILED_MATCH
] = "failed lock match",
950 [RA_STAT_DISCARDED
] = "read but discarded",
951 [RA_STAT_ZERO_LEN
] = "zero length file",
952 [RA_STAT_ZERO_WINDOW
] = "zero size window",
953 [RA_STAT_EOF
] = "read-ahead to EOF",
954 [RA_STAT_MAX_IN_FLIGHT
] = "hit max r-a issue",
955 [RA_STAT_WRONG_GRAB_PAGE
] = "wrong page from grab_cache_page",
958 int ldebugfs_register_mountpoint(struct dentry
*parent
,
959 struct super_block
*sb
, char *osc
, char *mdc
)
961 struct lustre_sb_info
*lsi
= s2lsi(sb
);
962 struct ll_sb_info
*sbi
= ll_s2sbi(sb
);
963 struct obd_device
*obd
;
965 char name
[MAX_STRING_SIZE
+ 1], *ptr
;
966 int err
, id
, len
, rc
;
968 name
[MAX_STRING_SIZE
] = '\0';
975 len
= strlen(lsi
->lsi_lmd
->lmd_profile
);
976 ptr
= strrchr(lsi
->lsi_lmd
->lmd_profile
, '-');
977 if (ptr
&& (strcmp(ptr
, "-client") == 0))
981 snprintf(name
, MAX_STRING_SIZE
, "%.*s-%p", len
,
982 lsi
->lsi_lmd
->lmd_profile
, sb
);
984 dir
= ldebugfs_register(name
, parent
, NULL
, NULL
);
985 if (IS_ERR_OR_NULL(dir
)) {
986 err
= dir
? PTR_ERR(dir
) : -ENOMEM
;
987 sbi
->ll_debugfs_entry
= NULL
;
990 sbi
->ll_debugfs_entry
= dir
;
992 rc
= ldebugfs_seq_create(sbi
->ll_debugfs_entry
, "dump_page_cache", 0444,
993 &vvp_dump_pgcache_file_ops
, sbi
);
995 CWARN("Error adding the dump_page_cache file\n");
997 rc
= ldebugfs_seq_create(sbi
->ll_debugfs_entry
, "extents_stats", 0644,
998 &ll_rw_extents_stats_fops
, sbi
);
1000 CWARN("Error adding the extent_stats file\n");
1002 rc
= ldebugfs_seq_create(sbi
->ll_debugfs_entry
,
1003 "extents_stats_per_process",
1004 0644, &ll_rw_extents_stats_pp_fops
, sbi
);
1006 CWARN("Error adding the extents_stats_per_process file\n");
1008 rc
= ldebugfs_seq_create(sbi
->ll_debugfs_entry
, "offset_stats", 0644,
1009 &ll_rw_offset_stats_fops
, sbi
);
1011 CWARN("Error adding the offset_stats file\n");
1013 /* File operations stats */
1014 sbi
->ll_stats
= lprocfs_alloc_stats(LPROC_LL_FILE_OPCODES
,
1015 LPROCFS_STATS_FLAG_NONE
);
1016 if (!sbi
->ll_stats
) {
1020 /* do counter init */
1021 for (id
= 0; id
< LPROC_LL_FILE_OPCODES
; id
++) {
1022 __u32 type
= llite_opcode_table
[id
].type
;
1025 if (type
& LPROCFS_TYPE_REGS
)
1027 else if (type
& LPROCFS_TYPE_BYTES
)
1029 else if (type
& LPROCFS_TYPE_PAGES
)
1031 lprocfs_counter_init(sbi
->ll_stats
,
1032 llite_opcode_table
[id
].opcode
,
1033 (type
& LPROCFS_CNTR_AVGMINMAX
),
1034 llite_opcode_table
[id
].opname
, ptr
);
1036 err
= ldebugfs_register_stats(sbi
->ll_debugfs_entry
, "stats",
1041 sbi
->ll_ra_stats
= lprocfs_alloc_stats(ARRAY_SIZE(ra_stat_string
),
1042 LPROCFS_STATS_FLAG_NONE
);
1043 if (!sbi
->ll_ra_stats
) {
1048 for (id
= 0; id
< ARRAY_SIZE(ra_stat_string
); id
++)
1049 lprocfs_counter_init(sbi
->ll_ra_stats
, id
, 0,
1050 ra_stat_string
[id
], "pages");
1052 err
= ldebugfs_register_stats(sbi
->ll_debugfs_entry
, "read_ahead_stats",
1057 err
= ldebugfs_add_vars(sbi
->ll_debugfs_entry
,
1058 lprocfs_llite_obd_vars
, sb
);
1062 sbi
->ll_kobj
.kset
= llite_kset
;
1063 init_completion(&sbi
->ll_kobj_unregister
);
1064 err
= kobject_init_and_add(&sbi
->ll_kobj
, &llite_ktype
, NULL
,
1070 obd
= class_name2obd(mdc
);
1072 err
= sysfs_create_link(&sbi
->ll_kobj
, &obd
->obd_kobj
,
1073 obd
->obd_type
->typ_name
);
1078 obd
= class_name2obd(osc
);
1080 err
= sysfs_create_link(&sbi
->ll_kobj
, &obd
->obd_kobj
,
1081 obd
->obd_type
->typ_name
);
1084 ldebugfs_remove(&sbi
->ll_debugfs_entry
);
1085 lprocfs_free_stats(&sbi
->ll_ra_stats
);
1086 lprocfs_free_stats(&sbi
->ll_stats
);
1091 void ldebugfs_unregister_mountpoint(struct ll_sb_info
*sbi
)
1093 if (sbi
->ll_debugfs_entry
) {
1094 ldebugfs_remove(&sbi
->ll_debugfs_entry
);
1095 kobject_put(&sbi
->ll_kobj
);
1096 wait_for_completion(&sbi
->ll_kobj_unregister
);
1097 lprocfs_free_stats(&sbi
->ll_ra_stats
);
1098 lprocfs_free_stats(&sbi
->ll_stats
);
#undef MAX_STRING_SIZE

/* integer percentage of a in b; 0 when b is 0 (avoids divide-by-zero) */
#define pct(a, b) (b ? a * 100 / b : 0)
1106 static void ll_display_extents_info(struct ll_rw_extents_info
*io_extents
,
1107 struct seq_file
*seq
, int which
)
1109 unsigned long read_tot
= 0, write_tot
= 0, read_cum
, write_cum
;
1110 unsigned long start
, end
, r
, w
;
1111 char *unitp
= "KMGTPEZY";
1113 struct per_process_info
*pp_info
= &io_extents
->pp_extents
[which
];
1119 for (i
= 0; i
< LL_HIST_MAX
; i
++) {
1120 read_tot
+= pp_info
->pp_r_hist
.oh_buckets
[i
];
1121 write_tot
+= pp_info
->pp_w_hist
.oh_buckets
[i
];
1124 for (i
= 0; i
< LL_HIST_MAX
; i
++) {
1125 r
= pp_info
->pp_r_hist
.oh_buckets
[i
];
1126 w
= pp_info
->pp_w_hist
.oh_buckets
[i
];
1129 end
= 1 << (i
+ LL_HIST_START
- units
);
1130 seq_printf(seq
, "%4lu%c - %4lu%c%c: %14lu %4lu %4lu | %14lu %4lu %4lu\n",
1131 start
, *unitp
, end
, *unitp
,
1132 (i
== LL_HIST_MAX
- 1) ? '+' : ' ',
1133 r
, pct(r
, read_tot
), pct(read_cum
, read_tot
),
1134 w
, pct(w
, write_tot
), pct(write_cum
, write_tot
));
1136 if (start
== 1<<10) {
1141 if (read_cum
== read_tot
&& write_cum
== write_tot
)
1146 static int ll_rw_extents_stats_pp_seq_show(struct seq_file
*seq
, void *v
)
1148 struct timespec64 now
;
1149 struct ll_sb_info
*sbi
= seq
->private;
1150 struct ll_rw_extents_info
*io_extents
= &sbi
->ll_rw_extents_info
;
1153 ktime_get_real_ts64(&now
);
1155 if (!sbi
->ll_rw_stats_on
) {
1156 seq_printf(seq
, "disabled\n"
1157 "write anything in this file to activate, then 0 or \"[D/d]isabled\" to deactivate\n");
1160 seq_printf(seq
, "snapshot_time: %llu.%09lu (secs.usecs)\n",
1161 (s64
)now
.tv_sec
, (unsigned long)now
.tv_nsec
);
1162 seq_printf(seq
, "%15s %19s | %20s\n", " ", "read", "write");
1163 seq_printf(seq
, "%13s %14s %4s %4s | %14s %4s %4s\n",
1164 "extents", "calls", "%", "cum%",
1165 "calls", "%", "cum%");
1166 spin_lock(&sbi
->ll_pp_extent_lock
);
1167 for (k
= 0; k
< LL_PROCESS_HIST_MAX
; k
++) {
1168 if (io_extents
->pp_extents
[k
].pid
!= 0) {
1169 seq_printf(seq
, "\nPID: %d\n",
1170 io_extents
->pp_extents
[k
].pid
);
1171 ll_display_extents_info(io_extents
, seq
, k
);
1174 spin_unlock(&sbi
->ll_pp_extent_lock
);
1178 static ssize_t
ll_rw_extents_stats_pp_seq_write(struct file
*file
,
1179 const char __user
*buf
,
1183 struct seq_file
*seq
= file
->private_data
;
1184 struct ll_sb_info
*sbi
= seq
->private;
1185 struct ll_rw_extents_info
*io_extents
= &sbi
->ll_rw_extents_info
;
1187 int value
= 1, rc
= 0;
1192 rc
= lprocfs_write_helper(buf
, len
, &value
);
1193 if (rc
< 0 && len
< 16) {
1196 if (copy_from_user(kernbuf
, buf
, len
))
1200 if (kernbuf
[len
- 1] == '\n')
1201 kernbuf
[len
- 1] = 0;
1203 if (strcmp(kernbuf
, "disabled") == 0 ||
1204 strcmp(kernbuf
, "Disabled") == 0)
1209 sbi
->ll_rw_stats_on
= 0;
1211 sbi
->ll_rw_stats_on
= 1;
1213 spin_lock(&sbi
->ll_pp_extent_lock
);
1214 for (i
= 0; i
< LL_PROCESS_HIST_MAX
; i
++) {
1215 io_extents
->pp_extents
[i
].pid
= 0;
1216 lprocfs_oh_clear(&io_extents
->pp_extents
[i
].pp_r_hist
);
1217 lprocfs_oh_clear(&io_extents
->pp_extents
[i
].pp_w_hist
);
1219 spin_unlock(&sbi
->ll_pp_extent_lock
);
1223 LPROC_SEQ_FOPS(ll_rw_extents_stats_pp
);
1225 static int ll_rw_extents_stats_seq_show(struct seq_file
*seq
, void *v
)
1227 struct timespec64 now
;
1228 struct ll_sb_info
*sbi
= seq
->private;
1229 struct ll_rw_extents_info
*io_extents
= &sbi
->ll_rw_extents_info
;
1231 ktime_get_real_ts64(&now
);
1233 if (!sbi
->ll_rw_stats_on
) {
1234 seq_printf(seq
, "disabled\n"
1235 "write anything in this file to activate, then 0 or \"[D/d]isabled\" to deactivate\n");
1238 seq_printf(seq
, "snapshot_time: %llu.%09lu (secs.usecs)\n",
1239 (u64
)now
.tv_sec
, (unsigned long)now
.tv_nsec
);
1241 seq_printf(seq
, "%15s %19s | %20s\n", " ", "read", "write");
1242 seq_printf(seq
, "%13s %14s %4s %4s | %14s %4s %4s\n",
1243 "extents", "calls", "%", "cum%",
1244 "calls", "%", "cum%");
1245 spin_lock(&sbi
->ll_lock
);
1246 ll_display_extents_info(io_extents
, seq
, LL_PROCESS_HIST_MAX
);
1247 spin_unlock(&sbi
->ll_lock
);
1252 static ssize_t
ll_rw_extents_stats_seq_write(struct file
*file
,
1253 const char __user
*buf
,
1254 size_t len
, loff_t
*off
)
1256 struct seq_file
*seq
= file
->private_data
;
1257 struct ll_sb_info
*sbi
= seq
->private;
1258 struct ll_rw_extents_info
*io_extents
= &sbi
->ll_rw_extents_info
;
1260 int value
= 1, rc
= 0;
1265 rc
= lprocfs_write_helper(buf
, len
, &value
);
1266 if (rc
< 0 && len
< 16) {
1269 if (copy_from_user(kernbuf
, buf
, len
))
1273 if (kernbuf
[len
- 1] == '\n')
1274 kernbuf
[len
- 1] = 0;
1276 if (strcmp(kernbuf
, "disabled") == 0 ||
1277 strcmp(kernbuf
, "Disabled") == 0)
1282 sbi
->ll_rw_stats_on
= 0;
1284 sbi
->ll_rw_stats_on
= 1;
1286 spin_lock(&sbi
->ll_pp_extent_lock
);
1287 for (i
= 0; i
<= LL_PROCESS_HIST_MAX
; i
++) {
1288 io_extents
->pp_extents
[i
].pid
= 0;
1289 lprocfs_oh_clear(&io_extents
->pp_extents
[i
].pp_r_hist
);
1290 lprocfs_oh_clear(&io_extents
->pp_extents
[i
].pp_w_hist
);
1292 spin_unlock(&sbi
->ll_pp_extent_lock
);
1297 LPROC_SEQ_FOPS(ll_rw_extents_stats
);
1299 void ll_rw_stats_tally(struct ll_sb_info
*sbi
, pid_t pid
,
1300 struct ll_file_data
*file
, loff_t pos
,
1301 size_t count
, int rw
)
1304 struct ll_rw_process_info
*process
;
1305 struct ll_rw_process_info
*offset
;
1306 int *off_count
= &sbi
->ll_rw_offset_entry_count
;
1307 int *process_count
= &sbi
->ll_offset_process_count
;
1308 struct ll_rw_extents_info
*io_extents
= &sbi
->ll_rw_extents_info
;
1310 if (!sbi
->ll_rw_stats_on
)
1312 process
= sbi
->ll_rw_process_info
;
1313 offset
= sbi
->ll_rw_offset_info
;
1315 spin_lock(&sbi
->ll_pp_extent_lock
);
1316 /* Extent statistics */
1317 for (i
= 0; i
< LL_PROCESS_HIST_MAX
; i
++) {
1318 if (io_extents
->pp_extents
[i
].pid
== pid
) {
1326 sbi
->ll_extent_process_count
=
1327 (sbi
->ll_extent_process_count
+ 1) % LL_PROCESS_HIST_MAX
;
1328 cur
= sbi
->ll_extent_process_count
;
1329 io_extents
->pp_extents
[cur
].pid
= pid
;
1330 lprocfs_oh_clear(&io_extents
->pp_extents
[cur
].pp_r_hist
);
1331 lprocfs_oh_clear(&io_extents
->pp_extents
[cur
].pp_w_hist
);
1334 for (i
= 0; (count
>= (1 << LL_HIST_START
<< i
)) &&
1335 (i
< (LL_HIST_MAX
- 1)); i
++)
1338 io_extents
->pp_extents
[cur
].pp_r_hist
.oh_buckets
[i
]++;
1339 io_extents
->pp_extents
[LL_PROCESS_HIST_MAX
].pp_r_hist
.oh_buckets
[i
]++;
1341 io_extents
->pp_extents
[cur
].pp_w_hist
.oh_buckets
[i
]++;
1342 io_extents
->pp_extents
[LL_PROCESS_HIST_MAX
].pp_w_hist
.oh_buckets
[i
]++;
1344 spin_unlock(&sbi
->ll_pp_extent_lock
);
1346 spin_lock(&sbi
->ll_process_lock
);
1347 /* Offset statistics */
1348 for (i
= 0; i
< LL_PROCESS_HIST_MAX
; i
++) {
1349 if (process
[i
].rw_pid
== pid
) {
1350 if (process
[i
].rw_last_file
!= file
) {
1351 process
[i
].rw_range_start
= pos
;
1352 process
[i
].rw_last_file_pos
= pos
+ count
;
1353 process
[i
].rw_smallest_extent
= count
;
1354 process
[i
].rw_largest_extent
= count
;
1355 process
[i
].rw_offset
= 0;
1356 process
[i
].rw_last_file
= file
;
1357 spin_unlock(&sbi
->ll_process_lock
);
1360 if (process
[i
].rw_last_file_pos
!= pos
) {
1362 (*off_count
+ 1) % LL_OFFSET_HIST_MAX
;
1363 offset
[*off_count
].rw_op
= process
[i
].rw_op
;
1364 offset
[*off_count
].rw_pid
= pid
;
1365 offset
[*off_count
].rw_range_start
=
1366 process
[i
].rw_range_start
;
1367 offset
[*off_count
].rw_range_end
=
1368 process
[i
].rw_last_file_pos
;
1369 offset
[*off_count
].rw_smallest_extent
=
1370 process
[i
].rw_smallest_extent
;
1371 offset
[*off_count
].rw_largest_extent
=
1372 process
[i
].rw_largest_extent
;
1373 offset
[*off_count
].rw_offset
=
1374 process
[i
].rw_offset
;
1375 process
[i
].rw_op
= rw
;
1376 process
[i
].rw_range_start
= pos
;
1377 process
[i
].rw_smallest_extent
= count
;
1378 process
[i
].rw_largest_extent
= count
;
1379 process
[i
].rw_offset
= pos
-
1380 process
[i
].rw_last_file_pos
;
1382 if (process
[i
].rw_smallest_extent
> count
)
1383 process
[i
].rw_smallest_extent
= count
;
1384 if (process
[i
].rw_largest_extent
< count
)
1385 process
[i
].rw_largest_extent
= count
;
1386 process
[i
].rw_last_file_pos
= pos
+ count
;
1387 spin_unlock(&sbi
->ll_process_lock
);
1391 *process_count
= (*process_count
+ 1) % LL_PROCESS_HIST_MAX
;
1392 process
[*process_count
].rw_pid
= pid
;
1393 process
[*process_count
].rw_op
= rw
;
1394 process
[*process_count
].rw_range_start
= pos
;
1395 process
[*process_count
].rw_last_file_pos
= pos
+ count
;
1396 process
[*process_count
].rw_smallest_extent
= count
;
1397 process
[*process_count
].rw_largest_extent
= count
;
1398 process
[*process_count
].rw_offset
= 0;
1399 process
[*process_count
].rw_last_file
= file
;
1400 spin_unlock(&sbi
->ll_process_lock
);
1403 static int ll_rw_offset_stats_seq_show(struct seq_file
*seq
, void *v
)
1405 struct timespec64 now
;
1406 struct ll_sb_info
*sbi
= seq
->private;
1407 struct ll_rw_process_info
*offset
= sbi
->ll_rw_offset_info
;
1408 struct ll_rw_process_info
*process
= sbi
->ll_rw_process_info
;
1411 ktime_get_real_ts64(&now
);
1413 if (!sbi
->ll_rw_stats_on
) {
1414 seq_printf(seq
, "disabled\n"
1415 "write anything in this file to activate, then 0 or \"[D/d]isabled\" to deactivate\n");
1418 spin_lock(&sbi
->ll_process_lock
);
1420 seq_printf(seq
, "snapshot_time: %llu.%09lu (secs.usecs)\n",
1421 (s64
)now
.tv_sec
, (unsigned long)now
.tv_nsec
);
1422 seq_printf(seq
, "%3s %10s %14s %14s %17s %17s %14s\n",
1423 "R/W", "PID", "RANGE START", "RANGE END",
1424 "SMALLEST EXTENT", "LARGEST EXTENT", "OFFSET");
1425 /* We stored the discontiguous offsets here; print them first */
1426 for (i
= 0; i
< LL_OFFSET_HIST_MAX
; i
++) {
1427 if (offset
[i
].rw_pid
!= 0)
1429 "%3c %10d %14Lu %14Lu %17lu %17lu %14Lu",
1430 offset
[i
].rw_op
== READ
? 'R' : 'W',
1432 offset
[i
].rw_range_start
,
1433 offset
[i
].rw_range_end
,
1434 (unsigned long)offset
[i
].rw_smallest_extent
,
1435 (unsigned long)offset
[i
].rw_largest_extent
,
1436 offset
[i
].rw_offset
);
1438 /* Then print the current offsets for each process */
1439 for (i
= 0; i
< LL_PROCESS_HIST_MAX
; i
++) {
1440 if (process
[i
].rw_pid
!= 0)
1442 "%3c %10d %14Lu %14Lu %17lu %17lu %14Lu",
1443 process
[i
].rw_op
== READ
? 'R' : 'W',
1445 process
[i
].rw_range_start
,
1446 process
[i
].rw_last_file_pos
,
1447 (unsigned long)process
[i
].rw_smallest_extent
,
1448 (unsigned long)process
[i
].rw_largest_extent
,
1449 process
[i
].rw_offset
);
1451 spin_unlock(&sbi
->ll_process_lock
);
1456 static ssize_t
ll_rw_offset_stats_seq_write(struct file
*file
,
1457 const char __user
*buf
,
1458 size_t len
, loff_t
*off
)
1460 struct seq_file
*seq
= file
->private_data
;
1461 struct ll_sb_info
*sbi
= seq
->private;
1462 struct ll_rw_process_info
*process_info
= sbi
->ll_rw_process_info
;
1463 struct ll_rw_process_info
*offset_info
= sbi
->ll_rw_offset_info
;
1464 int value
= 1, rc
= 0;
1469 rc
= lprocfs_write_helper(buf
, len
, &value
);
1471 if (rc
< 0 && len
< 16) {
1474 if (copy_from_user(kernbuf
, buf
, len
))
1478 if (kernbuf
[len
- 1] == '\n')
1479 kernbuf
[len
- 1] = 0;
1481 if (strcmp(kernbuf
, "disabled") == 0 ||
1482 strcmp(kernbuf
, "Disabled") == 0)
1487 sbi
->ll_rw_stats_on
= 0;
1489 sbi
->ll_rw_stats_on
= 1;
1491 spin_lock(&sbi
->ll_process_lock
);
1492 sbi
->ll_offset_process_count
= 0;
1493 sbi
->ll_rw_offset_entry_count
= 0;
1494 memset(process_info
, 0, sizeof(struct ll_rw_process_info
) *
1495 LL_PROCESS_HIST_MAX
);
1496 memset(offset_info
, 0, sizeof(struct ll_rw_process_info
) *
1497 LL_OFFSET_HIST_MAX
);
1498 spin_unlock(&sbi
->ll_process_lock
);
/*
 * Generate the seq_file file_operations for the rw_offset_stats debugfs
 * entry, binding ll_rw_offset_stats_seq_show / _seq_write (defined above).
 */
LPROC_SEQ_FOPS(ll_rw_offset_stats);
1505 void lprocfs_llite_init_vars(struct lprocfs_static_vars
*lvars
)
1507 lvars
->obd_vars
= lprocfs_llite_obd_vars
;