// SPDX-License-Identifier: GPL-2.0-only
/*
 * latencytop.c: Latency display infrastructure
 *
 * (C) Copyright 2008 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 */

/*
 * CONFIG_LATENCYTOP enables a kernel latency tracking infrastructure that is
 * used by the "latencytop" userspace tool. The latency that is tracked is not
 * the 'traditional' interrupt latency (which is primarily caused by something
 * else consuming CPU), but instead, it is the latency an application encounters
 * because the kernel sleeps on its behalf for various reasons.
 *
 * This code tracks 2 levels of statistics:
 * 1) System level latency
 * 2) Per process latency
 *
 * The latency is stored in fixed sized data structures in an accumulated form;
 * if the "same" latency cause is hit twice, this will be tracked as one entry
 * in the data structure. Both the count, total accumulated latency and maximum
 * latency are tracked in this data structure. When the fixed size structure is
 * full, no new causes are tracked until the buffer is flushed by writing to
 * the /proc file; the userspace tool does this on a regular basis.
 *
 * A latency cause is identified by a stringified backtrace at the point that
 * the scheduler gets invoked. The userland tool will use this string to
 * identify the cause of the latency in human readable form.
 *
 * The information is exported via /proc/latency_stats and /proc/<pid>/latency.
 * These files look like this:
 *
 * Latency Top version : v0.1
 * 70 59433 4897 i915_irq_wait drm_ioctl vfs_ioctl do_vfs_ioctl sys_ioctl
 * |    |    |    |
 * |    |    |    +----> the stringified backtrace
 * |    |    +---------> The maximum latency for this entry in microseconds
 * |    +--------------> The accumulated latency for this entry (microseconds)
 * +-------------------> The number of times this entry is hit
 *
 * (note: the average latency is the accumulated latency divided by the number
 * of times)
 */

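/*
 * A minimal userspace usage sketch (assuming CONFIG_LATENCYTOP=y):
 *
 *	echo 1 > /proc/sys/kernel/latencytop	# enable collection
 *	cat /proc/latency_stats		# read the accumulated records
 *	echo > /proc/latency_stats		# any write flushes the records
 *
 * Enabling the sysctl also forces schedstats on; see sysctl_latencytop()
 * below.
 */
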
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/proc_fs.h>
#include <linux/latencytop.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/stat.h>
#include <linux/list.h>
#include <linux/stacktrace.h>
#include <linux/sysctl.h>

static DEFINE_RAW_SPINLOCK(latency_lock);

#define MAXLR 128
static struct latency_record latency_record[MAXLR];

int latencytop_enabled;

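/*
 * Handler for the kernel.latencytop sysctl, which toggles latencytop_enabled.
 * Enabling it also forces schedstats on (force_schedstat_enabled()), since
 * the latency data collected here depends on scheduler statistics.
 */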
#ifdef CONFIG_SYSCTL
static int sysctl_latencytop(struct ctl_table *table, int write, void *buffer,
			     size_t *lenp, loff_t *ppos)
{
	int err;

	err = proc_dointvec(table, write, buffer, lenp, ppos);
	if (latencytop_enabled)
		force_schedstat_enabled();

	return err;
}

static struct ctl_table latencytop_sysctl[] = {
	{
		.procname	= "latencytop",
		.data		= &latencytop_enabled,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= sysctl_latencytop,
	},
	{}
};
#endif

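/*
 * Reset the per-task latency records of @p. Done under latency_lock so a
 * concurrent __account_scheduler_latency() never sees a half-cleared buffer.
 */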
void clear_tsk_latency_tracing(struct task_struct *p)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&latency_lock, flags);
	memset(&p->latency_record, 0, sizeof(p->latency_record));
	p->latency_record_count = 0;
	raw_spin_unlock_irqrestore(&latency_lock, flags);
}

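/*
 * Reset the system-wide latency records; triggered by a write to
 * /proc/latency_stats (see lstats_write()).
 */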
static void clear_global_latency_tracing(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&latency_lock, flags);
	memset(&latency_record, 0, sizeof(latency_record));
	raw_spin_unlock_irqrestore(&latency_lock, flags);
}

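/*
 * Fold one latency record into the system-wide table: if an entry with an
 * identical backtrace already exists, its count/time/max are updated;
 * otherwise the record is copied into the first free slot, if any is left.
 */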
static void __sched
account_global_scheduler_latency(struct task_struct *tsk,
				 struct latency_record *lat)
{
	int firstnonnull = MAXLR + 1;
	int i;

	/* skip kernel threads for now */
	if (!tsk->mm)
		return;

	for (i = 0; i < MAXLR; i++) {
		int q, same = 1;

		/* Nothing stored: */
		if (!latency_record[i].backtrace[0]) {
			if (firstnonnull > i)
				firstnonnull = i;
			continue;
		}
		for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
			unsigned long record = lat->backtrace[q];

			if (latency_record[i].backtrace[q] != record) {
				same = 0;
				break;
			}

			/* 0 entry marks end of backtrace: */
			if (!record)
				break;
		}
		if (same) {
			latency_record[i].count++;
			latency_record[i].time += lat->time;
			if (lat->time > latency_record[i].max)
				latency_record[i].max = lat->time;
			return;
		}
	}

	i = firstnonnull;
	if (i >= MAXLR - 1)
		return;

	/* Allocated a new one: */
	memcpy(&latency_record[i], lat, sizeof(struct latency_record));
}

/**
 * __account_scheduler_latency - record a latency that occurred
 * @tsk: the task struct of the task hitting the latency
 * @usecs: the duration of the latency in microseconds
 * @inter: 1 if the sleep was interruptible, 0 if uninterruptible
 *
 * This function is the main entry point for recording latency entries
 * as called by the scheduler.
 *
 * This function has a few special cases to deal with normal 'non-latency'
 * sleeps: specifically, interruptible sleep longer than 5 msec is skipped
 * since this usually is caused by waiting for events via select() and co.
 *
 * Negative latencies (caused by time going backwards) are also explicitly
 * skipped.
 */
void __sched
__account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
{
	unsigned long flags;
	int i, q;
	struct latency_record lat;

	/* Long interruptible waits are generally user requested... */
	if (inter && usecs > 5000)
		return;

	/* Negative sleeps are time going backwards */
	/* Zero-time sleeps are non-interesting */
	if (usecs <= 0)
		return;

	memset(&lat, 0, sizeof(lat));
	lat.count = 1;
	lat.time = usecs;
	lat.max = usecs;

	stack_trace_save_tsk(tsk, lat.backtrace, LT_BACKTRACEDEPTH, 0);

	raw_spin_lock_irqsave(&latency_lock, flags);

	account_global_scheduler_latency(tsk, &lat);

	for (i = 0; i < tsk->latency_record_count; i++) {
		struct latency_record *mylat;
		int same = 1;

		mylat = &tsk->latency_record[i];
		for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
			unsigned long record = lat.backtrace[q];

			if (mylat->backtrace[q] != record) {
				same = 0;
				break;
			}

			/* 0 entry is end of backtrace */
			if (!record)
				break;
		}
		if (same) {
			mylat->count++;
			mylat->time += lat.time;
			if (lat.time > mylat->max)
				mylat->max = lat.time;
			goto out_unlock;
		}
	}

	/*
	 * Short term hack: once we have LT_SAVECOUNT entries we stop adding
	 * new ones; in the future old slots should be recycled.
	 */
	if (tsk->latency_record_count >= LT_SAVECOUNT)
		goto out_unlock;

	/* Allocated a new one: */
	i = tsk->latency_record_count++;
	memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));

out_unlock:
	raw_spin_unlock_irqrestore(&latency_lock, flags);
}

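/*
 * seq_file show handler for /proc/latency_stats: emits the accumulated
 * system-wide records in the format documented at the top of this file.
 */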
static int lstats_show(struct seq_file *m, void *v)
{
	int i;

	seq_puts(m, "Latency Top version : v0.1\n");

	for (i = 0; i < MAXLR; i++) {
		struct latency_record *lr = &latency_record[i];

		if (lr->backtrace[0]) {
			int q;
			seq_printf(m, "%i %lu %lu",
				   lr->count, lr->time, lr->max);
			for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
				unsigned long bt = lr->backtrace[q];

				if (!bt)
					break;

				seq_printf(m, " %ps", (void *)bt);
			}
			seq_puts(m, "\n");
		}
	}
	return 0;
}

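/*
 * Any write to /proc/latency_stats flushes the system-wide records; the
 * data written is ignored.
 */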
static ssize_t
lstats_write(struct file *file, const char __user *buf, size_t count,
	     loff_t *offs)
{
	clear_global_latency_tracing();

	return count;
}

static int lstats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, lstats_show, NULL);
}

static const struct proc_ops lstats_proc_ops = {
	.proc_open	= lstats_open,
	.proc_read	= seq_read,
	.proc_write	= lstats_write,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

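/*
 * Create /proc/latency_stats and register the kernel.latencytop sysctl at
 * device initcall time.
 */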
static int __init init_lstats_procfs(void)
{
	proc_create("latency_stats", 0644, NULL, &lstats_proc_ops);
#ifdef CONFIG_SYSCTL
	register_sysctl_init("kernel", latencytop_sysctl);
#endif
	return 0;
}
device_initcall(init_lstats_procfs);