Commit b7ca0ba0 (queued by Sasha Levin) — patch email follows:
1 | From 97a7f84a7a2f0086ff610254ed3c24b62a8627b5 Mon Sep 17 00:00:00 2001 |
2 | From: Sasha Levin <sashal@kernel.org> | |
3 | Date: Tue, 12 Mar 2024 11:56:41 -0400 | |
4 | Subject: ring-buffer: Do not set shortest_full when full target is hit | |
5 | ||
6 | From: Steven Rostedt (Google) <rostedt@goodmis.org> | |
7 | ||
8 | [ Upstream commit 761d9473e27f0c8782895013a3e7b52a37c8bcfc ] | |
9 | ||
10 | The rb_watermark_hit() checks if the amount of data in the ring buffer is | |
11 | above the percentage level passed in by the "full" variable. If it is, it | |
12 | returns true. | |
13 | ||
14 | But it also sets the "shortest_full" field of the cpu_buffer that informs | |
15 | writers that it needs to call the irq_work if the amount of data on the | |
16 | ring buffer is above the requested amount. | |
17 | ||
18 | The rb_watermark_hit() always sets the shortest_full even if the amount in | |
19 | the ring buffer is what it wants. As it is not going to wait, because it | |
20 | has what it wants, there's no reason to set shortest_full. | |
21 | ||
22 | Link: https://lore.kernel.org/linux-trace-kernel/20240312115641.6aa8ba08@gandalf.local.home | |
23 | ||
24 | Cc: stable@vger.kernel.org | |
25 | Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com> | |
26 | Fixes: 42fb0a1e84ff5 ("tracing/ring-buffer: Have polling block on watermark") | |
27 | Reviewed-by: Masami Hiramatsu (Google) <mhiramat@kernel.org> | |
28 | Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org> | |
29 | Signed-off-by: Sasha Levin <sashal@kernel.org> | |
30 | --- | |
31 | kernel/trace/ring_buffer.c | 7 ++++--- | |
32 | 1 file changed, 4 insertions(+), 3 deletions(-) | |
33 | ||
34 | diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c | |
35 | index 936b560989a3e..5b665e5991bf7 100644 | |
36 | --- a/kernel/trace/ring_buffer.c | |
37 | +++ b/kernel/trace/ring_buffer.c | |
38 | @@ -887,9 +887,10 @@ static bool rb_watermark_hit(struct trace_buffer *buffer, int cpu, int full) | |
39 | pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page; | |
40 | ret = !pagebusy && full_hit(buffer, cpu, full); | |
41 | ||
42 | - if (!cpu_buffer->shortest_full || | |
43 | - cpu_buffer->shortest_full > full) | |
44 | - cpu_buffer->shortest_full = full; | |
45 | + if (!ret && (!cpu_buffer->shortest_full || | |
46 | + cpu_buffer->shortest_full > full)) { | |
47 | + cpu_buffer->shortest_full = full; | |
48 | + } | |
49 | raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | |
50 | } | |
51 | return ret; | |
52 | -- | |
53 | 2.43.0 | |
54 |