Commit a15a8890 (SL) — backported patch, contents follow:
1 | From 6b63c95707a2873d97b40b22fccf31d600add995 Mon Sep 17 00:00:00 2001 |
2 | From: Peter Zijlstra <peterz@infradead.org> | |
3 | Date: Fri, 17 May 2019 13:52:32 +0200 | |
4 | Subject: perf/ring_buffer: Add ordering to rb->nest increment | |
5 | ||
6 | [ Upstream commit 3f9fbe9bd86c534eba2faf5d840fd44c6049f50e ] | |
7 | ||
8 | Similar to how decrementing rb->nest too early can cause data_head to | |
9 | (temporarily) be observed to go backward, so too can this happen when | |
10 | we increment too late. | |
11 | ||
12 | This barrier() ensures the rb->head load happens after the increment, | |
13 | both the one in the 'goto again' path, as the one from | |
14 | perf_output_get_handle() -- albeit very unlikely to matter for the | |
15 | latter. | |
16 | ||
17 | Suggested-by: Yabin Cui <yabinc@google.com> | |
18 | Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> | |
19 | Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com> | |
20 | Cc: Arnaldo Carvalho de Melo <acme@redhat.com> | |
21 | Cc: Jiri Olsa <jolsa@redhat.com> | |
22 | Cc: Linus Torvalds <torvalds@linux-foundation.org> | |
23 | Cc: Peter Zijlstra <peterz@infradead.org> | |
24 | Cc: Stephane Eranian <eranian@google.com> | |
25 | Cc: Thomas Gleixner <tglx@linutronix.de> | |
26 | Cc: Vince Weaver <vincent.weaver@maine.edu> | |
27 | Cc: acme@kernel.org | |
28 | Cc: mark.rutland@arm.com | |
29 | Cc: namhyung@kernel.org | |
30 | Fixes: ef60777c9abd ("perf: Optimize the perf_output() path by removing IRQ-disables") | |
31 | Link: http://lkml.kernel.org/r/20190517115418.309516009@infradead.org | |
32 | Signed-off-by: Ingo Molnar <mingo@kernel.org> | |
33 | Signed-off-by: Sasha Levin <sashal@kernel.org> | |
34 | --- | |
35 | kernel/events/ring_buffer.c | 9 +++++++++ | |
36 | 1 file changed, 9 insertions(+) | |
37 | ||
38 | diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c | |
39 | index 31edf1f39cca..d32b9375ec0e 100644 | |
40 | --- a/kernel/events/ring_buffer.c | |
41 | +++ b/kernel/events/ring_buffer.c | |
42 | @@ -49,6 +49,15 @@ static void perf_output_put_handle(struct perf_output_handle *handle) | |
43 | unsigned long head; | |
44 | ||
45 | again: | |
46 | + /* | |
47 | + * In order to avoid publishing a head value that goes backwards, | |
48 | + * we must ensure the load of @rb->head happens after we've | |
49 | + * incremented @rb->nest. | |
50 | + * | |
51 | + * Otherwise we can observe a @rb->head value before one published | |
52 | + * by an IRQ/NMI happening between the load and the increment. | |
53 | + */ | |
54 | + barrier(); | |
55 | head = local_read(&rb->head); | |
56 | ||
57 | /* | |
58 | -- | |
59 | 2.20.1 | |
60 |