From dddd3379a619a4cb8247bfd3c94ca9ae3797aa2e Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 24 Nov 2010 10:05:55 +0100
Subject: perf: Fix inherit vs. context rotation bug

From: Thomas Gleixner <tglx@linutronix.de>

commit dddd3379a619a4cb8247bfd3c94ca9ae3797aa2e upstream.

It was found that sometimes children of tasks with inherited events had
one extra event. Eventually it turned out to be due to the list rotation
not being exclusive with the list iteration in the inheritance code.

Cure this by temporarily disabling the rotation while we inherit the events.

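The race is easiest to see in isolation. The following stand-alone user-space
sketch is an illustration only, not kernel code: its tiny hand-rolled circular
list and rotate_left() helper merely mimic list_head and list_rotate_left().
It walks a three-entry list the way the inheritance loop walks
->flexible_groups and lets a rotation fire halfway through the walk; entry A
is then visited twice, which is exactly the "one extra event" symptom:

/*
 * Illustration only -- user-space sketch, not kernel code.
 */
#include <stdio.h>

struct node {
	const char *name;
	struct node *prev, *next;
};

/* Minimal circular doubly-linked list with a dummy head, like list_head. */
static struct node head = { .name = "head", .prev = &head, .next = &head };

static void add_tail(struct node *n)
{
	n->prev = head.prev;
	n->next = &head;
	head.prev->next = n;
	head.prev = n;
}

/* Move the first entry to the tail, like list_rotate_left() does. */
static void rotate_left(void)
{
	struct node *first = head.next;

	if (first == &head)
		return;
	head.next = first->next;
	first->next->prev = &head;
	add_tail(first);
}

int main(void)
{
	struct node a = { .name = "A" }, b = { .name = "B" }, c = { .name = "C" };
	struct node *pos;
	int step = 0;

	add_tail(&a);
	add_tail(&b);
	add_tail(&c);

	/* Walk the list the way the inheritance loop does ... */
	for (pos = head.next; pos != &head; pos = pos->next) {
		printf("inherit %s\n", pos->name);
		/* ... while the rotation "fires" halfway through the walk. */
		if (++step == 2)
			rotate_left();
	}
	/* Prints: inherit A, inherit B, inherit C, inherit A -- A twice. */
	return 0;
}

Keeping the rotation disabled for the duration of the walk, as the hunks below
do with ctx->rotate_disable, guarantees each entry is visited exactly once.
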
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>

---
 include/linux/perf_event.h |    1 +
 kernel/perf_event.c        |   22 ++++++++++++++++++++--
 2 files changed, 21 insertions(+), 2 deletions(-)

--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -788,6 +788,7 @@ struct perf_event_context {
 	int				nr_active;
 	int				is_active;
 	int				nr_stat;
+	int				rotate_disable;
 	atomic_t			refcount;
 	struct task_struct		*task;
 
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1620,8 +1620,12 @@ static void rotate_ctx(struct perf_event
 {
 	raw_spin_lock(&ctx->lock);
 
-	/* Rotate the first entry last of non-pinned groups */
-	list_rotate_left(&ctx->flexible_groups);
+	/*
+	 * Rotate the first entry last of non-pinned groups. Rotation might be
+	 * disabled by the inheritance code.
+	 */
+	if (!ctx->rotate_disable)
+		list_rotate_left(&ctx->flexible_groups);
 
 	raw_spin_unlock(&ctx->lock);
 }
@@ -5622,6 +5626,7 @@ int perf_event_init_task(struct task_str
 	struct perf_event *event;
 	struct task_struct *parent = current;
 	int inherited_all = 1;
+	unsigned long flags;
 	int ret = 0;
 
 	child->perf_event_ctxp = NULL;
@@ -5662,6 +5667,15 @@ int perf_event_init_task(struct task_str
 			break;
 	}
 
+	/*
+	 * We can't hold ctx->lock when iterating the ->flexible_group list due
+	 * to allocations, but we need to prevent rotation because
+	 * rotate_ctx() will change the list from interrupt context.
+	 */
+	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
+	parent_ctx->rotate_disable = 1;
+	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
+
 	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
 		ret = inherit_task_group(event, parent, parent_ctx, child,
 					 &inherited_all);
@@ -5669,6 +5683,10 @@ int perf_event_init_task(struct task_str
 			break;
 	}
 
+	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
+	parent_ctx->rotate_disable = 0;
+	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
+
 	child_ctx = child->perf_event_ctxp;
 
 	if (child_ctx && inherited_all) {