From stable+bounces-185638-greg=kroah.com@vger.kernel.org Tue Oct 14 13:37:23 2025
From: Sasha Levin <sashal@kernel.org>
Date: Tue, 14 Oct 2025 07:37:14 -0400
Subject: tracing: Fix race condition in kprobe initialization causing NULL pointer dereference
To: stable@vger.kernel.org
Cc: Yuan Chen <chenyuan@kylinos.cn>, "Masami Hiramatsu (Google)" <mhiramat@kernel.org>, Sasha Levin <sashal@kernel.org>
Message-ID: <20251014113714.4153034-1-sashal@kernel.org>

From: Yuan Chen <chenyuan@kylinos.cn>

[ Upstream commit 9cf9aa7b0acfde7545c1a1d912576e9bab28dc6f ]

There is a critical race condition in kprobe initialization that can lead to
a NULL pointer dereference and kernel crash.

[1135630.084782] Unable to handle kernel paging request at virtual address 0000710a04630000
...
[1135630.260314] pstate: 404003c9 (nZcv DAIF +PAN -UAO)
[1135630.269239] pc : kprobe_perf_func+0x30/0x260
[1135630.277643] lr : kprobe_dispatcher+0x44/0x60
[1135630.286041] sp : ffffaeff4977fa40
[1135630.293441] x29: ffffaeff4977fa40 x28: ffffaf015340e400
[1135630.302837] x27: 0000000000000000 x26: 0000000000000000
[1135630.312257] x25: ffffaf029ed108a8 x24: ffffaf015340e528
[1135630.321705] x23: ffffaeff4977fc50 x22: ffffaeff4977fc50
[1135630.331154] x21: 0000000000000000 x20: ffffaeff4977fc50
[1135630.340586] x19: ffffaf015340e400 x18: 0000000000000000
[1135630.349985] x17: 0000000000000000 x16: 0000000000000000
[1135630.359285] x15: 0000000000000000 x14: 0000000000000000
[1135630.368445] x13: 0000000000000000 x12: 0000000000000000
[1135630.377473] x11: 0000000000000000 x10: 0000000000000000
[1135630.386411] x9 : 0000000000000000 x8 : 0000000000000000
[1135630.395252] x7 : 0000000000000000 x6 : 0000000000000000
[1135630.403963] x5 : 0000000000000000 x4 : 0000000000000000
[1135630.412545] x3 : 0000710a04630000 x2 : 0000000000000006
[1135630.421021] x1 : ffffaeff4977fc50 x0 : 0000710a04630000
[1135630.429410] Call trace:
[1135630.434828] kprobe_perf_func+0x30/0x260
[1135630.441661] kprobe_dispatcher+0x44/0x60
[1135630.448396] aggr_pre_handler+0x70/0xc8
[1135630.454959] kprobe_breakpoint_handler+0x140/0x1e0
[1135630.462435] brk_handler+0xbc/0xd8
[1135630.468437] do_debug_exception+0x84/0x138
[1135630.475074] el1_dbg+0x18/0x8c
[1135630.480582] security_file_permission+0x0/0xd0
[1135630.487426] vfs_write+0x70/0x1c0
[1135630.493059] ksys_write+0x5c/0xc8
[1135630.498638] __arm64_sys_write+0x24/0x30
[1135630.504821] el0_svc_common+0x78/0x130
[1135630.510838] el0_svc_handler+0x38/0x78
[1135630.516834] el0_svc+0x8/0x1b0

kernel/trace/trace_kprobe.c: 1308
0xffff3df8995039ec <kprobe_perf_func+0x2c>:     ldr     x21, [x24,#120]
include/linux/compiler.h: 294
0xffff3df8995039f0 <kprobe_perf_func+0x30>:     ldr     x1, [x21,x0]

kernel/trace/trace_kprobe.c
1308:   head = this_cpu_ptr(call->perf_events);
1309:   if (hlist_empty(head))
1310:           return 0;

crash> struct trace_event_call -o
struct trace_event_call {
  ...
  [120] struct hlist_head *perf_events; //(call->perf_event)
  ...
}

crash> struct trace_event_call ffffaf015340e528
struct trace_event_call {
  ...
  perf_events = 0xffff0ad5fa89f088, //this value is correct, but x21 = 0
  ...
}

Race Condition Analysis:

The race occurs between kprobe activation and perf_events initialization:

CPU0                                        CPU1
====                                        ====
perf_kprobe_init
perf_trace_event_init
tp_event->perf_events = list;(1)
tp_event->class->reg (2)← KPROBE ACTIVE
                                            Debug exception triggers
                                            ...
                                            kprobe_dispatcher
                                            kprobe_perf_func (tk->tp.flags & TP_FLAG_PROFILE)
                                            head = this_cpu_ptr(call->perf_events)(3)
                                            (perf_events is still NULL)

Problem:
1. CPU0 executes (1) assigning tp_event->perf_events = list
2. CPU0 executes (2) enabling kprobe functionality via class->reg()
3. CPU1 triggers and reaches kprobe_dispatcher
4. CPU1 checks TP_FLAG_PROFILE - condition passes (step 2 completed)
5. CPU1 calls kprobe_perf_func() and crashes at (3) because
   call->perf_events is still NULL

CPU1 sees that kprobe functionality is enabled but does not see that
perf_events has been assigned.

Add paired read and write memory barriers to guarantee that if CPU1
sees that kprobe functionality is enabled, it must also see that
perf_events has been assigned.
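
As a rough, standalone illustration (not part of this patch), the
publish/consume ordering the fix relies on can be sketched with C11 atomics:
the release store below stands in for the smp_store_release() added to
trace_probe_set_flag(), and the acquire load for the smp_load_acquire() in the
new trace_probe_load_flag(). The names fake_event, enabler, handler and
percpu_list are hypothetical placeholders, not the kernel objects:

/*
 * Minimal sketch of the release/acquire pairing, using C11 atomics.
 * "struct fake_event", "enabler", "handler" and "percpu_list" are
 * illustrative placeholders, not the real kernel structures.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define TP_FLAG_PROFILE 2                 /* placeholder flag bit */

struct fake_event {
        int *perf_events;                 /* stands in for call->perf_events */
        _Atomic unsigned int flags;       /* stands in for tp->event->flags */
};

static struct fake_event ev;
static int percpu_list = 42;              /* stands in for the per-cpu hlist */

/* "CPU0": publish perf_events (1), then set the flag with release
 * semantics (2), mirroring smp_store_release() in the patch. */
static void *enabler(void *arg)
{
        (void)arg;
        ev.perf_events = &percpu_list;                              /* (1) */
        atomic_fetch_or_explicit(&ev.flags, TP_FLAG_PROFILE,
                                 memory_order_release);             /* (2) */
        return NULL;
}

/* "CPU1": read the flags once with acquire semantics, mirroring
 * smp_load_acquire(); if the flag is observed, the perf_events store
 * made before the release is guaranteed to be visible as well. */
static void *handler(void *arg)
{
        unsigned int flags;

        (void)arg;
        flags = atomic_load_explicit(&ev.flags, memory_order_acquire);
        if (flags & TP_FLAG_PROFILE)
                printf("perf_events = %d\n", *ev.perf_events);      /* (3) */
        return NULL;
}

int main(void)
{
        pthread_t t0, t1;

        pthread_create(&t0, NULL, enabler, NULL);
        pthread_create(&t1, NULL, handler, NULL);
        pthread_join(t0, NULL);
        pthread_join(t1, NULL);
        return 0;
}

Without the release/acquire pair (plain stores and loads), the handler thread
could observe the flag as set while still reading a stale NULL perf_events,
which is exactly the window hit in the crash above.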

Link: https://lore.kernel.org/all/20251001022025.44626-1-chenyuan_fl@163.com/

Fixes: 50d780560785 ("tracing/kprobes: Add probe handler dispatcher to support perf and ftrace concurrent use")
Cc: stable@vger.kernel.org
Signed-off-by: Yuan Chen <chenyuan@kylinos.cn>
Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
[ Drop fprobe changes + context ]
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 kernel/trace/trace_kprobe.c |   11 +++++++----
 kernel/trace/trace_probe.h  |    9 +++++++--
 kernel/trace/trace_uprobe.c |   12 ++++++++----
 3 files changed, 22 insertions(+), 10 deletions(-)

--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1722,14 +1722,15 @@ static int kprobe_register(struct trace_
 static int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
 {
         struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
+        unsigned int flags = trace_probe_load_flag(&tk->tp);
         int ret = 0;

         raw_cpu_inc(*tk->nhit);

-        if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
+        if (flags & TP_FLAG_TRACE)
                 kprobe_trace_func(tk, regs);
 #ifdef CONFIG_PERF_EVENTS
-        if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
+        if (flags & TP_FLAG_PROFILE)
                 ret = kprobe_perf_func(tk, regs);
 #endif
         return ret;
@@ -1741,6 +1742,7 @@ kretprobe_dispatcher(struct kretprobe_in
 {
         struct kretprobe *rp = get_kretprobe(ri);
         struct trace_kprobe *tk;
+        unsigned int flags;

         /*
          * There is a small chance that get_kretprobe(ri) returns NULL when
@@ -1753,10 +1755,11 @@ kretprobe_dispatcher(struct kretprobe_in
         tk = container_of(rp, struct trace_kprobe, rp);
         raw_cpu_inc(*tk->nhit);

-        if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
+        flags = trace_probe_load_flag(&tk->tp);
+        if (flags & TP_FLAG_TRACE)
                 kretprobe_trace_func(tk, ri, regs);
 #ifdef CONFIG_PERF_EVENTS
-        if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
+        if (flags & TP_FLAG_PROFILE)
                 kretprobe_perf_func(tk, ri, regs);
 #endif
         return 0;  /* We don't tweak kernel, so just return 0 */
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -258,16 +258,21 @@ struct event_file_link {
         struct list_head list;
 };

+static inline unsigned int trace_probe_load_flag(struct trace_probe *tp)
+{
+        return smp_load_acquire(&tp->event->flags);
+}
+
 static inline bool trace_probe_test_flag(struct trace_probe *tp,
                                          unsigned int flag)
 {
-        return !!(tp->event->flags & flag);
+        return !!(trace_probe_load_flag(tp) & flag);
 }

 static inline void trace_probe_set_flag(struct trace_probe *tp,
                                         unsigned int flag)
 {
-        tp->event->flags |= flag;
+        smp_store_release(&tp->event->flags, tp->event->flags | flag);
 }

 static inline void trace_probe_clear_flag(struct trace_probe *tp,
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -1485,6 +1485,7 @@ static int uprobe_dispatcher(struct upro
         struct uprobe_dispatch_data udd;
         struct uprobe_cpu_buffer *ucb;
         int dsize, esize;
+        unsigned int flags;
         int ret = 0;


@@ -1505,11 +1506,12 @@ static int uprobe_dispatcher(struct upro
         ucb = uprobe_buffer_get();
         store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);

-        if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
+        flags = trace_probe_load_flag(&tu->tp);
+        if (flags & TP_FLAG_TRACE)
                 ret |= uprobe_trace_func(tu, regs, ucb, dsize);

 #ifdef CONFIG_PERF_EVENTS
-        if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
+        if (flags & TP_FLAG_PROFILE)
                 ret |= uprobe_perf_func(tu, regs, ucb, dsize);
 #endif
         uprobe_buffer_put(ucb);
@@ -1523,6 +1525,7 @@ static int uretprobe_dispatcher(struct u
         struct uprobe_dispatch_data udd;
         struct uprobe_cpu_buffer *ucb;
         int dsize, esize;
+        unsigned int flags;

         tu = container_of(con, struct trace_uprobe, consumer);

@@ -1540,11 +1543,12 @@ static int uretprobe_dispatcher(struct u
         ucb = uprobe_buffer_get();
         store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);

-        if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
+        flags = trace_probe_load_flag(&tu->tp);
+        if (flags & TP_FLAG_TRACE)
                 uretprobe_trace_func(tu, func, regs, ucb, dsize);

 #ifdef CONFIG_PERF_EVENTS
-        if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
+        if (flags & TP_FLAG_PROFILE)
                 uretprobe_perf_func(tu, func, regs, ucb, dsize);
 #endif
         uprobe_buffer_put(ucb);