From 243dbd5c83fd647144b4e6f4e6e01e9e59fe910f Mon Sep 17 00:00:00 2001
From: Michael Jeanson <mjeanson@efficios.com>
Date: Tue, 29 Jul 2025 15:34:23 -0400
Subject: [PATCH 2/2] fix: percpu: repurpose __percpu tag as a named address
 space qualifier (v6.15)

The '__percpu' annotation was repurposed as a named address space
qualifier; with compilers that support 'typeof_unqual', such as GCC 14
and Clang 19, this is now enforced at build time.
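
As a minimal illustration (not part of this patch, and assuming a
compiler with named address space support), this is the kind of
assignment that now fails at build time, and the accessor that makes
it legal:

  /* Hypothetical example; 'hits' is not a variable in this tree. */
  #include <linux/percpu.h>

  static DEFINE_PER_CPU(int, hits);

  static void touch(int cpu)
  {
  	int *p;

  	/* p = &hits; */		/* rejected: '&hits' is 'int __percpu *' */
  	p = per_cpu_ptr(&hits, cpu);	/* OK: accessor yields a plain 'int *' */
  	(*p)++;
  }
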
See upstream commits:

  commit 6cea5ae714ba47ea4807d15903baca9857a450e6
  Author: Uros Bizjak <ubizjak@gmail.com>
  Date:   Mon Jan 27 17:05:09 2025 +0100

    percpu: repurpose __percpu tag as a named address space qualifier

    The patch introduces the __percpu_qual define and repurposes the
    __percpu tag as a named address space qualifier using the new define.

    Arches can now conditionally define __percpu_qual as their named
    address space qualifier for percpu variables.
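
The pattern is roughly the following (simplified sketch, not the
verbatim kernel headers; upstream also keeps the existing sparse
annotations on __percpu):

  /* Illustrative only. */
  #ifndef __percpu_qual
  # define __percpu_qual			/* default: no named address space */
  #endif

  #define __percpu	__percpu_qual	/* arches may define __percpu_qual, e.g. __seg_gs */
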

  commit 6a367577153acd9b432a5340fb10891eeb7e10f1
  Author: Uros Bizjak <ubizjak@gmail.com>
  Date:   Mon Jan 27 17:05:10 2025 +0100

    percpu/x86: enable strict percpu checks via named AS qualifiers

    This patch declares percpu variables in __seg_gs/__seg_fs named AS and
    keeps them named AS qualified until they are dereferenced with a
    percpu accessor. This approach enables various compiler checks for
    cross-namespace variable assignments.
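
For reference, a standalone sketch of what an x86-64 named address
space qualifier does under GCC/Clang (illustrative, outside the
kernel):

  /* __seg_gs marks the %gs-relative address space on x86-64. */
  int __seg_gs *gp;

  int load_gs(void)
  {
  	return *gp;	/* emitted as a %gs:-prefixed load */
  }

  /* 'int *p = gp;' is diagnosed: the pointers differ in address space. */
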

Upstream-Status: Backport [commit 243dbd5c]

Change-Id: Ib212cb4ef077da994867f0541921529dd4a799a0
Signed-off-by: Michael Jeanson <mjeanson@efficios.com>
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
---
 include/ringbuffer/backend_types.h            |  5 ++-
 include/ringbuffer/frontend_api.h             |  4 +-
 src/lib/ringbuffer/ring_buffer_backend.c      | 30 +++++++-------
 src/lib/ringbuffer/ring_buffer_frontend.c     | 40 +++++++++----------
 src/lib/ringbuffer/ring_buffer_iterator.c     |  8 ++--
 src/lttng-context-callstack.c                 |  2 +-
 src/lttng-ring-buffer-event-notifier-client.h |  2 +-
 src/lttng-ring-buffer-metadata-client.h       |  2 +-
 8 files changed, 48 insertions(+), 45 deletions(-)
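
The change below is mechanical: the dual-purpose 'chanb->buf' pointer
is split so each use site carries the correct address-space type. A
condensed, illustrative sketch of the resulting pattern (names as in
the hunks below; the patch itself uses if/else rather than this
ternary):

  union {
  	struct lttng_kernel_ring_buffer *global_buf;		/* RING_BUFFER_ALLOC_PER_CHANNEL */
  	struct lttng_kernel_ring_buffer __percpu *percpu_buf;	/* RING_BUFFER_ALLOC_PER_CPU */
  };

  buf = (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
  	? per_cpu_ptr(chan->backend.percpu_buf, cpu)	/* strips the __percpu qualifier */
  	: chan->backend.global_buf;			/* already a plain pointer */
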
diff --git a/include/ringbuffer/backend_types.h b/include/ringbuffer/backend_types.h
index c23889ea..caafe4da 100644
--- a/include/ringbuffer/backend_types.h
+++ b/include/ringbuffer/backend_types.h
@@ -79,7 +79,10 @@ struct channel_backend {
 	unsigned int buf_size_order;	/* Order of buffer size */
 	unsigned int extra_reader_sb:1;	/* has extra reader subbuffer ? */
-	struct lttng_kernel_ring_buffer *buf; /* Channel per-cpu buffers */
+	union {
+		struct lttng_kernel_ring_buffer *global_buf;		/* Channel global buffer */
+		struct lttng_kernel_ring_buffer __percpu *percpu_buf;	/* Channel per-cpu buffers */
+	};
 	unsigned long num_subbuf;	/* Number of sub-buffers for writer */
 	u64 start_timestamp;		/* Channel creation timestamp value */
diff --git a/include/ringbuffer/frontend_api.h b/include/ringbuffer/frontend_api.h
index e8d77d95..a0319c98 100644
--- a/include/ringbuffer/frontend_api.h
+++ b/include/ringbuffer/frontend_api.h
@@ -153,9 +153,9 @@ int lib_ring_buffer_reserve(const struct lttng_kernel_ring_buffer_config *config
 
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
-		buf = per_cpu_ptr(chan->backend.buf, ctx->priv.reserve_cpu);
+		buf = per_cpu_ptr(chan->backend.percpu_buf, ctx->priv.reserve_cpu);
 	else
-		buf = chan->backend.buf;
+		buf = chan->backend.global_buf;
 	if (unlikely(atomic_read(&buf->record_disabled)))
diff --git a/src/lib/ringbuffer/ring_buffer_backend.c b/src/lib/ringbuffer/ring_buffer_backend.c
index 3eaa1b96..b5ecde4d 100644
--- a/src/lib/ringbuffer/ring_buffer_backend.c
+++ b/src/lib/ringbuffer/ring_buffer_backend.c
@@ -279,7 +279,7 @@ int lttng_cpuhp_rb_backend_prepare(unsigned int cpu,
 	CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_PER_CHANNEL);
 
-	buf = per_cpu_ptr(chanb->buf, cpu);
+	buf = per_cpu_ptr(chanb->percpu_buf, cpu);
 	ret = lib_ring_buffer_create(buf, chanb, cpu);
@@ -320,7 +320,7 @@ int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
 	case CPU_UP_PREPARE_FROZEN:
-		buf = per_cpu_ptr(chanb->buf, cpu);
+		buf = per_cpu_ptr(chanb->percpu_buf, cpu);
 		ret = lib_ring_buffer_create(buf, chanb, cpu);
@@ -415,8 +415,8 @@ int channel_backend_init(struct channel_backend *chanb,
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
 		/* Allocating the buffer per-cpu structures */
-		chanb->buf = alloc_percpu(struct lttng_kernel_ring_buffer);
-		if (!chanb->buf)
+		chanb->percpu_buf = alloc_percpu(struct lttng_kernel_ring_buffer);
+		if (!chanb->percpu_buf)
 			goto free_cpumask;
 
 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0))
@@ -447,7 +447,7 @@ int channel_backend_init(struct channel_backend *chanb,
 	lttng_cpus_read_lock();
 	for_each_online_cpu(i) {
-		ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
+		ret = lib_ring_buffer_create(per_cpu_ptr(chanb->percpu_buf, i),
					     chanb, i);
 		if (ret)
 			goto free_bufs;	/* cpu hotplug locked */
@@ -455,7 +455,7 @@ int channel_backend_init(struct channel_backend *chanb,
 	lttng_cpus_read_unlock();
 	for_each_possible_cpu(i) {
-		ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
+		ret = lib_ring_buffer_create(per_cpu_ptr(chanb->percpu_buf, i),
					     chanb, i);
 		if (ret)
 			goto free_bufs;
@@ -464,10 +464,10 @@ int channel_backend_init(struct channel_backend *chanb,
 #endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
 	} else {
-		chanb->buf = kzalloc(sizeof(struct lttng_kernel_ring_buffer), GFP_KERNEL);
-		if (!chanb->buf)
+		chanb->global_buf = kzalloc(sizeof(struct lttng_kernel_ring_buffer), GFP_KERNEL);
+		if (!chanb->global_buf)
 			goto free_cpumask;
-		ret = lib_ring_buffer_create(chanb->buf, chanb, -1);
+		ret = lib_ring_buffer_create(chanb->global_buf, chanb, -1);
@@ -491,15 +491,15 @@ free_bufs:
 #endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
 		for_each_possible_cpu(i) {
 			struct lttng_kernel_ring_buffer *buf =
-				per_cpu_ptr(chanb->buf, i);
+				per_cpu_ptr(chanb->percpu_buf, i);
 
 			if (!buf->backend.allocated)
 				continue;
 			lib_ring_buffer_free(buf);
 		}
-		free_percpu(chanb->buf);
+		free_percpu(chanb->percpu_buf);
 	} else
-		kfree(chanb->buf);
+		kfree(chanb->global_buf);
free_cpumask:
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
 		free_cpumask_var(chanb->cpumask);
@@ -542,16 +542,16 @@ void channel_backend_free(struct channel_backend *chanb)
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
 		for_each_possible_cpu(i) {
-			struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chanb->buf, i);
+			struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chanb->percpu_buf, i);
 
 			if (!buf->backend.allocated)
 				continue;
 			lib_ring_buffer_free(buf);
 		}
 		free_cpumask_var(chanb->cpumask);
-		free_percpu(chanb->buf);
+		free_percpu(chanb->percpu_buf);
 	} else {
-		struct lttng_kernel_ring_buffer *buf = chanb->buf;
+		struct lttng_kernel_ring_buffer *buf = chanb->global_buf;
 
 		CHAN_WARN_ON(chanb, !buf->backend.allocated);
 		lib_ring_buffer_free(buf);
diff --git a/src/lib/ringbuffer/ring_buffer_frontend.c b/src/lib/ringbuffer/ring_buffer_frontend.c
index 1ed9dc47..f9def043 100644
--- a/src/lib/ringbuffer/ring_buffer_frontend.c
+++ b/src/lib/ringbuffer/ring_buffer_frontend.c
@@ -505,7 +505,7 @@ int lttng_cpuhp_rb_frontend_dead(unsigned int cpu,
 	struct lttng_kernel_ring_buffer_channel *chan = container_of(node, struct lttng_kernel_ring_buffer_channel,
-	struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
+	struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.percpu_buf, cpu);
 	const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
 
 	CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_PER_CHANNEL);
@@ -526,7 +526,7 @@ int lttng_cpuhp_rb_frontend_online(unsigned int cpu,
 	struct lttng_kernel_ring_buffer_channel *chan = container_of(node, struct lttng_kernel_ring_buffer_channel,
-	struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
+	struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.percpu_buf, cpu);
 	const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
 
 	CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_PER_CHANNEL);
@@ -543,7 +543,7 @@ int lttng_cpuhp_rb_frontend_offline(unsigned int cpu,
 	struct lttng_kernel_ring_buffer_channel *chan = container_of(node, struct lttng_kernel_ring_buffer_channel,
-	struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
+	struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.percpu_buf, cpu);
 	const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
 
 	CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_PER_CHANNEL);
@@ -574,7 +574,7 @@ int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
 	unsigned int cpu = (unsigned long)hcpu;
 	struct lttng_kernel_ring_buffer_channel *chan = container_of(nb, struct lttng_kernel_ring_buffer_channel,
-	struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
+	struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.percpu_buf, cpu);
 	const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
 
 	if (!chan->cpu_hp_enable)
@@ -741,7 +741,7 @@ static void channel_unregister_notifiers(struct lttng_kernel_ring_buffer_channel
 		lttng_cpus_read_lock();
 		chan->cpu_hp_enable = 0;
 		for_each_online_cpu(cpu) {
-			struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
+			struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.percpu_buf,
								  cpu);
 			lib_ring_buffer_stop_switch_timer(buf);
 			lib_ring_buffer_stop_read_timer(buf);
@@ -750,7 +750,7 @@ static void channel_unregister_notifiers(struct lttng_kernel_ring_buffer_channel
 		unregister_cpu_notifier(&chan->cpu_hp_notifier);
 		for_each_possible_cpu(cpu) {
-			struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
+			struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.percpu_buf,
								  cpu);
 			lib_ring_buffer_stop_switch_timer(buf);
 			lib_ring_buffer_stop_read_timer(buf);
@@ -759,7 +759,7 @@ static void channel_unregister_notifiers(struct lttng_kernel_ring_buffer_channel
 #endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,10,0)) */
 	} else {
-		struct lttng_kernel_ring_buffer *buf = chan->backend.buf;
+		struct lttng_kernel_ring_buffer *buf = chan->backend.global_buf;
 
 		lib_ring_buffer_stop_switch_timer(buf);
 		lib_ring_buffer_stop_read_timer(buf);
@@ -788,14 +788,14 @@ void lib_ring_buffer_set_quiescent_channel(struct lttng_kernel_ring_buffer_chann
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
 		lttng_cpus_read_lock();
 		for_each_channel_cpu(cpu, chan) {
-			struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
+			struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.percpu_buf,
								  cpu);
 
 			lib_ring_buffer_set_quiescent(buf);
 		}
 		lttng_cpus_read_unlock();
 	} else {
-		struct lttng_kernel_ring_buffer *buf = chan->backend.buf;
+		struct lttng_kernel_ring_buffer *buf = chan->backend.global_buf;
 
 		lib_ring_buffer_set_quiescent(buf);
@@ -810,14 +810,14 @@ void lib_ring_buffer_clear_quiescent_channel(struct lttng_kernel_ring_buffer_cha
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
 		lttng_cpus_read_lock();
 		for_each_channel_cpu(cpu, chan) {
-			struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
+			struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.percpu_buf,
								  cpu);
 
 			lib_ring_buffer_clear_quiescent(buf);
 		}
 		lttng_cpus_read_unlock();
 	} else {
-		struct lttng_kernel_ring_buffer *buf = chan->backend.buf;
+		struct lttng_kernel_ring_buffer *buf = chan->backend.global_buf;
 
 		lib_ring_buffer_clear_quiescent(buf);
@@ -915,7 +915,7 @@ struct lttng_kernel_ring_buffer_channel *channel_create(const struct lttng_kerne
 		lttng_cpus_read_lock();
 		for_each_online_cpu(cpu) {
-			struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
+			struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.percpu_buf,
								  cpu);
 			spin_lock(&per_cpu(ring_buffer_nohz_lock, cpu));
 			lib_ring_buffer_start_switch_timer(buf);
@@ -926,7 +926,7 @@ struct lttng_kernel_ring_buffer_channel *channel_create(const struct lttng_kerne
 		lttng_cpus_read_unlock();
 		for_each_possible_cpu(cpu) {
-			struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
+			struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.percpu_buf,
								  cpu);
 			spin_lock(&per_cpu(ring_buffer_nohz_lock, cpu));
 			lib_ring_buffer_start_switch_timer(buf);
@@ -947,7 +947,7 @@ struct lttng_kernel_ring_buffer_channel *channel_create(const struct lttng_kerne
 #endif /* defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER) */
 	} else {
-		struct lttng_kernel_ring_buffer *buf = chan->backend.buf;
+		struct lttng_kernel_ring_buffer *buf = chan->backend.global_buf;
 
 		lib_ring_buffer_start_switch_timer(buf);
 		lib_ring_buffer_start_read_timer(buf);
@@ -1004,7 +1004,7 @@ void *channel_destroy(struct lttng_kernel_ring_buffer_channel *chan)
 		for_each_channel_cpu(cpu, chan) {
-			struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
+			struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.percpu_buf,
								  cpu);
 
 			if (config->cb.buffer_finalize)
@@ -1019,7 +1019,7 @@ void *channel_destroy(struct lttng_kernel_ring_buffer_channel *chan)
 			wake_up_interruptible(&buf->read_wait);
 		}
 	} else {
-		struct lttng_kernel_ring_buffer *buf = chan->backend.buf;
+		struct lttng_kernel_ring_buffer *buf = chan->backend.global_buf;
 
 		if (config->cb.buffer_finalize)
 			config->cb.buffer_finalize(buf, chan->backend.priv, -1);
@@ -1044,9 +1044,9 @@ struct lttng_kernel_ring_buffer *channel_get_ring_buffer(
 		struct lttng_kernel_ring_buffer_channel *chan, int cpu)
 {
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CHANNEL)
-		return chan->backend.buf;
+		return chan->backend.global_buf;
 	else
-		return per_cpu_ptr(chan->backend.buf, cpu);
+		return per_cpu_ptr(chan->backend.percpu_buf, cpu);
 }
 EXPORT_SYMBOL_GPL(channel_get_ring_buffer);
@@ -2271,9 +2271,9 @@ static struct lttng_kernel_ring_buffer *get_current_buf(struct lttng_kernel_ring
 	const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
 
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
-		return per_cpu_ptr(chan->backend.buf, cpu);
+		return per_cpu_ptr(chan->backend.percpu_buf, cpu);
 	else
-		return chan->backend.buf;
+		return chan->backend.global_buf;
 }
 
 void lib_ring_buffer_lost_event_too_big(struct lttng_kernel_ring_buffer_channel *chan)
diff --git a/src/lib/ringbuffer/ring_buffer_iterator.c b/src/lib/ringbuffer/ring_buffer_iterator.c
index 88d79412..1a1db5e2 100644
--- a/src/lib/ringbuffer/ring_buffer_iterator.c
+++ b/src/lib/ringbuffer/ring_buffer_iterator.c
@@ -361,7 +361,7 @@ int lttng_cpuhp_rb_iter_online(unsigned int cpu,
 	struct lttng_kernel_ring_buffer_channel *chan = container_of(node, struct lttng_kernel_ring_buffer_channel,
-	struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
+	struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.percpu_buf, cpu);
 	const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
 
 	CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_PER_CHANNEL);
@@ -382,7 +382,7 @@ int channel_iterator_cpu_hotplug(struct notifier_block *nb,
 	unsigned int cpu = (unsigned long)hcpu;
 	struct lttng_kernel_ring_buffer_channel *chan = container_of(nb, struct lttng_kernel_ring_buffer_channel,
-	struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
+	struct lttng_kernel_ring_buffer *buf = per_cpu_ptr(chan->backend.percpu_buf, cpu);
 	const struct lttng_kernel_ring_buffer_config *config = &chan->backend.config;
 
 	if (!chan->hp_iter_enable)
@@ -443,14 +443,14 @@ int channel_iterator_init(struct lttng_kernel_ring_buffer_channel *chan)
 	lttng_cpus_read_lock();
 	for_each_online_cpu(cpu) {
-		buf = per_cpu_ptr(chan->backend.buf, cpu);
+		buf = per_cpu_ptr(chan->backend.percpu_buf, cpu);
 		lib_ring_buffer_iterator_init(chan, buf);
 	}
 	chan->hp_iter_enable = 1;
 	lttng_cpus_read_unlock();
 	for_each_possible_cpu(cpu) {
-		buf = per_cpu_ptr(chan->backend.buf, cpu);
+		buf = per_cpu_ptr(chan->backend.percpu_buf, cpu);
 		lib_ring_buffer_iterator_init(chan, buf);
 	}
diff --git a/src/lttng-context-callstack.c b/src/lttng-context-callstack.c
index 4385472e..c5910718 100644
--- a/src/lttng-context-callstack.c
+++ b/src/lttng-context-callstack.c
@@ -69,7 +69,7 @@ void field_data_free(struct field_data *fdata)
 static
-struct field_data __percpu *field_data_create(enum lttng_cs_ctx_modes mode)
+struct field_data *field_data_create(enum lttng_cs_ctx_modes mode)
 {
 	struct lttng_cs __percpu *cs_set;
 	struct field_data *fdata;
diff --git a/src/lttng-ring-buffer-event-notifier-client.h b/src/lttng-ring-buffer-event-notifier-client.h
index 95deab46..b4145914 100644
--- a/src/lttng-ring-buffer-event-notifier-client.h
+++ b/src/lttng-ring-buffer-event-notifier-client.h
@@ -363,7 +363,7 @@ size_t lttng_packet_avail_size(struct lttng_kernel_ring_buffer_channel *chan)
 	unsigned long o_begin;
 	struct lttng_kernel_ring_buffer *buf;
 
-	buf = chan->backend.buf;	/* Only for global buffer ! */
+	buf = chan->backend.global_buf;	/* Only for global buffer ! */
 	o_begin = v_read(&client_config, &buf->offset);
 	if (subbuf_offset(o_begin, chan) != 0) {
 		return chan->backend.subbuf_size - subbuf_offset(o_begin, chan);
diff --git a/src/lttng-ring-buffer-metadata-client.h b/src/lttng-ring-buffer-metadata-client.h
index 99158451..9ef73266 100644
--- a/src/lttng-ring-buffer-metadata-client.h
+++ b/src/lttng-ring-buffer-metadata-client.h
@@ -368,7 +368,7 @@ size_t lttng_packet_avail_size(struct lttng_kernel_ring_buffer_channel *chan)
 	unsigned long o_begin;
 	struct lttng_kernel_ring_buffer *buf;
 
-	buf = chan->backend.buf;	/* Only for global buffer ! */
+	buf = chan->backend.global_buf;	/* Only for global buffer ! */
 	o_begin = v_read(&client_config, &buf->offset);
 	if (subbuf_offset(o_begin, chan) != 0) {
 		return chan->backend.subbuf_size - subbuf_offset(o_begin, chan);