From: Frederic Weisbecker
Date: Mon, 28 Apr 2025 11:11:47 +0000 (+0200)
Subject: perf: Fix irq work dereferencing garbage
X-Git-Tag: v6.16-rc1~196^2~24
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=d20eb2d5fe8f8818abcfdadf5ac5109938f1318e;p=thirdparty%2Flinux.git

The following commit:

	da916e96e2de ("perf: Make perf_pmu_unregister() useable")

has introduced two significant changes to an event's parent lifecycle:

1) An event that has exited now has EVENT_TOMBSTONE as a parent.
   This can result in a situation where the delayed wakeup irq_work can
   accidentally dereference EVENT_TOMBSTONE on:

CPU 0                                          CPU 1
-----                                          -----

__schedule()
    local_irq_disable()
    rq_lock()
    perf_event_overflow()
        irq_work_queue(&child->pending_irq)
    perf_event_task_sched_out()
        raw_spin_lock(&ctx->lock)
        ctx_sched_out()
            ctx->is_active = 0
            event_sched_out(child)
        raw_spin_unlock(&ctx->lock)
                                               perf_event_release_kernel(parent)
                                                   perf_remove_from_context(child)
                                                       raw_spin_lock_irq(&ctx->lock)
                                                       // Sees !ctx->is_active
                                                       // Removes from context inline
                                                       __perf_remove_from_context(child)
                                                           perf_child_detach(child)
                                                               event->parent = EVENT_TOMBSTONE
    raw_spin_rq_unlock_irq(rq);
    perf_pending_irq()
        perf_event_wakeup(child)
            ring_buffer_wakeup(child)
                rcu_dereference(child->parent->rb) <--- CRASH

This also concerns the call to kill_fasync() on parent->fasync.

2) The final parent reference count decrement can now happen before the
   final child reference count decrement, i.e. the parent can now be
   freed before its child. On PREEMPT_RT, this can result in a
   situation where the delayed wakeup irq_work can accidentally
   dereference a freed parent:

CPU 0                           CPU 1                           CPU 2
-----                           -----                           ------

perf_pmu_unregister()
    pmu_detach_events()
        pmu_get_event()
            atomic_long_inc_not_zero(&child->refcount)

                                perf_event_overflow()
                                    irq_work_queue(&child->pending_irq);
                                irq_work_run()
                                    wake_irq_workd()
                                preempt_schedule_irq()
                                =========> SWITCH to workd

                                irq_work_run_list()
                                    perf_pending_irq()
                                        perf_event_wakeup(child)
                                            ring_buffer_wakeup(child)
                                                event = child->parent

                                                                perf_event_release_kernel(parent)
                                                                    // Not last ref, PMU holds it
                                                                    put_event(child)
                                                                    // Last ref
                                                                    put_event(parent)
                                                                        free_event()
                                                                            call_rcu(...)
                                                                rcu_core()
                                                                    free_event_rcu()

                                                rcu_dereference(event->rb) <--- CRASH

This also concerns the call to kill_fasync() on parent->fasync.

The "easy" solution to 1) is to check that event->parent is not
EVENT_TOMBSTONE in perf_event_wakeup() (including both the ring buffer
and fasync uses).

The "easy" solution to 2) is to make perf_event_wakeup() run wholly
under rcu_read_lock().

However, because of 2), sanity would prescribe making event::parent an
__rcu pointer and annotating each and every user to prove they are
reliable.

Propose an alternate solution instead: restore the stable pointer to
the parent until all its children have called _free_event() themselves,
to avoid any further accident. Also revert the EVENT_TOMBSTONE design,
which mostly exists to determine which caller of perf_event_exit_event()
must perform the refcount decrement on a child event matching the
increment in inherit_event().

Arrange instead for checking the attach state of an event prior to its
removal and decrement the child's refcount accordingly.
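To see why hazard 1) crashes, note that EVENT_TOMBSTONE is a non-NULL
sentinel, so any wakeup path that only NULL-checks event->parent will
chase it. The following is a minimal user-space model of that path (a
sketch of the mechanism only: the struct layout and the wakeup() helper
are hypothetical stand-ins, not the kernel code):

	#include <stdio.h>

	#define EVENT_TOMBSTONE ((void *)-1L)	/* sentinel, not a mappable address */

	struct event {
		struct event *parent;	/* NULL, a live parent, or the tombstone */
		void *rb;		/* stands in for the RCU-protected ring buffer */
	};

	/* Models ring_buffer_wakeup(): a child wakes up through its parent's rb. */
	static void wakeup(struct event *event)
	{
		if (event->parent)		/* the tombstone is non-NULL... */
			event = event->parent;	/* ...so it is followed blindly */
		printf("rb=%p\n", event->rb);	/* reads (void *)-1L plus an offset */
	}

	int main(void)
	{
		struct event parent = { .parent = NULL, .rb = "buffer" };
		struct event child  = { .parent = &parent, .rb = NULL };

		wakeup(&child);			/* fine: goes through the live parent */
		child.parent = EVENT_TOMBSTONE;	/* what perf_child_detach() did */
		wakeup(&child);			/* the irq_work firing now faults */
		return 0;
	}

Run as-is, the second wakeup() faults on the sentinel plus the member
offset, which is the user-space analogue of the
rcu_dereference(child->parent->rb) crash above.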
Fixes: da916e96e2de ("perf: Make perf_pmu_unregister() useable")
Signed-off-by: Frederic Weisbecker
Signed-off-by: Peter Zijlstra (Intel)
---

diff --git a/kernel/events/core.c b/kernel/events/core.c
index 882db7bca7821..e0ca4a88beb55 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -208,7 +208,6 @@ static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
 }
 
 #define TASK_TOMBSTONE ((void *)-1L)
-#define EVENT_TOMBSTONE ((void *)-1L)
 
 static bool is_kernel_event(struct perf_event *event)
 {
@@ -2338,12 +2337,6 @@ static void perf_child_detach(struct perf_event *event)
 
 	sync_child_event(event);
 	list_del_init(&event->child_list);
-	/*
-	 * Cannot set to NULL, as that would confuse the situation vs
-	 * not being a child event. See for example unaccount_event().
-	 */
-	event->parent = EVENT_TOMBSTONE;
-	put_event(parent_event);
 }
 
 static bool is_orphaned_event(struct perf_event *event)
@@ -5705,7 +5698,7 @@ static void put_event(struct perf_event *event)
 	_free_event(event);
 
 	/* Matches the refcount bump in inherit_event() */
-	if (parent && parent != EVENT_TOMBSTONE)
+	if (parent)
 		put_event(parent);
 }
 
@@ -9998,7 +9991,7 @@ void perf_event_text_poke(const void *addr, const void *old_bytes,
 
 void perf_event_itrace_started(struct perf_event *event)
 {
-	event->attach_state |= PERF_ATTACH_ITRACE;
+	WRITE_ONCE(event->attach_state, event->attach_state | PERF_ATTACH_ITRACE);
 }
 
 static void perf_log_itrace_start(struct perf_event *event)
@@ -13922,10 +13915,7 @@ perf_event_exit_event(struct perf_event *event,
 {
 	struct perf_event *parent_event = event->parent;
 	unsigned long detach_flags = DETACH_EXIT;
-	bool is_child = !!parent_event;
-
-	if (parent_event == EVENT_TOMBSTONE)
-		parent_event = NULL;
+	unsigned int attach_state;
 
 	if (parent_event) {
 		/*
@@ -13942,6 +13932,8 @@
 		 */
 		detach_flags |= DETACH_GROUP | DETACH_CHILD;
 		mutex_lock(&parent_event->child_mutex);
+		/* PERF_ATTACH_ITRACE might be set concurrently */
+		attach_state = READ_ONCE(event->attach_state);
 	}
 
 	if (revoke)
@@ -13951,18 +13943,25 @@
 	/*
 	 * Child events can be freed.
 	 */
-	if (is_child) {
-		if (parent_event) {
-			mutex_unlock(&parent_event->child_mutex);
+	if (parent_event) {
+		mutex_unlock(&parent_event->child_mutex);
+
+		/*
+		 * Match the refcount initialization. Make sure it doesn't happen
+		 * twice if pmu_detach_event() calls it on an already exited task.
+		 */
+		if (attach_state & PERF_ATTACH_CHILD) {
 			/*
 			 * Kick perf_poll() for is_event_hup();
 			 */
 			perf_event_wakeup(parent_event);
 			/*
 			 * pmu_detach_event() will have an extra refcount.
+			 * perf_pending_task() might have one too.
 			 */
 			put_event(event);
 		}
+
 		return;
 	}
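A side note on the WRITE_ONCE()/READ_ONCE() pairing introduced above:
perf_event_itrace_started() can set PERF_ATTACH_ITRACE while
perf_event_exit_event() inspects attach_state, so the exit path
snapshots the field once and then decides only on that snapshot. Below
is a minimal user-space sketch of this snapshot-then-decide pattern;
the accessor macros are simplified stand-ins for the kernel's ONCE
helpers, and the flag values are made up for illustration:

	#include <stdio.h>

	/* Simplified stand-ins for the kernel's ONCE accessors. */
	#define READ_ONCE(x)		(*(const volatile typeof(x) *)&(x))
	#define WRITE_ONCE(x, val)	(*(volatile typeof(x) *)&(x) = (val))

	/* Made-up flag values, for illustration only. */
	#define PERF_ATTACH_CHILD	0x01
	#define PERF_ATTACH_ITRACE	0x02

	struct event {
		unsigned int attach_state;
	};

	/* Models perf_event_itrace_started(): flips a bit while readers run. */
	static void itrace_started(struct event *event)
	{
		WRITE_ONCE(event->attach_state,
			   event->attach_state | PERF_ATTACH_ITRACE);
	}

	/* Models the exit path: take one snapshot, act only on that copy. */
	static void exit_event(struct event *event)
	{
		unsigned int attach_state = READ_ONCE(event->attach_state);

		/*
		 * Deciding on the snapshot means the reference is dropped at
		 * most once, even if a concurrent writer flips other bits.
		 */
		if (attach_state & PERF_ATTACH_CHILD)
			printf("drop the child's base reference\n");
	}

	int main(void)
	{
		struct event child = { .attach_state = PERF_ATTACH_CHILD };

		itrace_started(&child);	/* the racing writer, serialized here */
		exit_event(&child);
		return 0;
	}

In the kernel, the snapshot is additionally taken under
parent_event->child_mutex, which is what ensures the child's reference
is dropped at most once even when pmu_detach_event() calls into an
already exited task.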