From: Thomas Gleixner
Date: Wed, 19 Nov 2025 17:27:05 +0000 (+0100)
Subject: signal: Move MMCID exit out of sighand lock
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=2b1642b881088bbf73fcb1147c474a198ec46729;p=thirdparty%2Fkernel%2Flinux.git

signal: Move MMCID exit out of sighand lock

There is no need to keep this under sighand lock anymore, as neither the
current code nor the upcoming replacement depends on the exit state of a
task. That allows the exit path to use a mutex.

Signed-off-by: Thomas Gleixner
Signed-off-by: Peter Zijlstra (Intel)
Signed-off-by: Thomas Gleixner
Reviewed-by: Mathieu Desnoyers
Link: https://patch.msgid.link/20251119172549.706439391@linutronix.de
---

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 64f080d6ed6e4..c411ae021bc55 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2298,7 +2298,7 @@ static __always_inline void alloc_tag_restore(struct alloc_tag *tag, struct allo
 void sched_mm_cid_before_execve(struct task_struct *t);
 void sched_mm_cid_after_execve(struct task_struct *t);
 void sched_mm_cid_fork(struct task_struct *t);
-void sched_mm_cid_exit_signals(struct task_struct *t);
+void sched_mm_cid_exit(struct task_struct *t);
 static inline int task_mm_cid(struct task_struct *t)
 {
 	return t->mm_cid.cid;
@@ -2307,7 +2307,7 @@ static inline int task_mm_cid(struct task_struct *t)
 static inline void sched_mm_cid_before_execve(struct task_struct *t) { }
 static inline void sched_mm_cid_after_execve(struct task_struct *t) { }
 static inline void sched_mm_cid_fork(struct task_struct *t) { }
-static inline void sched_mm_cid_exit_signals(struct task_struct *t) { }
+static inline void sched_mm_cid_exit(struct task_struct *t) { }
 static inline int task_mm_cid(struct task_struct *t)
 {
 	/*
diff --git a/kernel/exit.c b/kernel/exit.c
index 9f74e8f1c431b..324616f690b7b 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -910,6 +910,7 @@ void __noreturn do_exit(long code)
 	user_events_exit(tsk);
 
 	io_uring_files_cancel();
+	sched_mm_cid_exit(tsk);
 	exit_signals(tsk);	/* sets PF_EXITING */
 
 	seccomp_filter_release(tsk);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9a114b6f6a6fd..3fdf90a7074d8 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -10392,7 +10392,7 @@ static inline void mm_update_cpus_allowed(struct mm_struct *mm, const struct cpu
 	WRITE_ONCE(mm->mm_cid.nr_cpus_allowed, weight);
 }
 
-void sched_mm_cid_exit_signals(struct task_struct *t)
+void sched_mm_cid_exit(struct task_struct *t)
 {
 	struct mm_struct *mm = t->mm;
 
@@ -10410,7 +10410,7 @@ void sched_mm_cid_exit_signals(struct task_struct *t)
 /* Deactivate MM CID allocation across execve() */
 void sched_mm_cid_before_execve(struct task_struct *t)
 {
-	sched_mm_cid_exit_signals(t);
+	sched_mm_cid_exit(t);
 }
 
 /* Reactivate MM CID after successful execve() */
diff --git a/kernel/signal.c b/kernel/signal.c
index fe9190d84f281..e42b8bd6922fc 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -3125,7 +3125,6 @@ void exit_signals(struct task_struct *tsk)
 	cgroup_threadgroup_change_begin(tsk);
 
 	if (thread_group_empty(tsk) || (tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
-		sched_mm_cid_exit_signals(tsk);
 		tsk->flags |= PF_EXITING;
 		cgroup_threadgroup_change_end(tsk);
 		return;
@@ -3136,7 +3135,6 @@ void exit_signals(struct task_struct *tsk)
 	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
 	 */
-	sched_mm_cid_exit_signals(tsk);
 	tsk->flags |= PF_EXITING;
 
 	cgroup_threadgroup_change_end(tsk);
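
Editor's note on the reordering: per the subject line, the removed call in exit_signals()
ran under the sighand lock, where sleeping locks cannot be taken; calling
sched_mm_cid_exit() from do_exit() before exit_signals() moves the teardown into a
context where a mutex is permitted. The sketch below is only a userspace analogy of that
ordering constraint, not kernel code; every name in it (fake_sighand_lock,
fake_exit_signals(), mm_cid_mutex, and so on) is made up for illustration.

/*
 * Userspace analogy: cleanup that needs a sleeping lock (a mutex) must run
 * before, not inside, the spinlock-protected exit bookkeeping.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t fake_sighand_lock;	/* stands in for the sighand lock */
static pthread_mutex_t mm_cid_mutex = PTHREAD_MUTEX_INITIALIZER;
static int mm_cid = 42;

/* Analogue of sched_mm_cid_exit(): may sleep, so it takes a mutex. */
static void fake_sched_mm_cid_exit(void)
{
	pthread_mutex_lock(&mm_cid_mutex);
	mm_cid = -1;				/* drop the per-task concurrency id */
	pthread_mutex_unlock(&mm_cid_mutex);
}

/* Analogue of exit_signals(): its bookkeeping runs under a spinlock. */
static void fake_exit_signals(void)
{
	pthread_spin_lock(&fake_sighand_lock);
	/* set PF_EXITING, adjust group state, ... (no sleeping allowed here) */
	pthread_spin_unlock(&fake_sighand_lock);
}

/* Analogue of the do_exit() ordering after this patch. */
static void fake_do_exit(void)
{
	fake_sched_mm_cid_exit();		/* outside the spinlock: sleeping is fine */
	fake_exit_signals();			/* spinlock-protected exit state change */
}

int main(void)
{
	pthread_spin_init(&fake_sighand_lock, PTHREAD_PROCESS_PRIVATE);
	fake_do_exit();
	printf("mm_cid after exit path: %d\n", mm_cid);
	pthread_spin_destroy(&fake_sighand_lock);
	return 0;
}

Build the sketch with "cc -pthread fake_exit.c". The point it illustrates is the one the
changelog states: once the call is made from do_exit() rather than from under the sighand
lock, the MM CID teardown is free to use a mutex.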