From: Greg Kroah-Hartman
Date: Mon, 17 Aug 2020 13:45:53 +0000 (+0200)
Subject: 5.8-stable patches
X-Git-Tag: v4.19.140~8
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=c672c74a99ef0748b500ab65b07ad9b0f96cadce;p=thirdparty%2Fkernel%2Fstable-queue.git

5.8-stable patches

added patches:
        task_work-only-grab-task-signal-lock-when-needed.patch
---

diff --git a/queue-5.8/series b/queue-5.8/series
index 971c9fc5604..3a422d9e050 100644
--- a/queue-5.8/series
+++ b/queue-5.8/series
@@ -461,3 +461,4 @@ io_uring-fix-null-pointer-dereference-in-loop_rw_iter.patch
 io_uring-hold-ctx-reference-around-task_work-queue-execute.patch
 io_uring-add-missing-req_f_comp_locked-for-nested-requests.patch
 io_uring-enable-lookup-of-links-holding-inflight-files.patch
+task_work-only-grab-task-signal-lock-when-needed.patch
diff --git a/queue-5.8/task_work-only-grab-task-signal-lock-when-needed.patch b/queue-5.8/task_work-only-grab-task-signal-lock-when-needed.patch
new file mode 100644
index 00000000000..a09d2a55ebb
--- /dev/null
+++ b/queue-5.8/task_work-only-grab-task-signal-lock-when-needed.patch
@@ -0,0 +1,78 @@
+From ebf0d100df0731901c16632f78d78d35f4123bc4 Mon Sep 17 00:00:00 2001
+From: Jens Axboe
+Date: Thu, 13 Aug 2020 09:01:38 -0600
+Subject: task_work: only grab task signal lock when needed
+
+From: Jens Axboe
+
+commit ebf0d100df0731901c16632f78d78d35f4123bc4 upstream.
+
+If JOBCTL_TASK_WORK is already set on the targeted task, then we need
+not go through {lock,unlock}_task_sighand() to set it again and queue
+a signal wakeup. This is safe as we're checking it _after_ adding the
+new task_work with cmpxchg().
+
+The ordering is as follows:
+
+task_work_add()                         get_signal()
+--------------------------------------------------------------
+STORE(task->task_works, new_work);      STORE(task->jobctl);
+mb();                                   mb();
+LOAD(task->jobctl);                     LOAD(task->task_works);
+
+This speeds up TWA_SIGNAL handling quite a bit, which is important now
+that io_uring is relying on it for all task_work deliveries.
+
+Cc: Peter Zijlstra
+Cc: Jann Horn
+Acked-by: Oleg Nesterov
+Signed-off-by: Jens Axboe
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ kernel/signal.c    |   16 +++++++++++++++-
+ kernel/task_work.c |    8 +++++++-
+ 2 files changed, 22 insertions(+), 2 deletions(-)
+
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -2541,7 +2541,21 @@ bool get_signal(struct ksignal *ksig)
+ 
+ relock:
+         spin_lock_irq(&sighand->siglock);
+-        current->jobctl &= ~JOBCTL_TASK_WORK;
++        /*
++         * Make sure we can safely read ->jobctl() in task_work add. As Oleg
++         * states:
++         *
++         * It pairs with mb (implied by cmpxchg) before READ_ONCE. So we
++         * roughly have
++         *
++         *      task_work_add:                          get_signal:
++         *      STORE(task->task_works, new_work);      STORE(task->jobctl);
++         *      mb();                                   mb();
++         *      LOAD(task->jobctl);                     LOAD(task->task_works);
++         *
++         * and we can rely on STORE-MB-LOAD [ in task_work_add].
++         */
++        smp_store_mb(current->jobctl, current->jobctl & ~JOBCTL_TASK_WORK);
+         if (unlikely(current->task_works)) {
+                 spin_unlock_irq(&sighand->siglock);
+                 task_work_run();
+--- a/kernel/task_work.c
++++ b/kernel/task_work.c
+@@ -42,7 +42,13 @@ task_work_add(struct task_struct *task,
+                 set_notify_resume(task);
+                 break;
+         case TWA_SIGNAL:
+-                if (lock_task_sighand(task, &flags)) {
++                /*
++                 * Only grab the sighand lock if we don't already have some
++                 * task_work pending. This pairs with the smp_store_mb()
++                 * in get_signal(), see comment there.
++                 */
++                if (!(READ_ONCE(task->jobctl) & JOBCTL_TASK_WORK) &&
++                    lock_task_sighand(task, &flags)) {
+                         task->jobctl |= JOBCTL_TASK_WORK;
+                         signal_wake_up(task, 0);
+                         unlock_task_sighand(task, &flags);
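
For readers who want to experiment with the STORE-MB-LOAD pairing the
patch relies on, below is a minimal userspace sketch using C11 atomics
and pthreads in place of the kernel's cmpxchg() and smp_store_mb().
All names here (the two flags, adder/signaller, the file name in the
build line) are illustrative assumptions, not kernel identifiers; this
is a model of the ordering argument, not the kernel implementation.

/*
 * Dekker-style guarantee: when both sides do STORE; full barrier; LOAD,
 * at least one side must observe the other's store.  Mapped onto the
 * patch: either task_work_add() sees JOBCTL_TASK_WORK cleared (and
 * re-arms the signal path), or get_signal() sees the freshly queued
 * work (and runs it) -- a queued work item can never be lost.
 *
 * Build (hypothetical file name): cc -O2 -pthread storemb.c -o storemb
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <pthread.h>

static atomic_bool task_works;   /* stands in for task->task_works        */
static atomic_bool jobctl_flag;  /* stands in for the JOBCTL_TASK_WORK bit */

/* task_work_add() side: publish the work, barrier, then check the flag. */
static void *adder(void *unused)
{
        atomic_store(&task_works, true);            /* STORE(task->task_works) */
        atomic_thread_fence(memory_order_seq_cst);  /* mb(), cmpxchg-implied   */
        if (!atomic_load(&jobctl_flag))             /* LOAD(task->jobctl)      */
                puts("adder: flag clear, would lock sighand and signal_wake_up()");
        else
                puts("adder: flag already set, sighand lock safely skipped");
        return NULL;
}

/* get_signal() side: clear the flag, barrier, then check for work. */
static void *signaller(void *unused)
{
        atomic_store(&jobctl_flag, false);          /* STORE(task->jobctl)     */
        atomic_thread_fence(memory_order_seq_cst);  /* mb(), smp_store_mb()    */
        if (atomic_load(&task_works))               /* LOAD(task->task_works)  */
                puts("signaller: pending work seen, would call task_work_run()");
        return NULL;
}

int main(void)
{
        pthread_t a, s;

        atomic_store(&jobctl_flag, true);  /* as if TWA_SIGNAL were already armed */
        pthread_create(&a, NULL, adder, NULL);
        pthread_create(&s, NULL, signaller, NULL);
        pthread_join(a, NULL);
        pthread_join(s, NULL);
        return 0;
}

Whatever the thread interleaving, the two "skip" outcomes cannot both
happen on one run: if adder sees the flag still set and skips the lock,
the signaller's clear-then-check must observe the published work and
run it, which is exactly why the patched task_work_add() may bypass
lock_task_sighand() without losing a TWA_SIGNAL delivery.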