From: Greg Kroah-Hartman
Date: Sun, 16 Oct 2022 20:09:44 +0000 (+0200)
Subject: 5.4-stable patches
X-Git-Tag: v5.4.219~55
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=ffe647c7d0c882932147878096ea9ff8d8d533b4;p=thirdparty%2Fkernel%2Fstable-queue.git

5.4-stable patches

added patches:
      tracing-disable-interrupt-or-preemption-before-acquiring-arch_spinlock_t.patch
---

diff --git a/queue-5.4/series b/queue-5.4/series
index a843d98ec23..e9050946956 100644
--- a/queue-5.4/series
+++ b/queue-5.4/series
@@ -54,3 +54,4 @@ kvm-nvmx-unconditionally-purge-queued-injected-events-on-nested-exit.patch
 kvm-vmx-drop-bits-31-16-when-shoving-exception-error-code-into-vmcs.patch
 drm-nouveau-fix-a-use-after-free-in-nouveau_gem_prime_import_sg_table.patch
 selinux-use-grep-e-instead-of-egrep.patch
+tracing-disable-interrupt-or-preemption-before-acquiring-arch_spinlock_t.patch
diff --git a/queue-5.4/tracing-disable-interrupt-or-preemption-before-acquiring-arch_spinlock_t.patch b/queue-5.4/tracing-disable-interrupt-or-preemption-before-acquiring-arch_spinlock_t.patch
new file mode 100644
index 00000000000..63b5d1f644a
--- /dev/null
+++ b/queue-5.4/tracing-disable-interrupt-or-preemption-before-acquiring-arch_spinlock_t.patch
@@ -0,0 +1,157 @@
+From c0a581d7126c0bbc96163276f585fd7b4e4d8d0e Mon Sep 17 00:00:00 2001
+From: Waiman Long
+Date: Thu, 22 Sep 2022 10:56:22 -0400
+Subject: tracing: Disable interrupt or preemption before acquiring arch_spinlock_t
+
+From: Waiman Long
+
+commit c0a581d7126c0bbc96163276f585fd7b4e4d8d0e upstream.
+
+It was found that some tracing functions in kernel/trace/trace.c acquire
+an arch_spinlock_t with preemption and irqs enabled. An example is the
+tracing_saved_cmdlines_size_read() function which intermittently causes
+a "BUG: using smp_processor_id() in preemptible" warning when the LTP
+read_all_proc test is run.
+
+That can be problematic in case preemption happens after acquiring the
+lock. Add the necessary preemption or interrupt disabling code in the
+appropriate places before acquiring an arch_spinlock_t.
+
+The convention here is to disable preemption for trace_cmdline_lock and
+interrupts for max_lock.
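
For reference, the convention described above amounts to bracketing each
arch_spin_lock()/arch_spin_unlock() pair with preempt_disable()/preempt_enable()
for trace_cmdline_lock, and with local_irq_disable()/local_irq_enable() for
max_lock. A minimal illustrative sketch of the two patterns, using the same
kernel primitives but hypothetical demo_* names (not a hunk from the patch
itself):

#include <linux/irqflags.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>

/* demo_lock/demo_count are illustrative stand-ins, not kernel symbols. */
static arch_spinlock_t demo_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static unsigned int demo_count;

/* trace_cmdline_lock pattern: disable preemption around the raw lock. */
static unsigned int demo_read(void)
{
	unsigned int val;

	preempt_disable();
	arch_spin_lock(&demo_lock);
	val = demo_count;
	arch_spin_unlock(&demo_lock);
	preempt_enable();

	return val;
}

/* max_lock pattern: disable interrupts around the raw lock. */
static void demo_update(void)
{
	local_irq_disable();
	arch_spin_lock(&demo_lock);
	demo_count++;
	arch_spin_unlock(&demo_lock);
	local_irq_enable();
}

This mirrors what the hunks below add around each existing arch_spin_lock()
call site.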
+ +Link: https://lkml.kernel.org/r/20220922145622.1744826-1-longman@redhat.com + +Cc: Peter Zijlstra +Cc: Ingo Molnar +Cc: Will Deacon +Cc: Boqun Feng +Cc: stable@vger.kernel.org +Fixes: a35873a0993b ("tracing: Add conditional snapshot") +Fixes: 939c7a4f04fc ("tracing: Introduce saved_cmdlines_size file") +Suggested-by: Steven Rostedt +Signed-off-by: Waiman Long +Signed-off-by: Steven Rostedt (Google) +Signed-off-by: Greg Kroah-Hartman +--- + kernel/trace/trace.c | 22 ++++++++++++++++++++++ + 1 file changed, 22 insertions(+) + +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -1015,12 +1015,14 @@ void *tracing_cond_snapshot_data(struct + { + void *cond_data = NULL; + ++ local_irq_disable(); + arch_spin_lock(&tr->max_lock); + + if (tr->cond_snapshot) + cond_data = tr->cond_snapshot->cond_data; + + arch_spin_unlock(&tr->max_lock); ++ local_irq_enable(); + + return cond_data; + } +@@ -1156,9 +1158,11 @@ int tracing_snapshot_cond_enable(struct + goto fail_unlock; + } + ++ local_irq_disable(); + arch_spin_lock(&tr->max_lock); + tr->cond_snapshot = cond_snapshot; + arch_spin_unlock(&tr->max_lock); ++ local_irq_enable(); + + mutex_unlock(&trace_types_lock); + +@@ -1185,6 +1189,7 @@ int tracing_snapshot_cond_disable(struct + { + int ret = 0; + ++ local_irq_disable(); + arch_spin_lock(&tr->max_lock); + + if (!tr->cond_snapshot) +@@ -1195,6 +1200,7 @@ int tracing_snapshot_cond_disable(struct + } + + arch_spin_unlock(&tr->max_lock); ++ local_irq_enable(); + + return ret; + } +@@ -1951,6 +1957,11 @@ static size_t tgid_map_max; + + #define SAVED_CMDLINES_DEFAULT 128 + #define NO_CMDLINE_MAP UINT_MAX ++/* ++ * Preemption must be disabled before acquiring trace_cmdline_lock. ++ * The various trace_arrays' max_lock must be acquired in a context ++ * where interrupt is disabled. ++ */ + static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED; + struct saved_cmdlines_buffer { + unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1]; +@@ -2163,6 +2174,9 @@ static int trace_save_cmdline(struct tas + * the lock, but we also don't want to spin + * nor do we want to disable interrupts, + * so if we miss here, then better luck next time. ++ * ++ * This is called within the scheduler and wake up, so interrupts ++ * had better been disabled and run queue lock been held. 
+ */ + if (!arch_spin_trylock(&trace_cmdline_lock)) + return 0; +@@ -5199,9 +5213,11 @@ tracing_saved_cmdlines_size_read(struct + char buf[64]; + int r; + ++ preempt_disable(); + arch_spin_lock(&trace_cmdline_lock); + r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num); + arch_spin_unlock(&trace_cmdline_lock); ++ preempt_enable(); + + return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); + } +@@ -5226,10 +5242,12 @@ static int tracing_resize_saved_cmdlines + return -ENOMEM; + } + ++ preempt_disable(); + arch_spin_lock(&trace_cmdline_lock); + savedcmd_temp = savedcmd; + savedcmd = s; + arch_spin_unlock(&trace_cmdline_lock); ++ preempt_enable(); + free_saved_cmdlines_buffer(savedcmd_temp); + + return 0; +@@ -5684,10 +5702,12 @@ static int tracing_set_tracer(struct tra + + #ifdef CONFIG_TRACER_SNAPSHOT + if (t->use_max_tr) { ++ local_irq_disable(); + arch_spin_lock(&tr->max_lock); + if (tr->cond_snapshot) + ret = -EBUSY; + arch_spin_unlock(&tr->max_lock); ++ local_irq_enable(); + if (ret) + goto out; + } +@@ -6767,10 +6787,12 @@ tracing_snapshot_write(struct file *filp + goto out; + } + ++ local_irq_disable(); + arch_spin_lock(&tr->max_lock); + if (tr->cond_snapshot) + ret = -EBUSY; + arch_spin_unlock(&tr->max_lock); ++ local_irq_enable(); + if (ret) + goto out; +