From: Greg Kroah-Hartman
Date: Sun, 8 Sep 2024 12:09:52 +0000 (+0200)
Subject: 6.10-stable patches
X-Git-Tag: v4.19.322~90
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=344fdb406076bc911b321fb972fdd754b2f21ea5;p=thirdparty%2Fkernel%2Fstable-queue.git

6.10-stable patches

added patches:
      spi-rockchip-resolve-unbalanced-runtime-pm-system-pm-handling.patch
      tracing-avoid-possible-softlockup-in-tracing_iter_reset.patch
      tracing-osnoise-use-a-cpumask-to-know-what-threads-are-kthreads.patch
      tracing-timerlat-add-interface_lock-around-clearing-of-kthread-in-stop_kthread.patch
      tracing-timerlat-only-clear-timer-if-a-kthread-exists.patch
---

diff --git a/queue-6.10/series b/queue-6.10/series
index 61110ae4e07..4cceb06307d 100644
--- a/queue-6.10/series
+++ b/queue-6.10/series
@@ -58,3 +58,8 @@ kexec_file-fix-elfcorehdr-digest-exclusion-when-config_crash_hotplug-y.patch
 mm-vmalloc-ensure-vmap_block-is-initialised-before-adding-to-queue.patch
 mm-slub-add-check-for-s-flags-in-the-alloc_tagging_slab_free_hook.patch
 revert-mm-skip-cma-pages-when-they-are-not-available.patch
+spi-rockchip-resolve-unbalanced-runtime-pm-system-pm-handling.patch
+tracing-osnoise-use-a-cpumask-to-know-what-threads-are-kthreads.patch
+tracing-timerlat-only-clear-timer-if-a-kthread-exists.patch
+tracing-avoid-possible-softlockup-in-tracing_iter_reset.patch
+tracing-timerlat-add-interface_lock-around-clearing-of-kthread-in-stop_kthread.patch
diff --git a/queue-6.10/spi-rockchip-resolve-unbalanced-runtime-pm-system-pm-handling.patch b/queue-6.10/spi-rockchip-resolve-unbalanced-runtime-pm-system-pm-handling.patch
new file mode 100644
index 00000000000..be283d2887f
--- /dev/null
+++ b/queue-6.10/spi-rockchip-resolve-unbalanced-runtime-pm-system-pm-handling.patch
@@ -0,0 +1,88 @@
+From be721b451affbecc4ba4eaac3b71cdbdcade1b1b Mon Sep 17 00:00:00 2001
+From: Brian Norris
+Date: Tue, 27 Aug 2024 10:11:16 -0700
+Subject: spi: rockchip: Resolve unbalanced runtime PM / system PM handling
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Brian Norris
+
+commit be721b451affbecc4ba4eaac3b71cdbdcade1b1b upstream.
+
+Commit e882575efc77 ("spi: rockchip: Suspend and resume the bus during
+NOIRQ_SYSTEM_SLEEP_PM ops") stopped respecting runtime PM status and
+simply disabled clocks unconditionally when suspending the system. This
+causes problems when the device is already runtime suspended when we go
+to sleep -- in which case we double-disable clocks and produce a
+WARNing.
+
+Switch back to pm_runtime_force_{suspend,resume}(), because that still
+seems like the right thing to do, and the aforementioned commit makes no
+explanation why it stopped using it.
+
+Also, refactor some of the resume() error handling, because it's not
+actually a good idea to re-disable clocks on failure.
+
+Fixes: e882575efc77 ("spi: rockchip: Suspend and resume the bus during NOIRQ_SYSTEM_SLEEP_PM ops")
+Cc: stable@vger.kernel.org
+Reported-by: Ondřej Jirman
+Closes: https://lore.kernel.org/lkml/20220621154218.sau54jeij4bunf56@core/
+Signed-off-by: Brian Norris
+Link: https://patch.msgid.link/20240827171126.1115748-1-briannorris@chromium.org
+Signed-off-by: Mark Brown
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/spi/spi-rockchip.c | 23 +++++++----------------
+ 1 file changed, 7 insertions(+), 16 deletions(-)
+
+--- a/drivers/spi/spi-rockchip.c
++++ b/drivers/spi/spi-rockchip.c
+@@ -945,14 +945,16 @@ static int rockchip_spi_suspend(struct d
+ {
+ 	int ret;
+ 	struct spi_controller *ctlr = dev_get_drvdata(dev);
+-	struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
+ 
+ 	ret = spi_controller_suspend(ctlr);
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	clk_disable_unprepare(rs->spiclk);
+-	clk_disable_unprepare(rs->apb_pclk);
++	ret = pm_runtime_force_suspend(dev);
++	if (ret < 0) {
++		spi_controller_resume(ctlr);
++		return ret;
++	}
+ 
+ 	pinctrl_pm_select_sleep_state(dev);
+ 
+@@ -963,25 +965,14 @@ static int rockchip_spi_resume(struct de
+ {
+ 	int ret;
+ 	struct spi_controller *ctlr = dev_get_drvdata(dev);
+-	struct rockchip_spi *rs = spi_controller_get_devdata(ctlr);
+ 
+ 	pinctrl_pm_select_default_state(dev);
+ 
+-	ret = clk_prepare_enable(rs->apb_pclk);
++	ret = pm_runtime_force_resume(dev);
+ 	if (ret < 0)
+ 		return ret;
+ 
+-	ret = clk_prepare_enable(rs->spiclk);
+-	if (ret < 0)
+-		clk_disable_unprepare(rs->apb_pclk);
+-
+-	ret = spi_controller_resume(ctlr);
+-	if (ret < 0) {
+-		clk_disable_unprepare(rs->spiclk);
+-		clk_disable_unprepare(rs->apb_pclk);
+-	}
+-
+-	return 0;
++	return spi_controller_resume(ctlr);
+ }
+ #endif /* CONFIG_PM_SLEEP */
+ 
diff --git a/queue-6.10/tracing-avoid-possible-softlockup-in-tracing_iter_reset.patch b/queue-6.10/tracing-avoid-possible-softlockup-in-tracing_iter_reset.patch
new file mode 100644
index 00000000000..1be747d7014
--- /dev/null
+++ b/queue-6.10/tracing-avoid-possible-softlockup-in-tracing_iter_reset.patch
@@ -0,0 +1,40 @@
+From 49aa8a1f4d6800721c7971ed383078257f12e8f9 Mon Sep 17 00:00:00 2001
+From: Zheng Yejian
+Date: Tue, 27 Aug 2024 20:46:54 +0800
+Subject: tracing: Avoid possible softlockup in tracing_iter_reset()
+
+From: Zheng Yejian
+
+commit 49aa8a1f4d6800721c7971ed383078257f12e8f9 upstream.
+
+In __tracing_open(), when max latency tracers took place on the cpu,
+the time start of its buffer would be updated, then event entries with
+timestamps being earlier than start of the buffer would be skipped
+(see tracing_iter_reset()).
+
+Softlockup will occur if the kernel is non-preemptible and too many
+entries were skipped in the loop that reset every cpu buffer, so add
+cond_resched() to avoid it.
+ +Cc: stable@vger.kernel.org +Fixes: 2f26ebd549b9a ("tracing: use timestamp to determine start of latency traces") +Link: https://lore.kernel.org/20240827124654.3817443-1-zhengyejian@huaweicloud.com +Suggested-by: Steven Rostedt +Signed-off-by: Zheng Yejian +Signed-off-by: Steven Rostedt (Google) +Signed-off-by: Greg Kroah-Hartman +--- + kernel/trace/trace.c | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -3958,6 +3958,8 @@ void tracing_iter_reset(struct trace_ite + break; + entries++; + ring_buffer_iter_advance(buf_iter); ++ /* This could be a big loop */ ++ cond_resched(); + } + + per_cpu_ptr(iter->array_buffer->data, cpu)->skipped_entries = entries; diff --git a/queue-6.10/tracing-osnoise-use-a-cpumask-to-know-what-threads-are-kthreads.patch b/queue-6.10/tracing-osnoise-use-a-cpumask-to-know-what-threads-are-kthreads.patch new file mode 100644 index 00000000000..010b6d57c1e --- /dev/null +++ b/queue-6.10/tracing-osnoise-use-a-cpumask-to-know-what-threads-are-kthreads.patch @@ -0,0 +1,185 @@ +From 177e1cc2f41235c145041eed03ef5bab18f32328 Mon Sep 17 00:00:00 2001 +From: Steven Rostedt +Date: Wed, 4 Sep 2024 10:34:28 -0400 +Subject: tracing/osnoise: Use a cpumask to know what threads are kthreads + +From: Steven Rostedt + +commit 177e1cc2f41235c145041eed03ef5bab18f32328 upstream. + +The start_kthread() and stop_thread() code was not always called with the +interface_lock held. This means that the kthread variable could be +unexpectedly changed causing the kthread_stop() to be called on it when it +should not have been, leading to: + + while true; do + rtla timerlat top -u -q & PID=$!; + sleep 5; + kill -INT $PID; + sleep 0.001; + kill -TERM $PID; + wait $PID; + done + +Causing the following OOPS: + + Oops: general protection fault, probably for non-canonical address 0xdffffc0000000002: 0000 [#1] PREEMPT SMP KASAN PTI + KASAN: null-ptr-deref in range [0x0000000000000010-0x0000000000000017] + CPU: 5 UID: 0 PID: 885 Comm: timerlatu/5 Not tainted 6.11.0-rc4-test-00002-gbc754cc76d1b-dirty #125 a533010b71dab205ad2f507188ce8c82203b0254 + Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.3-debian-1.16.3-2 04/01/2014 + RIP: 0010:hrtimer_active+0x58/0x300 + Code: 48 c1 ee 03 41 54 48 01 d1 48 01 d6 55 53 48 83 ec 20 80 39 00 0f 85 30 02 00 00 49 8b 6f 30 4c 8d 75 10 4c 89 f0 48 c1 e8 03 <0f> b6 3c 10 4c 89 f0 83 e0 07 83 c0 03 40 38 f8 7c 09 40 84 ff 0f + RSP: 0018:ffff88811d97f940 EFLAGS: 00010202 + RAX: 0000000000000002 RBX: ffff88823c6b5b28 RCX: ffffed10478d6b6b + RDX: dffffc0000000000 RSI: ffffed10478d6b6c RDI: ffff88823c6b5b28 + RBP: 0000000000000000 R08: ffff88823c6b5b58 R09: ffff88823c6b5b60 + R10: ffff88811d97f957 R11: 0000000000000010 R12: 00000000000a801d + R13: ffff88810d8b35d8 R14: 0000000000000010 R15: ffff88823c6b5b28 + FS: 0000000000000000(0000) GS:ffff88823c680000(0000) knlGS:0000000000000000 + CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 + CR2: 0000561858ad7258 CR3: 000000007729e001 CR4: 0000000000170ef0 + Call Trace: + + ? die_addr+0x40/0xa0 + ? exc_general_protection+0x154/0x230 + ? asm_exc_general_protection+0x26/0x30 + ? hrtimer_active+0x58/0x300 + ? __pfx_mutex_lock+0x10/0x10 + ? __pfx_locks_remove_file+0x10/0x10 + hrtimer_cancel+0x15/0x40 + timerlat_fd_release+0x8e/0x1f0 + ? security_file_release+0x43/0x80 + __fput+0x372/0xb10 + task_work_run+0x11e/0x1f0 + ? _raw_spin_lock+0x85/0xe0 + ? __pfx_task_work_run+0x10/0x10 + ? poison_slab_object+0x109/0x170 + ? do_exit+0x7a0/0x24b0 + do_exit+0x7bd/0x24b0 + ? 
__pfx_migrate_enable+0x10/0x10 + ? __pfx_do_exit+0x10/0x10 + ? __pfx_read_tsc+0x10/0x10 + ? ktime_get+0x64/0x140 + ? _raw_spin_lock_irq+0x86/0xe0 + do_group_exit+0xb0/0x220 + get_signal+0x17ba/0x1b50 + ? vfs_read+0x179/0xa40 + ? timerlat_fd_read+0x30b/0x9d0 + ? __pfx_get_signal+0x10/0x10 + ? __pfx_timerlat_fd_read+0x10/0x10 + arch_do_signal_or_restart+0x8c/0x570 + ? __pfx_arch_do_signal_or_restart+0x10/0x10 + ? vfs_read+0x179/0xa40 + ? ksys_read+0xfe/0x1d0 + ? __pfx_ksys_read+0x10/0x10 + syscall_exit_to_user_mode+0xbc/0x130 + do_syscall_64+0x74/0x110 + ? __pfx___rseq_handle_notify_resume+0x10/0x10 + ? __pfx_ksys_read+0x10/0x10 + ? fpregs_restore_userregs+0xdb/0x1e0 + ? fpregs_restore_userregs+0xdb/0x1e0 + ? syscall_exit_to_user_mode+0x116/0x130 + ? do_syscall_64+0x74/0x110 + ? do_syscall_64+0x74/0x110 + ? do_syscall_64+0x74/0x110 + entry_SYSCALL_64_after_hwframe+0x71/0x79 + RIP: 0033:0x7ff0070eca9c + Code: Unable to access opcode bytes at 0x7ff0070eca72. + RSP: 002b:00007ff006dff8c0 EFLAGS: 00000246 ORIG_RAX: 0000000000000000 + RAX: 0000000000000000 RBX: 0000000000000005 RCX: 00007ff0070eca9c + RDX: 0000000000000400 RSI: 00007ff006dff9a0 RDI: 0000000000000003 + RBP: 00007ff006dffde0 R08: 0000000000000000 R09: 00007ff000000ba0 + R10: 00007ff007004b08 R11: 0000000000000246 R12: 0000000000000003 + R13: 00007ff006dff9a0 R14: 0000000000000007 R15: 0000000000000008 + + Modules linked in: snd_hda_intel snd_intel_dspcfg snd_intel_sdw_acpi snd_hda_codec snd_hwdep snd_hda_core + ---[ end trace 0000000000000000 ]--- + +This is because it would mistakenly call kthread_stop() on a user space +thread making it "exit" before it actually exits. + +Since kthreads are created based on global behavior, use a cpumask to know +when kthreads are running and that they need to be shutdown before +proceeding to do new work. + +Link: https://lore.kernel.org/all/20240820130001.124768-1-tglozar@redhat.com/ + +This was debugged by using the persistent ring buffer: + +Link: https://lore.kernel.org/all/20240823013902.135036960@goodmis.org/ + +Note, locking was originally used to fix this, but that proved to cause too +many deadlocks to work around: + + https://lore.kernel.org/linux-trace-kernel/20240823102816.5e55753b@gandalf.local.home/ + +Cc: stable@vger.kernel.org +Cc: Masami Hiramatsu +Cc: Mathieu Desnoyers +Cc: "Luis Claudio R. 
Goncalves" +Link: https://lore.kernel.org/20240904103428.08efdf4c@gandalf.local.home +Fixes: e88ed227f639e ("tracing/timerlat: Add user-space interface") +Reported-by: Tomas Glozar +Signed-off-by: Steven Rostedt (Google) +Signed-off-by: Greg Kroah-Hartman +--- + kernel/trace/trace_osnoise.c | 18 +++++++++++++++--- + 1 file changed, 15 insertions(+), 3 deletions(-) + +--- a/kernel/trace/trace_osnoise.c ++++ b/kernel/trace/trace_osnoise.c +@@ -1612,6 +1612,7 @@ out: + + static struct cpumask osnoise_cpumask; + static struct cpumask save_cpumask; ++static struct cpumask kthread_cpumask; + + /* + * osnoise_sleep - sleep until the next period +@@ -1675,6 +1676,7 @@ static inline int osnoise_migration_pend + */ + mutex_lock(&interface_lock); + this_cpu_osn_var()->kthread = NULL; ++ cpumask_clear_cpu(smp_processor_id(), &kthread_cpumask); + mutex_unlock(&interface_lock); + + return 1; +@@ -1947,9 +1949,10 @@ static void stop_kthread(unsigned int cp + + kthread = per_cpu(per_cpu_osnoise_var, cpu).kthread; + if (kthread) { +- if (test_bit(OSN_WORKLOAD, &osnoise_options)) { ++ if (cpumask_test_and_clear_cpu(cpu, &kthread_cpumask) && ++ !WARN_ON(!test_bit(OSN_WORKLOAD, &osnoise_options))) { + kthread_stop(kthread); +- } else { ++ } else if (!WARN_ON(test_bit(OSN_WORKLOAD, &osnoise_options))) { + /* + * This is a user thread waiting on the timerlat_fd. We need + * to close all users, and the best way to guarantee this is +@@ -2021,6 +2024,7 @@ static int start_kthread(unsigned int cp + } + + per_cpu(per_cpu_osnoise_var, cpu).kthread = kthread; ++ cpumask_set_cpu(cpu, &kthread_cpumask); + + return 0; + } +@@ -2048,8 +2052,16 @@ static int start_per_cpu_kthreads(void) + */ + cpumask_and(current_mask, cpu_online_mask, &osnoise_cpumask); + +- for_each_possible_cpu(cpu) ++ for_each_possible_cpu(cpu) { ++ if (cpumask_test_and_clear_cpu(cpu, &kthread_cpumask)) { ++ struct task_struct *kthread; ++ ++ kthread = per_cpu(per_cpu_osnoise_var, cpu).kthread; ++ if (!WARN_ON(!kthread)) ++ kthread_stop(kthread); ++ } + per_cpu(per_cpu_osnoise_var, cpu).kthread = NULL; ++ } + + for_each_cpu(cpu, current_mask) { + retval = start_kthread(cpu); diff --git a/queue-6.10/tracing-timerlat-add-interface_lock-around-clearing-of-kthread-in-stop_kthread.patch b/queue-6.10/tracing-timerlat-add-interface_lock-around-clearing-of-kthread-in-stop_kthread.patch new file mode 100644 index 00000000000..6526cf83093 --- /dev/null +++ b/queue-6.10/tracing-timerlat-add-interface_lock-around-clearing-of-kthread-in-stop_kthread.patch @@ -0,0 +1,88 @@ +From 5bfbcd1ee57b607fd29e4645c7f350dd385dd9ad Mon Sep 17 00:00:00 2001 +From: Steven Rostedt +Date: Thu, 5 Sep 2024 11:33:59 -0400 +Subject: tracing/timerlat: Add interface_lock around clearing of kthread in stop_kthread() + +From: Steven Rostedt + +commit 5bfbcd1ee57b607fd29e4645c7f350dd385dd9ad upstream. + +The timerlat interface will get and put the task that is part of the +"kthread" field of the osn_var to keep it around until all references are +released. But here's a race in the "stop_kthread()" code that will call +put_task_struct() on the kthread if it is not a kernel thread. This can +race with the releasing of the references to that task struct and the +put_task_struct() can be called twice when it should have been called just +once. + +Take the interface_lock() in stop_kthread() to synchronize this change. 
+But to do so, the function stop_per_cpu_kthreads() needs to change the +loop from for_each_online_cpu() to for_each_possible_cpu() and remove the +cpu_read_lock(), as the interface_lock can not be taken while the cpu +locks are held. The only side effect of this change is that it may do some +extra work, as the per_cpu variables of the offline CPUs would not be set +anyway, and would simply be skipped in the loop. + +Remove unneeded "return;" in stop_kthread(). + +Cc: stable@vger.kernel.org +Cc: Masami Hiramatsu +Cc: Mathieu Desnoyers +Cc: Tomas Glozar +Cc: John Kacur +Cc: "Luis Claudio R. Goncalves" +Link: https://lore.kernel.org/20240905113359.2b934242@gandalf.local.home +Fixes: e88ed227f639e ("tracing/timerlat: Add user-space interface") +Signed-off-by: Steven Rostedt (Google) +Signed-off-by: Greg Kroah-Hartman +--- + kernel/trace/trace_osnoise.c | 13 ++++++------- + 1 file changed, 6 insertions(+), 7 deletions(-) + +--- a/kernel/trace/trace_osnoise.c ++++ b/kernel/trace/trace_osnoise.c +@@ -1953,8 +1953,12 @@ static void stop_kthread(unsigned int cp + { + struct task_struct *kthread; + ++ mutex_lock(&interface_lock); + kthread = per_cpu(per_cpu_osnoise_var, cpu).kthread; + if (kthread) { ++ per_cpu(per_cpu_osnoise_var, cpu).kthread = NULL; ++ mutex_unlock(&interface_lock); ++ + if (cpumask_test_and_clear_cpu(cpu, &kthread_cpumask) && + !WARN_ON(!test_bit(OSN_WORKLOAD, &osnoise_options))) { + kthread_stop(kthread); +@@ -1967,8 +1971,8 @@ static void stop_kthread(unsigned int cp + kill_pid(kthread->thread_pid, SIGKILL, 1); + put_task_struct(kthread); + } +- per_cpu(per_cpu_osnoise_var, cpu).kthread = NULL; + } else { ++ mutex_unlock(&interface_lock); + /* if no workload, just return */ + if (!test_bit(OSN_WORKLOAD, &osnoise_options)) { + /* +@@ -1976,7 +1980,6 @@ static void stop_kthread(unsigned int cp + */ + per_cpu(per_cpu_osnoise_var, cpu).sampling = false; + barrier(); +- return; + } + } + } +@@ -1991,12 +1994,8 @@ static void stop_per_cpu_kthreads(void) + { + int cpu; + +- cpus_read_lock(); +- +- for_each_online_cpu(cpu) ++ for_each_possible_cpu(cpu) + stop_kthread(cpu); +- +- cpus_read_unlock(); + } + + /* diff --git a/queue-6.10/tracing-timerlat-only-clear-timer-if-a-kthread-exists.patch b/queue-6.10/tracing-timerlat-only-clear-timer-if-a-kthread-exists.patch new file mode 100644 index 00000000000..28e78e658a2 --- /dev/null +++ b/queue-6.10/tracing-timerlat-only-clear-timer-if-a-kthread-exists.patch @@ -0,0 +1,95 @@ +From e6a53481da292d970d1edf0d8831121d1c5e2f0d Mon Sep 17 00:00:00 2001 +From: Steven Rostedt +Date: Thu, 5 Sep 2024 08:53:30 -0400 +Subject: tracing/timerlat: Only clear timer if a kthread exists + +From: Steven Rostedt + +commit e6a53481da292d970d1edf0d8831121d1c5e2f0d upstream. + +The timerlat tracer can use user space threads to check for osnoise and +timer latency. If the program using this is killed via a SIGTERM, the +threads are shutdown one at a time and another tracing instance can start +up resetting the threads before they are fully closed. That causes the +hrtimer assigned to the kthread to be shutdown and freed twice when the +dying thread finally closes the file descriptors, causing a use-after-free +bug. + +Only cancel the hrtimer if the associated thread is still around. Also add +the interface_lock around the resetting of the tlat_var->kthread. + +Note, this is just a quick fix that can be backported to stable. A real +fix is to have a better synchronization between the shutdown of old +threads and the starting of new ones. 
+ +Link: https://lore.kernel.org/all/20240820130001.124768-1-tglozar@redhat.com/ + +Cc: stable@vger.kernel.org +Cc: Masami Hiramatsu +Cc: Mathieu Desnoyers +Cc: "Luis Claudio R. Goncalves" +Link: https://lore.kernel.org/20240905085330.45985730@gandalf.local.home +Fixes: e88ed227f639e ("tracing/timerlat: Add user-space interface") +Reported-by: Tomas Glozar +Signed-off-by: Steven Rostedt (Google) +Signed-off-by: Greg Kroah-Hartman +--- + kernel/trace/trace_osnoise.c | 19 +++++++++++++------ + 1 file changed, 13 insertions(+), 6 deletions(-) + +--- a/kernel/trace/trace_osnoise.c ++++ b/kernel/trace/trace_osnoise.c +@@ -253,20 +253,31 @@ static inline struct timerlat_variables + } + + /* ++ * Protect the interface. ++ */ ++static struct mutex interface_lock; ++ ++/* + * tlat_var_reset - Reset the values of the given timerlat_variables + */ + static inline void tlat_var_reset(void) + { + struct timerlat_variables *tlat_var; + int cpu; ++ ++ /* Synchronize with the timerlat interfaces */ ++ mutex_lock(&interface_lock); + /* + * So far, all the values are initialized as 0, so + * zeroing the structure is perfect. + */ + for_each_cpu(cpu, cpu_online_mask) { + tlat_var = per_cpu_ptr(&per_cpu_timerlat_var, cpu); ++ if (tlat_var->kthread) ++ hrtimer_cancel(&tlat_var->timer); + memset(tlat_var, 0, sizeof(*tlat_var)); + } ++ mutex_unlock(&interface_lock); + } + #else /* CONFIG_TIMERLAT_TRACER */ + #define tlat_var_reset() do {} while (0) +@@ -332,11 +343,6 @@ struct timerlat_sample { + #endif + + /* +- * Protect the interface. +- */ +-static struct mutex interface_lock; +- +-/* + * Tracer data. + */ + static struct osnoise_data { +@@ -2591,7 +2597,8 @@ static int timerlat_fd_release(struct in + osn_var = per_cpu_ptr(&per_cpu_osnoise_var, cpu); + tlat_var = per_cpu_ptr(&per_cpu_timerlat_var, cpu); + +- hrtimer_cancel(&tlat_var->timer); ++ if (tlat_var->kthread) ++ hrtimer_cancel(&tlat_var->timer); + memset(tlat_var, 0, sizeof(*tlat_var)); + + osn_var->sampling = 0;