git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.1-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 27 May 2024 18:18:50 +0000 (20:18 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 27 May 2024 18:18:50 +0000 (20:18 +0200)
added patches:
ftrace-fix-possible-use-after-free-issue-in-ftrace_location.patch
x86-tsc-trust-initial-offset-in-architectural-tsc-adjust-msrs.patch

queue-6.1/ftrace-fix-possible-use-after-free-issue-in-ftrace_location.patch [new file with mode: 0644]
queue-6.1/series
queue-6.1/x86-tsc-trust-initial-offset-in-architectural-tsc-adjust-msrs.patch [new file with mode: 0644]

diff --git a/queue-6.1/ftrace-fix-possible-use-after-free-issue-in-ftrace_location.patch b/queue-6.1/ftrace-fix-possible-use-after-free-issue-in-ftrace_location.patch
new file mode 100644
index 0000000..3a6a1a1
--- /dev/null
+++ b/queue-6.1/ftrace-fix-possible-use-after-free-issue-in-ftrace_location.patch
@@ -0,0 +1,170 @@
+From e60b613df8b6253def41215402f72986fee3fc8d Mon Sep 17 00:00:00 2001
+From: Zheng Yejian <zhengyejian1@huawei.com>
+Date: Fri, 10 May 2024 03:28:59 +0800
+Subject: ftrace: Fix possible use-after-free issue in ftrace_location()
+
+From: Zheng Yejian <zhengyejian1@huawei.com>
+
+commit e60b613df8b6253def41215402f72986fee3fc8d upstream.
+
+KASAN reports a bug:
+
+  BUG: KASAN: use-after-free in ftrace_location+0x90/0x120
+  Read of size 8 at addr ffff888141d40010 by task insmod/424
+  CPU: 8 PID: 424 Comm: insmod Tainted: G        W          6.9.0-rc2+
+  [...]
+  Call Trace:
+   <TASK>
+   dump_stack_lvl+0x68/0xa0
+   print_report+0xcf/0x610
+   kasan_report+0xb5/0xe0
+   ftrace_location+0x90/0x120
+   register_kprobe+0x14b/0xa40
+   kprobe_init+0x2d/0xff0 [kprobe_example]
+   do_one_initcall+0x8f/0x2d0
+   do_init_module+0x13a/0x3c0
+   load_module+0x3082/0x33d0
+   init_module_from_file+0xd2/0x130
+   __x64_sys_finit_module+0x306/0x440
+   do_syscall_64+0x68/0x140
+   entry_SYSCALL_64_after_hwframe+0x71/0x79
+
+The root cause is that, in lookup_rec(), the ftrace record for an address
+is being looked up in the ftrace pages of a module while those ftrace
+pages are simultaneously being freed by ftrace_release_mod(), because the
+corresponding module is being deleted:
+
+           CPU1                       |      CPU2
+  register_kprobes() {                | delete_module() {
+    check_kprobe_address_safe() {     |
+      arch_check_ftrace_location() {  |
+        ftrace_location() {           |
+          lookup_rec() // USE!        |   ftrace_release_mod() // Free!
+
+To fix this issue (a minimal sketch of the pattern follows this patch):
+  1. Hold the RCU read lock while accessing ftrace pages in
+     ftrace_location_range();
+  2. Use ftrace_location_range() instead of lookup_rec() in
+     ftrace_location();
+  3. Call synchronize_rcu() before freeing any ftrace pages, in
+     ftrace_process_locs(), ftrace_release_mod() and ftrace_free_mem().
+
+Link: https://lore.kernel.org/linux-trace-kernel/20240509192859.1273558-1-zhengyejian1@huawei.com
+
+Cc: stable@vger.kernel.org
+Cc: <mhiramat@kernel.org>
+Cc: <mark.rutland@arm.com>
+Cc: <mathieu.desnoyers@efficios.com>
+Fixes: ae6aa16fdc16 ("kprobes: introduce ftrace based optimization")
+Suggested-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/ftrace.c |   39 +++++++++++++++++++++++----------------
+ 1 file changed, 23 insertions(+), 16 deletions(-)
+
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -1565,12 +1565,15 @@ static struct dyn_ftrace *lookup_rec(uns
+ unsigned long ftrace_location_range(unsigned long start, unsigned long end)
+ {
+       struct dyn_ftrace *rec;
++      unsigned long ip = 0;
++      rcu_read_lock();
+       rec = lookup_rec(start, end);
+       if (rec)
+-              return rec->ip;
++              ip = rec->ip;
++      rcu_read_unlock();
+-      return 0;
++      return ip;
+ }
+ /**
+@@ -1583,25 +1586,22 @@ unsigned long ftrace_location_range(unsi
+  */
+ unsigned long ftrace_location(unsigned long ip)
+ {
+-      struct dyn_ftrace *rec;
++      unsigned long loc;
+       unsigned long offset;
+       unsigned long size;
+-      rec = lookup_rec(ip, ip);
+-      if (!rec) {
++      loc = ftrace_location_range(ip, ip);
++      if (!loc) {
+               if (!kallsyms_lookup_size_offset(ip, &size, &offset))
+                       goto out;
+               /* map sym+0 to __fentry__ */
+               if (!offset)
+-                      rec = lookup_rec(ip, ip + size - 1);
++                      loc = ftrace_location_range(ip, ip + size - 1);
+       }
+-      if (rec)
+-              return rec->ip;
+-
+ out:
+-      return 0;
++      return loc;
+ }
+ /**
+@@ -6784,6 +6784,8 @@ static int ftrace_process_locs(struct mo
+       /* We should have used all pages unless we skipped some */
+       if (pg_unuse) {
+               WARN_ON(!skipped);
++              /* Need to synchronize with ftrace_location_range() */
++              synchronize_rcu();
+               ftrace_free_pages(pg_unuse);
+       }
+       return ret;
+@@ -6998,6 +7000,9 @@ void ftrace_release_mod(struct module *m
+  out_unlock:
+       mutex_unlock(&ftrace_lock);
++      /* Need to synchronize with ftrace_location_range() */
++      if (tmp_page)
++              synchronize_rcu();
+       for (pg = tmp_page; pg; pg = tmp_page) {
+               /* Needs to be called outside of ftrace_lock */
+@@ -7332,6 +7337,7 @@ void ftrace_free_mem(struct module *mod,
+       unsigned long start = (unsigned long)(start_ptr);
+       unsigned long end = (unsigned long)(end_ptr);
+       struct ftrace_page **last_pg = &ftrace_pages_start;
++      struct ftrace_page *tmp_page = NULL;
+       struct ftrace_page *pg;
+       struct dyn_ftrace *rec;
+       struct dyn_ftrace key;
+@@ -7375,12 +7381,8 @@ void ftrace_free_mem(struct module *mod,
+               ftrace_update_tot_cnt--;
+               if (!pg->index) {
+                       *last_pg = pg->next;
+-                      if (pg->records) {
+-                              free_pages((unsigned long)pg->records, pg->order);
+-                              ftrace_number_of_pages -= 1 << pg->order;
+-                      }
+-                      ftrace_number_of_groups--;
+-                      kfree(pg);
++                      pg->next = tmp_page;
++                      tmp_page = pg;
+                       pg = container_of(last_pg, struct ftrace_page, next);
+                       if (!(*last_pg))
+                               ftrace_pages = pg;
+@@ -7397,6 +7399,11 @@ void ftrace_free_mem(struct module *mod,
+               clear_func_from_hashes(func);
+               kfree(func);
+       }
++      /* Need to synchronize with ftrace_location_range() */
++      if (tmp_page) {
++              synchronize_rcu();
++              ftrace_free_pages(tmp_page);
++      }
+ }
+ void __init ftrace_free_init_mem(void)
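
The fix boils down to the classic RCU reader/updater pattern: readers
bracket the record lookup with rcu_read_lock()/rcu_read_unlock() and copy
out the value they need, while the updater waits for all in-flight readers
with synchronize_rcu() before freeing anything. The following is a minimal
sketch of that pattern in kernel-style C; the record type, list and helper
names are made up for illustration and are not the actual ftrace code.

#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical record type, standing in for the ftrace pages/records. */
struct rec {
	unsigned long ip;
	struct rec *next;
};

static struct rec *table;	/* assumption: an RCU-protected singly linked list */

/* Reader side: same shape as ftrace_location_range() after the fix. */
static unsigned long lookup_ip(unsigned long start, unsigned long end)
{
	struct rec *r;
	unsigned long ip = 0;

	rcu_read_lock();
	for (r = rcu_dereference(table); r; r = r->next) {
		if (r->ip >= start && r->ip <= end) {
			ip = r->ip;	/* copy the value out, never return the pointer */
			break;
		}
	}
	rcu_read_unlock();

	return ip;
}

/* Free side: 'head' has already been unlinked from 'table' under a lock. */
static void free_unlinked(struct rec *head)
{
	struct rec *r, *next;

	/* Wait for any reader that could still see these records. */
	synchronize_rcu();
	for (r = head; r; r = next) {
		next = r->next;
		kfree(r);
	}
}

In the patch above, ftrace_location_range() plays the reader role, and
ftrace_process_locs(), ftrace_release_mod() and ftrace_free_mem() collect
the pages to be released on a temporary list and call synchronize_rcu()
before handing them to ftrace_free_pages().
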
diff --git a/queue-6.1/series b/queue-6.1/series
index 4defc59175eaf204f3ae7abd75aa7e197efb98ca..81d1e33f9a521b668a72868e7295ed08b53459ab 100644
--- a/queue-6.1/series
+++ b/queue-6.1/series
@@ -1,3 +1,5 @@
+x86-tsc-trust-initial-offset-in-architectural-tsc-adjust-msrs.patch
+ftrace-fix-possible-use-after-free-issue-in-ftrace_location.patch
 tty-n_gsm-fix-possible-out-of-bounds-in-gsm0_receive.patch
 tty-n_gsm-fix-missing-receive-state-reset-after-mode-switch.patch
 speakup-fix-sizeof-vs-array_size-bug.patch
diff --git a/queue-6.1/x86-tsc-trust-initial-offset-in-architectural-tsc-adjust-msrs.patch b/queue-6.1/x86-tsc-trust-initial-offset-in-architectural-tsc-adjust-msrs.patch
new file mode 100644
index 0000000..9289773
--- /dev/null
+++ b/queue-6.1/x86-tsc-trust-initial-offset-in-architectural-tsc-adjust-msrs.patch
@@ -0,0 +1,46 @@
+From 455f9075f14484f358b3c1d6845b4a438de198a7 Mon Sep 17 00:00:00 2001
+From: Daniel J Blueman <daniel@quora.org>
+Date: Fri, 19 Apr 2024 16:51:46 +0800
+Subject: x86/tsc: Trust initial offset in architectural TSC-adjust MSRs
+
+From: Daniel J Blueman <daniel@quora.org>
+
+commit 455f9075f14484f358b3c1d6845b4a438de198a7 upstream.
+
+When the BIOS configures the architectural TSC-adjust MSRs on secondary
+sockets to correct a constant inter-chassis offset, after Linux brings the
+cores online, the TSC sync check later resets the core-local MSR to 0,
+triggering HPET fallback and leading to performance loss.
+
+Fix this by unconditionally using the initial adjust values read from the
+MSRs. Trusting the initial offsets in this architectural mechanism is a
+better approach than special-casing workarounds for specific platforms.
+
+Signed-off-by: Daniel J Blueman <daniel@quora.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Steffen Persvold <sp@numascale.com>
+Reviewed-by: James Cleverdon <james.cleverdon.external@eviden.com>
+Reviewed-by: Dimitri Sivanich <sivanich@hpe.com>
+Reviewed-by: Prarit Bhargava <prarit@redhat.com>
+Link: https://lore.kernel.org/r/20240419085146.175665-1-daniel@quora.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/tsc_sync.c |    6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+--- a/arch/x86/kernel/tsc_sync.c
++++ b/arch/x86/kernel/tsc_sync.c
+@@ -192,11 +192,9 @@ bool tsc_store_and_check_tsc_adjust(bool
+       cur->warned = false;
+       /*
+-       * If a non-zero TSC value for socket 0 may be valid then the default
+-       * adjusted value cannot assumed to be zero either.
++       * The default adjust value cannot be assumed to be zero on any socket.
+        */
+-      if (tsc_async_resets)
+-              cur->adjusted = bootval;
++      cur->adjusted = bootval;
+       /*
+        * Check whether this CPU is the first in a package to come up. In
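
For reference, this is what the change amounts to, as a hedged sketch in
kernel-style C (the helper below is hypothetical, not the actual
tsc_sync.c code): each CPU's "adjusted" baseline is now simply whatever
IA32_TSC_ADJUST already contains at boot, instead of being forced back
to 0 by the sync check.

#include <linux/types.h>
#include <asm/msr.h>

/* Hypothetical helper: keep the firmware-programmed TSC adjust offset. */
static s64 read_initial_tsc_adjust(void)
{
	s64 bootval;

	/* Architectural MSR, present on CPUs with X86_FEATURE_TSC_ADJUST. */
	rdmsrl(MSR_IA32_TSC_ADJUST, bootval);

	/* After this patch the value is trusted unconditionally. */
	return bootval;
}
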