git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.14-stable patches
author: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 18 Dec 2023 11:06:16 +0000 (12:06 +0100)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 18 Dec 2023 11:06:16 +0000 (12:06 +0100)
added patches:
powerpc-ftrace-create-a-dummy-stackframe-to-fix-stack-unwind.patch
powerpc-ftrace-fix-stack-teardown-in-ftrace_no_trace.patch

queue-4.14/powerpc-ftrace-create-a-dummy-stackframe-to-fix-stack-unwind.patch [new file with mode: 0644]
queue-4.14/powerpc-ftrace-fix-stack-teardown-in-ftrace_no_trace.patch [new file with mode: 0644]
queue-4.14/series

diff --git a/queue-4.14/powerpc-ftrace-create-a-dummy-stackframe-to-fix-stack-unwind.patch b/queue-4.14/powerpc-ftrace-create-a-dummy-stackframe-to-fix-stack-unwind.patch
new file mode 100644 (file)
index 0000000..d2cb3a7
--- /dev/null
@@ -0,0 +1,109 @@
+From stable+bounces-6799-greg=kroah.com@vger.kernel.org Fri Dec 15 12:15:27 2023
+From: Naveen N Rao <naveen@kernel.org>
+Date: Fri, 15 Dec 2023 16:41:21 +0530
+Subject: powerpc/ftrace: Create a dummy stackframe to fix stack unwind
+To: <stable@vger.kernel.org>
+Cc: Greg KH <gregkh@linuxfoundation.org>, Michael Ellerman <mpe@ellerman.id.au>
+Message-ID: <20231215111122.2361478-1-naveen@kernel.org>
+
+From: Naveen N Rao <naveen@kernel.org>
+
+commit 41a506ef71eb38d94fe133f565c87c3e06ccc072 upstream.
+
+With ppc64 -mprofile-kernel and ppc32 -pg, profiling instructions to
+call into ftrace are emitted right at function entry. The instruction
+sequence used is minimal to reduce overhead. Crucially, a stackframe is
+not created for the function being traced. This breaks stack unwinding
+since the function being traced does not have a stackframe for itself.
+As such, it never shows up in the backtrace:
+
+/sys/kernel/debug/tracing # echo 1 > /proc/sys/kernel/stack_tracer_enabled
+/sys/kernel/debug/tracing # cat stack_trace
+        Depth    Size   Location    (17 entries)
+        -----    ----   --------
+  0)     4144      32   ftrace_call+0x4/0x44
+  1)     4112     432   get_page_from_freelist+0x26c/0x1ad0
+  2)     3680     496   __alloc_pages+0x290/0x1280
+  3)     3184     336   __folio_alloc+0x34/0x90
+  4)     2848     176   vma_alloc_folio+0xd8/0x540
+  5)     2672     272   __handle_mm_fault+0x700/0x1cc0
+  6)     2400     208   handle_mm_fault+0xf0/0x3f0
+  7)     2192      80   ___do_page_fault+0x3e4/0xbe0
+  8)     2112     160   do_page_fault+0x30/0xc0
+  9)     1952     256   data_access_common_virt+0x210/0x220
+ 10)     1696     400   0xc00000000f16b100
+ 11)     1296     384   load_elf_binary+0x804/0x1b80
+ 12)      912     208   bprm_execve+0x2d8/0x7e0
+ 13)      704      64   do_execveat_common+0x1d0/0x2f0
+ 14)      640     160   sys_execve+0x54/0x70
+ 15)      480      64   system_call_exception+0x138/0x350
+ 16)      416     416   system_call_common+0x160/0x2c4
+
+Fix this by having ftrace create a dummy stackframe for the function
+being traced. With this, backtraces now capture the function being
+traced:
+
+/sys/kernel/debug/tracing # cat stack_trace
+        Depth    Size   Location    (17 entries)
+        -----    ----   --------
+  0)     3888      32   _raw_spin_trylock+0x8/0x70
+  1)     3856     576   get_page_from_freelist+0x26c/0x1ad0
+  2)     3280      64   __alloc_pages+0x290/0x1280
+  3)     3216     336   __folio_alloc+0x34/0x90
+  4)     2880     176   vma_alloc_folio+0xd8/0x540
+  5)     2704     416   __handle_mm_fault+0x700/0x1cc0
+  6)     2288      96   handle_mm_fault+0xf0/0x3f0
+  7)     2192      48   ___do_page_fault+0x3e4/0xbe0
+  8)     2144     192   do_page_fault+0x30/0xc0
+  9)     1952     608   data_access_common_virt+0x210/0x220
+ 10)     1344      16   0xc0000000334bbb50
+ 11)     1328     416   load_elf_binary+0x804/0x1b80
+ 12)      912      64   bprm_execve+0x2d8/0x7e0
+ 13)      848     176   do_execveat_common+0x1d0/0x2f0
+ 14)      672     192   sys_execve+0x54/0x70
+ 15)      480      64   system_call_exception+0x138/0x350
+ 16)      416     416   system_call_common+0x160/0x2c4
+
+This results in two additional stores in the ftrace entry code, but
+produces reliable backtraces.
+
+Fixes: 153086644fd1 ("powerpc/ftrace: Add support for -mprofile-kernel ftrace ABI")
+Cc: stable@vger.kernel.org
+Signed-off-by: Naveen N Rao <naveen@kernel.org>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://msgid.link/20230621051349.759567-1-naveen@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/kernel/trace/ftrace_64_mprofile.S |    7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
++++ b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
+@@ -41,6 +41,9 @@ _GLOBAL(ftrace_caller)
+       /* Save the original return address in A's stack frame */
+       std     r0,LRSAVE(r1)
++      /* Create a minimal stack frame for representing B */
++      stdu    r1, -STACK_FRAME_MIN_SIZE(r1)
++
+       /* Create our stack frame + pt_regs */
+       stdu    r1,-SWITCH_FRAME_SIZE(r1)
+@@ -64,6 +67,8 @@ _GLOBAL(ftrace_caller)
+       mflr    r7
+       /* Save it as pt_regs->nip */
+       std     r7, _NIP(r1)
++      /* Also save it in B's stackframe header for proper unwind */
++      std     r7, LRSAVE+SWITCH_FRAME_SIZE(r1)
+       /* Save the read LR in pt_regs->link */
+       std     r0, _LINK(r1)
+@@ -146,7 +151,7 @@ ftrace_call:
+       ld      r2, 24(r1)
+       /* Pop our stack frame */
+-      addi r1, r1, SWITCH_FRAME_SIZE
++      addi r1, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE
+ #ifdef CONFIG_LIVEPATCH
+         /*
diff --git a/queue-4.14/powerpc-ftrace-fix-stack-teardown-in-ftrace_no_trace.patch b/queue-4.14/powerpc-ftrace-fix-stack-teardown-in-ftrace_no_trace.patch
new file mode 100644 (file)
index 0000000..23c382f
--- /dev/null
@@ -0,0 +1,41 @@
+From stable+bounces-6800-greg=kroah.com@vger.kernel.org Fri Dec 15 12:15:29 2023
+From: Naveen N Rao <naveen@kernel.org>
+Date: Fri, 15 Dec 2023 16:41:22 +0530
+Subject: powerpc/ftrace: Fix stack teardown in ftrace_no_trace
+To: <stable@vger.kernel.org>
+Cc: Greg KH <gregkh@linuxfoundation.org>, Michael Ellerman <mpe@ellerman.id.au>
+Message-ID: <20231215111122.2361478-2-naveen@kernel.org>
+
+From: Naveen N Rao <naveen@kernel.org>
+
+commit 4b3338aaa74d7d4ec5b6734dc298f0db94ec83d2 upstream.
+
+Commit 41a506ef71eb ("powerpc/ftrace: Create a dummy stackframe to fix
+stack unwind") added use of a new stack frame on ftrace entry to fix
+stack unwind. However, the commit missed updating the offset used while
+tearing down the ftrace stack when ftrace is disabled. Fix the same.
+
+In addition, the commit missed saving the correct stack pointer in
+pt_regs. Update the same.
+
+Fixes: 41a506ef71eb ("powerpc/ftrace: Create a dummy stackframe to fix stack unwind")
+Cc: stable@vger.kernel.org # v6.5+
+Signed-off-by: Naveen N Rao <naveen@kernel.org>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://msgid.link/20231130065947.2188860-1-naveen@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/kernel/trace/ftrace_64_mprofile.S |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
++++ b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
+@@ -54,7 +54,7 @@ _GLOBAL(ftrace_caller)
+       SAVE_10GPRS(22, r1)
+       /* Save previous stack pointer (r1) */
+-      addi    r8, r1, SWITCH_FRAME_SIZE
++      addi    r8, r1, SWITCH_FRAME_SIZE+STACK_FRAME_MIN_SIZE
+       std     r8, GPR1(r1)
+       /* Load special regs for save below */
index f38b414750a13f118f0bd249fa4f51d6e62f2d1f..f87d1673c0b3b5f14506dfd4a6a3ea8ca93699db 100644 (file)
@@ -22,3 +22,5 @@ hid-hid-asus-add-const-to-read-only-outgoing-usb-buf.patch
 ext4-prevent-the-normalized-size-from-exceeding-ext_max_blocks.patch
 team-fix-use-after-free-when-an-option-instance-allocation-fails.patch
 ring-buffer-fix-memory-leak-of-free-page.patch
+powerpc-ftrace-create-a-dummy-stackframe-to-fix-stack-unwind.patch
+powerpc-ftrace-fix-stack-teardown-in-ftrace_no_trace.patch