From 833fd800bf56b74d39d71d3f5936dffb3e0409c6 Mon Sep 17 00:00:00 2001
From: Petr Pavlu <petr.pavlu@suse.com>
Date: Tue, 11 Jul 2023 11:19:52 +0200
Subject: x86/retpoline,kprobes: Skip optprobe check for indirect jumps with retpolines and IBT

From: Petr Pavlu <petr.pavlu@suse.com>

commit 833fd800bf56b74d39d71d3f5936dffb3e0409c6 upstream.

The kprobes optimization check can_optimize() calls
insn_is_indirect_jump() to detect indirect jump instructions in
a target function. If any is found, creating an optprobe is disallowed
in the function because the jump could be from a jump table and could
potentially land in the middle of the target optprobe.

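For reference, the test insn_is_indirect_jump() performs can be
sketched as a standalone predicate. This is an illustrative sketch
only; is_indirect_jump() below is hypothetical and bypasses the
kernel's struct insn decoder:

#include <stdbool.h>
#include <stdint.h>

/*
 * Illustrative only: classify an instruction as an indirect jump from
 * its first opcode byte and its ModRM byte, mirroring the opcode test
 * used by insn_is_indirect_jump().
 */
static bool is_indirect_jump(uint8_t opcode0, uint8_t modrm)
{
        uint8_t modrm_reg = (modrm >> 3) & 0x7; /* ModRM.reg field */

        /* 0xff /4 is JMP r/m (near), 0xff /5 is JMP m16:xx (far) */
        if (opcode0 == 0xff && (modrm_reg & 6) == 4)
                return true;

        return opcode0 == 0xea; /* segment based far jump */
}
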
With retpolines, insn_is_indirect_jump() additionally looks for calls to
indirect thunks which the compiler potentially used to replace original
jumps. This extra check is however unnecessary because jump tables are
disabled when the kernel is built with retpolines. The same is currently
the case with IBT.

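As an illustration of the jump-table case (the assembly in the comment
is a sketch, not the output of any particular compiler), a dense switch
is the typical source of such in-function indirect jumps:

/*
 * A dense switch like this may be lowered to a jump table, i.e. to an
 * indirect jump within the function itself, roughly:
 *
 *      jmp     *.Ltable(,%rdi,8)
 *
 * With retpolines or IBT the compiler does not generate jump tables,
 * so no such in-function indirect jump is emitted.
 */
int classify(int c)
{
        switch (c) {
        case 0: return 10;
        case 1: return 11;
        case 2: return 12;
        case 3: return 13;
        default: return -1;
        }
}
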
Based on this observation, remove the logic to look for calls to
indirect thunks and skip the check for indirect jumps altogether if the
kernel is built with retpolines or IBT. Subsequently, remove the symbols
__indirect_thunk_start and __indirect_thunk_end, which are no longer
needed.

Dropping this logic indirectly fixes a problem where the range
[__indirect_thunk_start, __indirect_thunk_end] wrongly also included
the return thunk. As a result, machines which used the return thunk as
a mitigation and didn't have it patched by any alternative ended up
unable to use optprobes in any regular function.

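A sketch of that failure mode (the layout comment below is
illustrative; the addresses are invented): every function compiled with
the return thunk ends in "jmp __x86_return_thunk", and because the
return thunk sat between the markers, the old range check matched that
final jump in every function:

/*
 * The linker script placed both the indirect thunks and the return
 * thunk between the markers:
 *
 *      __indirect_thunk_start          (e.g. 0xffffffff82000000)
 *        __x86_indirect_thunk_rax, ...
 *        __x86_return_thunk            <- wrongly inside the range
 *      __indirect_thunk_end            (e.g. 0xffffffff82000800)
 *
 * so this check also returned true for a jump to the return thunk.
 */
static int target_in_thunk_range(unsigned long target,
                                 unsigned long start, unsigned long end)
{
        return start <= target && target < end;
}
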
Fixes: 0b53c374b9ef ("x86/retpoline: Use -mfunction-return")
Suggested-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Suggested-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
Signed-off-by: Petr Pavlu <petr.pavlu@suse.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Acked-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
Link: https://lore.kernel.org/r/20230711091952.27944-3-petr.pavlu@suse.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 arch/x86/include/asm/nospec-branch.h |    3 --
 arch/x86/kernel/kprobes/opt.c        |   40 ++++++++++++++---------------------
 arch/x86/kernel/vmlinux.lds.S        |    2 -
 tools/perf/util/thread-stack.c       |    4 ---
 4 files changed, 17 insertions(+), 32 deletions(-)

--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -482,9 +482,6 @@ enum ssb_mitigation {
         SPEC_STORE_BYPASS_SECCOMP,
 };
 
-extern char __indirect_thunk_start[];
-extern char __indirect_thunk_end[];
-
 static __always_inline
 void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
 {
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -226,7 +226,7 @@ static int copy_optimized_instructions(u
 }
 
 /* Check whether insn is indirect jump */
-static int __insn_is_indirect_jump(struct insn *insn)
+static int insn_is_indirect_jump(struct insn *insn)
 {
         return ((insn->opcode.bytes[0] == 0xff &&
                 (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
@@ -260,26 +260,6 @@ static int insn_jump_into_range(struct i
         return (start <= target && target <= start + len);
 }
 
-static int insn_is_indirect_jump(struct insn *insn)
-{
-        int ret = __insn_is_indirect_jump(insn);
-
-#ifdef CONFIG_RETPOLINE
-        /*
-         * Jump to x86_indirect_thunk_* is treated as an indirect jump.
-         * Note that even with CONFIG_RETPOLINE=y, the kernel compiled with
-         * older gcc may use indirect jump. So we add this check instead of
-         * replace indirect-jump check.
-         */
-        if (!ret)
-                ret = insn_jump_into_range(insn,
-                                (unsigned long)__indirect_thunk_start,
-                                (unsigned long)__indirect_thunk_end -
-                                (unsigned long)__indirect_thunk_start);
-#endif
-        return ret;
-}
-
 /* Decode whole function to ensure any instructions don't jump into target */
 static int can_optimize(unsigned long paddr)
 {
@@ -334,9 +314,21 @@ static int can_optimize(unsigned long pa
                 /* Recover address */
                 insn.kaddr = (void *)addr;
                 insn.next_byte = (void *)(addr + insn.length);
-                /* Check any instructions don't jump into target */
-                if (insn_is_indirect_jump(&insn) ||
-                    insn_jump_into_range(&insn, paddr + INT3_INSN_SIZE,
+                /*
+                 * Check any instructions don't jump into target, indirectly or
+                 * directly.
+                 *
+                 * The indirect case is present to handle a code with jump
+                 * tables. When the kernel uses retpolines, the check should in
+                 * theory additionally look for jumps to indirect thunks.
+                 * However, the kernel built with retpolines or IBT has jump
+                 * tables disabled so the check can be skipped altogether.
+                 */
+                if (!IS_ENABLED(CONFIG_RETPOLINE) &&
+                    !IS_ENABLED(CONFIG_X86_KERNEL_IBT) &&
+                    insn_is_indirect_jump(&insn))
+                        return 0;
+                if (insn_jump_into_range(&insn, paddr + INT3_INSN_SIZE,
                                              DISP32_SIZE))
                         return 0;
                 addr += insn.length;
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -133,10 +133,8 @@ SECTIONS
                 KPROBES_TEXT
                 SOFTIRQENTRY_TEXT
 #ifdef CONFIG_RETPOLINE
-                __indirect_thunk_start = .;
                 *(.text..__x86.indirect_thunk)
                 *(.text..__x86.return_thunk)
-                __indirect_thunk_end = .;
 #endif
                 STATIC_CALL_TEXT
 
--- a/tools/perf/util/thread-stack.c
+++ b/tools/perf/util/thread-stack.c
@@ -1037,9 +1037,7 @@ static int thread_stack__trace_end(struc
 
 static bool is_x86_retpoline(const char *name)
 {
-        const char *p = strstr(name, "__x86_indirect_thunk_");
-
-        return p == name || !strcmp(name, "__indirect_thunk_start");
+        return strstr(name, "__x86_indirect_thunk_") == name;
 }
 
 /*