From 261df1303801264b89bde211bd6c883e5935cd36 Mon Sep 17 00:00:00 2001
From: Sasha Levin <sashal@kernel.org>
Date: Wed, 20 Mar 2024 02:54:12 +0100
Subject: s390/bpf: Fix bpf_plt pointer arithmetic

From: Ilya Leoshkevich <iii@linux.ibm.com>

[ Upstream commit 7ded842b356d151ece8ac4985940438e6d3998bb ]

Kui-Feng Lee reported a crash on s390x triggered by the
dummy_st_ops/dummy_init_ptr_arg test [1]:

  [<0000000000000002>] 0x2
  [<00000000009d5cde>] bpf_struct_ops_test_run+0x156/0x250
  [<000000000033145a>] __sys_bpf+0xa1a/0xd00
  [<00000000003319dc>] __s390x_sys_bpf+0x44/0x50
  [<0000000000c4382c>] __do_syscall+0x244/0x300
  [<0000000000c59a40>] system_call+0x70/0x98

This is caused by GCC moving memcpy() after assignments in
bpf_jit_plt(), resulting in NULL pointers being written instead of
the return and the target addresses.

Looking at the GCC internals, the reordering is allowed because the
alias analysis thinks that the memcpy() destination and the assignments'
left-hand sides are based on different objects: new_plt and
bpf_plt_ret/bpf_plt_target respectively, and therefore they cannot
alias.

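A reduced sketch of the pattern (simplified from bpf_jit_plt() in the
diff below; the fill_plt() wrapper is only illustrative, not the exact
kernel code):

  #include <string.h>

  #define BPF_PLT_SIZE 32

  extern const char bpf_plt[];      /* start of the assembly template      */
  extern const char bpf_plt_ret[];  /* return-address slot in the template */

  static void fill_plt(void *plt, void *ret)
  {
          /* GCC is free to move this memcpy() after the store below,
           * because its alias analysis believes the two cannot overlap;
           * the template's ".quad 0" then overwrites the address that
           * was just written, leaving NULL in the PLT.
           */
          memcpy(plt, bpf_plt, BPF_PLT_SIZE);
          /* The offset comes from subtracting pointers into two distinct
           * objects, so GCC considers this store to be based on
           * bpf_plt_ret rather than on plt.
           */
          *(void **)((char *)plt + (bpf_plt_ret - bpf_plt)) = ret;
  }
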
This is in turn due to a violation of the C standard:

  When two pointers are subtracted, both shall point to elements of the
  same array object, or one past the last element of the array object
  ...

From C's perspective, bpf_plt_ret and bpf_plt are distinct objects
and cannot be subtracted. In practical terms, doing so confuses
GCC's alias analysis.

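As a standalone illustration of the rule (not taken from the kernel
tree): subtracting pointers that point into two different arrays is
undefined, while subtracting pointers within one array is well-defined:

  #include <stddef.h>

  void example(void)
  {
          char a[8], b[8];
          ptrdiff_t ok  = &a[8] - &a[0];  /* same array object: defined  */
          ptrdiff_t bad = b - a;          /* distinct objects: undefined */

          (void)ok; (void)bad;
  }
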
The code was written this way in order to let the C side know a few
offsets defined in the assembly. While nice, this is by no means
necessary. Fix the noncompliance by hardcoding these offsets.

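The shape of the fix, condensed from the hunks below: the PLT layout is
described once as a struct, so the slot offsets are fixed by the struct
layout and the copy and the slot writes visibly share one base object:

  struct bpf_plt {
          char code[16];  /* code emitted by the inline assembly template  */
          void *ret;      /* the "bpf_plt_ret: .quad 0" slot, offset 16    */
          void *target;   /* the "bpf_plt_target: .quad 0" slot, offset 24 */
  } __packed;

  static void bpf_jit_plt(struct bpf_plt *plt, void *ret, void *target)
  {
          /* memcpy() and the member stores are now all based on *plt,
           * so the compiler may no longer reorder the copy past them.
           */
          memcpy(plt, &bpf_plt, sizeof(*plt));
          plt->ret = ret;
          plt->target = target;
  }
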
[1] https://lore.kernel.org/bpf/c9923c1d-971d-4022-8dc8-1364e929d34c@gmail.com/

Fixes: f1d5df84cd8c ("s390/bpf: Implement bpf_arch_text_poke()")
Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Message-ID: <20240320015515.11883-1-iii@linux.ibm.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 arch/s390/net/bpf_jit_comp.c | 46 ++++++++++++++++--------------------
 1 file changed, 20 insertions(+), 26 deletions(-)

diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index e507692e51e71..8af02176f68bf 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -516,11 +516,12 @@ static void bpf_skip(struct bpf_jit *jit, int size)
  * PLT for hotpatchable calls. The calling convention is the same as for the
  * ftrace hotpatch trampolines: %r0 is return address, %r1 is clobbered.
  */
-extern const char bpf_plt[];
-extern const char bpf_plt_ret[];
-extern const char bpf_plt_target[];
-extern const char bpf_plt_end[];
-#define BPF_PLT_SIZE 32
+struct bpf_plt {
+	char code[16];
+	void *ret;
+	void *target;
+} __packed;
+extern const struct bpf_plt bpf_plt;
 asm(
 	".pushsection .rodata\n"
 	"	.balign 8\n"
@@ -531,15 +532,14 @@ asm(
 	"	.balign 8\n"
 	"bpf_plt_ret: .quad 0\n"
 	"bpf_plt_target: .quad 0\n"
-	"bpf_plt_end:\n"
 	"	.popsection\n"
 );
 
-static void bpf_jit_plt(void *plt, void *ret, void *target)
+static void bpf_jit_plt(struct bpf_plt *plt, void *ret, void *target)
 {
-	memcpy(plt, bpf_plt, BPF_PLT_SIZE);
-	*(void **)((char *)plt + (bpf_plt_ret - bpf_plt)) = ret;
-	*(void **)((char *)plt + (bpf_plt_target - bpf_plt)) = target ?: ret;
+	memcpy(plt, &bpf_plt, sizeof(*plt));
+	plt->ret = ret;
+	plt->target = target;
 }
 
 /*
@@ -662,9 +662,9 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
 	jit->prg = ALIGN(jit->prg, 8);
 	jit->prologue_plt = jit->prg;
 	if (jit->prg_buf)
-		bpf_jit_plt(jit->prg_buf + jit->prg,
+		bpf_jit_plt((struct bpf_plt *)(jit->prg_buf + jit->prg),
 			    jit->prg_buf + jit->prologue_plt_ret, NULL);
-	jit->prg += BPF_PLT_SIZE;
+	jit->prg += sizeof(struct bpf_plt);
 }
 
 static int get_probe_mem_regno(const u8 *insn)
@@ -1901,9 +1901,6 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 	struct bpf_jit jit;
 	int pass;
 
-	if (WARN_ON_ONCE(bpf_plt_end - bpf_plt != BPF_PLT_SIZE))
-		return orig_fp;
-
 	if (!fp->jit_requested)
 		return orig_fp;
 
@@ -2009,14 +2006,11 @@ bool bpf_jit_supports_far_kfunc_call(void)
 int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
 		       void *old_addr, void *new_addr)
 {
+	struct bpf_plt expected_plt, current_plt, new_plt, *plt;
 	struct {
 		u16 opc;
 		s32 disp;
 	} __packed insn;
-	char expected_plt[BPF_PLT_SIZE];
-	char current_plt[BPF_PLT_SIZE];
-	char new_plt[BPF_PLT_SIZE];
-	char *plt;
 	char *ret;
 	int err;
 
@@ -2035,18 +2029,18 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
 	 */
 	} else {
 		/* Verify the PLT. */
-		plt = (char *)ip + (insn.disp << 1);
-		err = copy_from_kernel_nofault(current_plt, plt, BPF_PLT_SIZE);
+		plt = ip + (insn.disp << 1);
+		err = copy_from_kernel_nofault(&current_plt, plt,
+					       sizeof(current_plt));
 		if (err < 0)
 			return err;
 		ret = (char *)ip + 6;
-		bpf_jit_plt(expected_plt, ret, old_addr);
-		if (memcmp(current_plt, expected_plt, BPF_PLT_SIZE))
+		bpf_jit_plt(&expected_plt, ret, old_addr);
+		if (memcmp(&current_plt, &expected_plt, sizeof(current_plt)))
 			return -EINVAL;
 		/* Adjust the call address. */
-		bpf_jit_plt(new_plt, ret, new_addr);
-		s390_kernel_write(plt + (bpf_plt_target - bpf_plt),
-				  new_plt + (bpf_plt_target - bpf_plt),
+		bpf_jit_plt(&new_plt, ret, new_addr);
+		s390_kernel_write(&plt->target, &new_plt.target,
 				  sizeof(void *));
 	}
 
-- 
2.43.0
