From 045afc24124d80c6998d9c770844c67912083506 Mon Sep 17 00:00:00 2001
From: Will Deacon <will.deacon@arm.com>
Date: Mon, 8 Apr 2019 12:45:09 +0100
Subject: arm64: futex: Fix FUTEX_WAKE_OP atomic ops with non-zero result value

From: Will Deacon <will.deacon@arm.com>

commit 045afc24124d80c6998d9c770844c67912083506 upstream.

Rather embarrassingly, our futex() FUTEX_WAKE_OP implementation doesn't
explicitly set the return value on the non-faulting path and instead
leaves it holding the result of the underlying atomic operation. This
means that any FUTEX_WAKE_OP atomic operation which computes a non-zero
value will be reported as having failed. Regrettably, I wrote the buggy
code back in 2011 and it was upstreamed as part of the initial arm64
support in 2012.

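To make the data flow concrete, here is a rough C-level sketch of the old
FUTEX_OP_ADD path, assuming the usual operand binding of the
__futex_atomic_op() macro (%w0 = ret, %w1 = oldval, %2 = *uaddr,
%w3 = tmp, %w4 = oparg); old_futex_add() and try_store() are purely
illustrative stand-ins, not code from this patch:

	/* Hypothetical stand-in for the stlxr status check. */
	static inline int try_store(unsigned int *uaddr, unsigned int val)
	{
		*uaddr = val;	/* pretend the exclusive store succeeded */
		return 0;	/* stlxr status: 0 == success */
	}

	static int old_futex_add(unsigned int *uaddr, int oparg)
	{
		int ret, oldval, tmp;

		do {
			oldval = *uaddr;		/* 1: ldxr  %w1, %2       */
			ret = oldval + oparg;		/*    add   %w0, %w1, %w4 */
			tmp = try_store(uaddr, ret);	/* 2: stlxr %w3, %w0, %2  */
		} while (tmp);				/*    cbnz  %w3, 1b       */

		/*
		 * Nothing above writes 0 to ret, so the caller sees the
		 * freshly stored futex value rather than "success".
		 */
		return ret;
	}

Swapping ret and tmp in the store-exclusive sequence, as the hunks below
do, leaves ret holding the stlxr status instead, which is 0 once the
store succeeds.
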
The reasons we appear to get away with this are:

  1. FUTEX_WAKE_OP is rarely used and therefore doesn't appear to get
     exercised by futex() test applications

  2. If the result of the atomic operation is zero, the system call
     behaves correctly

  3. Prior to version 2.25, the only operation used by GLIBC set the
     futex to zero, and therefore worked as expected. From 2.25 onwards,
     FUTEX_WAKE_OP is not used by GLIBC at all.

Fix the implementation by ensuring that the return value is either 0
to indicate that the atomic operation completed successfully, or -EFAULT
if we encountered a fault when accessing the user mapping.

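As a simple way to exercise the affected case from userspace, a minimal
sketch such as the following (not taken from this patch) issues a
FUTEX_WAKE_OP whose atomic operation stores the non-zero value 1 into
the second futex. On a fixed kernel the call just returns the number of
woken waiters (0 here, since nobody is waiting); with the bug, the
non-zero result of the atomic operation leaks into the internal return
value instead of 0, which is the misbehaviour described above:

#include <linux/futex.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static uint32_t futex1, futex2;

int main(void)
{
	/* Set futex2 to 1 (a non-zero result), then check old value == 0. */
	int op = FUTEX_OP(FUTEX_OP_SET, 1, FUTEX_OP_CMP_EQ, 0);
	long ret = syscall(SYS_futex, &futex1, FUTEX_WAKE_OP,
			   1 /* wake on futex1 */, 1 /* wake on futex2 */,
			   &futex2, op);

	printf("FUTEX_WAKE_OP returned %ld (futex2 is now %u)\n",
	       ret, futex2);
	return 0;
}
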
Cc: <stable@kernel.org>
Fixes: 6170a97460db ("arm64: Atomic operations")
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/arm64/include/asm/futex.h | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -26,8 +26,8 @@
 	asm volatile(							\
 "1:	ldxr	%w1, %2\n"						\
 	insn "\n"							\
-"2:	stlxr	%w3, %w0, %2\n"						\
-"	cbnz	%w3, 1b\n"						\
+"2:	stlxr	%w0, %w3, %2\n"						\
+"	cbnz	%w0, 1b\n"						\
 "	dmb	ish\n"							\
 "3:\n"									\
 "	.pushsection .fixup,\"ax\"\n"					\
@@ -50,7 +50,7 @@ futex_atomic_op_inuser(unsigned int enco
 	int cmp = (encoded_op >> 24) & 15;
 	int oparg = (int)(encoded_op << 8) >> 20;
 	int cmparg = (int)(encoded_op << 20) >> 20;
-	int oldval = 0, ret, tmp;
+	int oldval, ret, tmp;
 
 	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
 		oparg = 1U << (oparg & 0x1f);
@@ -62,23 +62,23 @@ futex_atomic_op_inuser(unsigned int enco
 
 	switch (op) {
 	case FUTEX_OP_SET:
-		__futex_atomic_op("mov	%w0, %w4",
+		__futex_atomic_op("mov	%w3, %w4",
 				  ret, oldval, uaddr, tmp, oparg);
 		break;
 	case FUTEX_OP_ADD:
-		__futex_atomic_op("add	%w0, %w1, %w4",
+		__futex_atomic_op("add	%w3, %w1, %w4",
 				  ret, oldval, uaddr, tmp, oparg);
 		break;
 	case FUTEX_OP_OR:
-		__futex_atomic_op("orr	%w0, %w1, %w4",
+		__futex_atomic_op("orr	%w3, %w1, %w4",
 				  ret, oldval, uaddr, tmp, oparg);
 		break;
 	case FUTEX_OP_ANDN:
-		__futex_atomic_op("and	%w0, %w1, %w4",
+		__futex_atomic_op("and	%w3, %w1, %w4",
 				  ret, oldval, uaddr, tmp, ~oparg);
 		break;
 	case FUTEX_OP_XOR:
-		__futex_atomic_op("eor	%w0, %w1, %w4",
+		__futex_atomic_op("eor	%w3, %w1, %w4",
 				  ret, oldval, uaddr, tmp, oparg);
 		break;
 	default: