x86-bugs-warn-when-ibrs-mitigation-is-selected-on-enhanced-ibrs-parts.patch
dlm-fix-pending-remove-if-msg-allocation-fails.patch
x86-uaccess-implement-macros-for-cmpxchg-on-user-add.patch
-x86-uaccess-implement-macros-for-cmpxchg-on-user-add.patch-32473
x86-extable-tidy-up-redundant-handler-functions.patch
x86-extable-get-rid-of-redundant-macros.patch
x86-mce-deduplicate-exception-handling.patch
tracing-fix-return-value-of-trace_pid_write.patch
um-virtio_uml-allow-probing-from-devicetree.patch
um-virtio_uml-fix-broken-device-handling-in-time-tra.patch
-x86-uaccess-implement-macros-for-cmpxchg-on-user-add.patch-12415
bluetooth-add-bt_skb_sendmsg-helper.patch
bluetooth-add-bt_skb_sendmmsg-helper.patch
bluetooth-sco-replace-use-of-memcpy_from_msg-with-bt_skb_sendmsg.patch
+++ /dev/null
-From 8a0706834a022fcc9d6d7b4b80c9cc8f3342539f Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 2 Feb 2022 00:49:42 +0000
-Subject: x86/uaccess: Implement macros for CMPXCHG on user addresses
-
-From: Peter Zijlstra <peterz@infradead.org>
-
-[ Upstream commit 989b5db215a2f22f89d730b607b071d964780f10 ]
-
-Add support for CMPXCHG loops on userspace addresses. Provide both an
-"unsafe" version for tight loops that do their own uaccess begin/end, as
-well as a "safe" version for use cases where the CMPXCHG is not buried in
-a loop, e.g. KVM will resume the guest instead of looping when emulation
-of a guest atomic access fails the CMPXCHG.
-
-Provide 8-byte versions for 32-bit kernels so that KVM can do CMPXCHG on
-guest PAE PTEs, which are accessed via userspace addresses.
-
-Guard the asm_volatile_goto() variation with CC_HAS_ASM_GOTO_TIED_OUTPUT,
-as the "+m" constraint fails on some compilers that otherwise support
-CC_HAS_ASM_GOTO_OUTPUT.
-
-Cc: stable@vger.kernel.org
-Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
-Co-developed-by: Sean Christopherson <seanjc@google.com>
-Signed-off-by: Sean Christopherson <seanjc@google.com>
-Message-Id: <20220202004945.2540433-3-seanjc@google.com>
-Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- arch/x86/include/asm/uaccess.h | 97 +++++++++++++++++++++++++++++++++++++++++
- 1 file changed, 97 insertions(+)
-
---- a/arch/x86/include/asm/uaccess.h
-+++ b/arch/x86/include/asm/uaccess.h
-@@ -608,6 +608,103 @@ do { \
- #endif // CONFIG_X86_32
- #endif // CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
-
-+#ifdef CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
-+#define __try_cmpxchg_user_asm(itype, ltype, _ptr, _pold, _new, label) ({ \
-+ bool success; \
-+ __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \
-+ __typeof__(*(_ptr)) __old = *_old; \
-+ __typeof__(*(_ptr)) __new = (_new); \
-+ asm_volatile_goto("\n" \
-+ "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
-+ _ASM_EXTABLE_UA(1b, %l[label]) \
-+ : CC_OUT(z) (success), \
-+ [ptr] "+m" (*_ptr), \
-+ [old] "+a" (__old) \
-+ : [new] ltype (__new) \
-+ : "memory" \
-+ : label); \
-+ if (unlikely(!success)) \
-+ *_old = __old; \
-+ likely(success); })
-+
-+#ifdef CONFIG_X86_32
-+#define __try_cmpxchg64_user_asm(_ptr, _pold, _new, label) ({ \
-+ bool success; \
-+ __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \
-+ __typeof__(*(_ptr)) __old = *_old; \
-+ __typeof__(*(_ptr)) __new = (_new); \
-+ asm_volatile_goto("\n" \
-+ "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n" \
-+ _ASM_EXTABLE_UA(1b, %l[label]) \
-+ : CC_OUT(z) (success), \
-+ "+A" (__old), \
-+ [ptr] "+m" (*_ptr) \
-+ : "b" ((u32)__new), \
-+ "c" ((u32)((u64)__new >> 32)) \
-+ : "memory" \
-+ : label); \
-+ if (unlikely(!success)) \
-+ *_old = __old; \
-+ likely(success); })
-+#endif // CONFIG_X86_32
-+#else // !CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
-+#define __try_cmpxchg_user_asm(itype, ltype, _ptr, _pold, _new, label) ({ \
-+ int __err = 0; \
-+ bool success; \
-+ __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \
-+ __typeof__(*(_ptr)) __old = *_old; \
-+ __typeof__(*(_ptr)) __new = (_new); \
-+ asm volatile("\n" \
-+ "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
-+ CC_SET(z) \
-+ "2:\n" \
-+ _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, \
-+ %[errout]) \
-+ : CC_OUT(z) (success), \
-+ [errout] "+r" (__err), \
-+ [ptr] "+m" (*_ptr), \
-+ [old] "+a" (__old) \
-+ : [new] ltype (__new) \
-+ : "memory", "cc"); \
-+ if (unlikely(__err)) \
-+ goto label; \
-+ if (unlikely(!success)) \
-+ *_old = __old; \
-+ likely(success); })
-+
-+#ifdef CONFIG_X86_32
-+/*
-+ * Unlike the normal CMPXCHG, hardcode ECX for both success/fail and error.
-+ * There are only six GPRs available and four (EAX, EBX, ECX, and EDX) are
-+ * hardcoded by CMPXCHG8B, leaving only ESI and EDI. If the compiler uses
-+ * both ESI and EDI for the memory operand, compilation will fail if the error
-+ * is an input+output as there will be no register available for input.
-+ */
-+#define __try_cmpxchg64_user_asm(_ptr, _pold, _new, label) ({ \
-+ int __result; \
-+ __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \
-+ __typeof__(*(_ptr)) __old = *_old; \
-+ __typeof__(*(_ptr)) __new = (_new); \
-+ asm volatile("\n" \
-+ "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n" \
-+ "mov $0, %%ecx\n\t" \
-+ "setz %%cl\n" \
-+ "2:\n" \
-+ _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %%ecx) \
-+ : [result]"=c" (__result), \
-+ "+A" (__old), \
-+ [ptr] "+m" (*_ptr) \
-+ : "b" ((u32)__new), \
-+ "c" ((u32)((u64)__new >> 32)) \
-+ : "memory", "cc"); \
-+ if (unlikely(__result < 0)) \
-+ goto label; \
-+ if (unlikely(!__result)) \
-+ *_old = __old; \
-+ likely(__result); })
-+#endif // CONFIG_X86_32
-+#endif // CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
-+
- /* FIXME: this hack is definitely wrong -AK */
- struct __large_struct { unsigned long buf[100]; };
- #define __m(x) (*(struct __large_struct __user *)(x))
+++ /dev/null
-From 9fd1d1b2e1d0f4aadee5632111f011a0a4e2ca5d Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 2 Feb 2022 00:49:42 +0000
-Subject: x86/uaccess: Implement macros for CMPXCHG on user addresses
-
-From: Peter Zijlstra <peterz@infradead.org>
-
-[ Upstream commit 989b5db215a2f22f89d730b607b071d964780f10 ]
-
-Add support for CMPXCHG loops on userspace addresses. Provide both an
-"unsafe" version for tight loops that do their own uaccess begin/end, as
-well as a "safe" version for use cases where the CMPXCHG is not buried in
-a loop, e.g. KVM will resume the guest instead of looping when emulation
-of a guest atomic access fails the CMPXCHG.
-
-Provide 8-byte versions for 32-bit kernels so that KVM can do CMPXCHG on
-guest PAE PTEs, which are accessed via userspace addresses.
-
-Guard the asm_volatile_goto() variation with CC_HAS_ASM_GOTO_TIED_OUTPUT,
-as the "+m" constraint fails on some compilers that otherwise support
-CC_HAS_ASM_GOTO_OUTPUT.
-
-Cc: stable@vger.kernel.org
-Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
-Co-developed-by: Sean Christopherson <seanjc@google.com>
-Signed-off-by: Sean Christopherson <seanjc@google.com>
-Message-Id: <20220202004945.2540433-3-seanjc@google.com>
-Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- arch/x86/include/asm/uaccess.h | 97 ++++++++++++++++++++++++++++++++++
- 1 file changed, 97 insertions(+)
-
-diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
-index 2f4c9c168b11..775f1162da74 100644
---- a/arch/x86/include/asm/uaccess.h
-+++ b/arch/x86/include/asm/uaccess.h
-@@ -511,6 +511,103 @@ do { \
- #endif // CONFIG_X86_32
- #endif // CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
-
-+#ifdef CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
-+#define __try_cmpxchg_user_asm(itype, ltype, _ptr, _pold, _new, label) ({ \
-+ bool success; \
-+ __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \
-+ __typeof__(*(_ptr)) __old = *_old; \
-+ __typeof__(*(_ptr)) __new = (_new); \
-+ asm_volatile_goto("\n" \
-+ "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
-+ _ASM_EXTABLE_UA(1b, %l[label]) \
-+ : CC_OUT(z) (success), \
-+ [ptr] "+m" (*_ptr), \
-+ [old] "+a" (__old) \
-+ : [new] ltype (__new) \
-+ : "memory" \
-+ : label); \
-+ if (unlikely(!success)) \
-+ *_old = __old; \
-+ likely(success); })
-+
-+#ifdef CONFIG_X86_32
-+#define __try_cmpxchg64_user_asm(_ptr, _pold, _new, label) ({ \
-+ bool success; \
-+ __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \
-+ __typeof__(*(_ptr)) __old = *_old; \
-+ __typeof__(*(_ptr)) __new = (_new); \
-+ asm_volatile_goto("\n" \
-+ "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n" \
-+ _ASM_EXTABLE_UA(1b, %l[label]) \
-+ : CC_OUT(z) (success), \
-+ "+A" (__old), \
-+ [ptr] "+m" (*_ptr) \
-+ : "b" ((u32)__new), \
-+ "c" ((u32)((u64)__new >> 32)) \
-+ : "memory" \
-+ : label); \
-+ if (unlikely(!success)) \
-+ *_old = __old; \
-+ likely(success); })
-+#endif // CONFIG_X86_32
-+#else // !CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
-+#define __try_cmpxchg_user_asm(itype, ltype, _ptr, _pold, _new, label) ({ \
-+ int __err = 0; \
-+ bool success; \
-+ __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \
-+ __typeof__(*(_ptr)) __old = *_old; \
-+ __typeof__(*(_ptr)) __new = (_new); \
-+ asm volatile("\n" \
-+ "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
-+ CC_SET(z) \
-+ "2:\n" \
-+ _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, \
-+ %[errout]) \
-+ : CC_OUT(z) (success), \
-+ [errout] "+r" (__err), \
-+ [ptr] "+m" (*_ptr), \
-+ [old] "+a" (__old) \
-+ : [new] ltype (__new) \
-+ : "memory", "cc"); \
-+ if (unlikely(__err)) \
-+ goto label; \
-+ if (unlikely(!success)) \
-+ *_old = __old; \
-+ likely(success); })
-+
-+#ifdef CONFIG_X86_32
-+/*
-+ * Unlike the normal CMPXCHG, hardcode ECX for both success/fail and error.
-+ * There are only six GPRs available and four (EAX, EBX, ECX, and EDX) are
-+ * hardcoded by CMPXCHG8B, leaving only ESI and EDI. If the compiler uses
-+ * both ESI and EDI for the memory operand, compilation will fail if the error
-+ * is an input+output as there will be no register available for input.
-+ */
-+#define __try_cmpxchg64_user_asm(_ptr, _pold, _new, label) ({ \
-+ int __result; \
-+ __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \
-+ __typeof__(*(_ptr)) __old = *_old; \
-+ __typeof__(*(_ptr)) __new = (_new); \
-+ asm volatile("\n" \
-+ "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n" \
-+ "mov $0, %%ecx\n\t" \
-+ "setz %%cl\n" \
-+ "2:\n" \
-+ _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %%ecx) \
-+ : [result]"=c" (__result), \
-+ "+A" (__old), \
-+ [ptr] "+m" (*_ptr) \
-+ : "b" ((u32)__new), \
-+ "c" ((u32)((u64)__new >> 32)) \
-+ : "memory", "cc"); \
-+ if (unlikely(__result < 0)) \
-+ goto label; \
-+ if (unlikely(!__result)) \
-+ *_old = __old; \
-+ likely(__result); })
-+#endif // CONFIG_X86_32
-+#endif // CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
-+
- /* FIXME: this hack is definitely wrong -AK */
- struct __large_struct { unsigned long buf[100]; };
- #define __m(x) (*(struct __large_struct __user *)(x))
---
-2.35.1
-
locking-refcount-consolidate-implementations-of-refc.patch
x86-get-rid-of-small-constant-size-cases-in-raw_copy.patch
x86-uaccess-implement-macros-for-cmpxchg-on-user-add.patch
-x86-uaccess-implement-macros-for-cmpxchg-on-user-add.patch-104
mmap-locking-api-initial-implementation-as-rwsem-wra.patch
x86-mce-deduplicate-exception-handling.patch
bitfield.h-fix-type-of-reg-too-small-for-mask-test.patch
+++ /dev/null
-From b64be47e3d6b941b1934109ad7388ef0748547fc Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 2 Feb 2022 00:49:42 +0000
-Subject: x86/uaccess: Implement macros for CMPXCHG on user addresses
-
-From: Peter Zijlstra <peterz@infradead.org>
-
-[ Upstream commit 989b5db215a2f22f89d730b607b071d964780f10 ]
-
-Add support for CMPXCHG loops on userspace addresses. Provide both an
-"unsafe" version for tight loops that do their own uaccess begin/end, as
-well as a "safe" version for use cases where the CMPXCHG is not buried in
-a loop, e.g. KVM will resume the guest instead of looping when emulation
-of a guest atomic access fails the CMPXCHG.
-
-Provide 8-byte versions for 32-bit kernels so that KVM can do CMPXCHG on
-guest PAE PTEs, which are accessed via userspace addresses.
-
-Guard the asm_volatile_goto() variation with CC_HAS_ASM_GOTO_TIED_OUTPUT,
-as the "+m" constraint fails on some compilers that otherwise support
-CC_HAS_ASM_GOTO_OUTPUT.
-
-Cc: stable@vger.kernel.org
-Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
-Co-developed-by: Sean Christopherson <seanjc@google.com>
-Signed-off-by: Sean Christopherson <seanjc@google.com>
-Message-Id: <20220202004945.2540433-3-seanjc@google.com>
-Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- arch/x86/include/asm/uaccess.h | 142 +++++++++++++++++++++++++++++++++
- 1 file changed, 142 insertions(+)
-
-diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
-index 865795e2355e..7299acbbb7a6 100644
---- a/arch/x86/include/asm/uaccess.h
-+++ b/arch/x86/include/asm/uaccess.h
-@@ -538,6 +538,103 @@ __pu_label: \
- #endif // CONFIG_X86_32
- #endif // CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
-
-+#ifdef CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
-+#define __try_cmpxchg_user_asm(itype, ltype, _ptr, _pold, _new, label) ({ \
-+ bool success; \
-+ __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \
-+ __typeof__(*(_ptr)) __old = *_old; \
-+ __typeof__(*(_ptr)) __new = (_new); \
-+ asm_volatile_goto("\n" \
-+ "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
-+ _ASM_EXTABLE_UA(1b, %l[label]) \
-+ : CC_OUT(z) (success), \
-+ [ptr] "+m" (*_ptr), \
-+ [old] "+a" (__old) \
-+ : [new] ltype (__new) \
-+ : "memory" \
-+ : label); \
-+ if (unlikely(!success)) \
-+ *_old = __old; \
-+ likely(success); })
-+
-+#ifdef CONFIG_X86_32
-+#define __try_cmpxchg64_user_asm(_ptr, _pold, _new, label) ({ \
-+ bool success; \
-+ __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \
-+ __typeof__(*(_ptr)) __old = *_old; \
-+ __typeof__(*(_ptr)) __new = (_new); \
-+ asm_volatile_goto("\n" \
-+ "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n" \
-+ _ASM_EXTABLE_UA(1b, %l[label]) \
-+ : CC_OUT(z) (success), \
-+ "+A" (__old), \
-+ [ptr] "+m" (*_ptr) \
-+ : "b" ((u32)__new), \
-+ "c" ((u32)((u64)__new >> 32)) \
-+ : "memory" \
-+ : label); \
-+ if (unlikely(!success)) \
-+ *_old = __old; \
-+ likely(success); })
-+#endif // CONFIG_X86_32
-+#else // !CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
-+#define __try_cmpxchg_user_asm(itype, ltype, _ptr, _pold, _new, label) ({ \
-+ int __err = 0; \
-+ bool success; \
-+ __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \
-+ __typeof__(*(_ptr)) __old = *_old; \
-+ __typeof__(*(_ptr)) __new = (_new); \
-+ asm volatile("\n" \
-+ "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
-+ CC_SET(z) \
-+ "2:\n" \
-+ _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, \
-+ %[errout]) \
-+ : CC_OUT(z) (success), \
-+ [errout] "+r" (__err), \
-+ [ptr] "+m" (*_ptr), \
-+ [old] "+a" (__old) \
-+ : [new] ltype (__new) \
-+ : "memory", "cc"); \
-+ if (unlikely(__err)) \
-+ goto label; \
-+ if (unlikely(!success)) \
-+ *_old = __old; \
-+ likely(success); })
-+
-+#ifdef CONFIG_X86_32
-+/*
-+ * Unlike the normal CMPXCHG, hardcode ECX for both success/fail and error.
-+ * There are only six GPRs available and four (EAX, EBX, ECX, and EDX) are
-+ * hardcoded by CMPXCHG8B, leaving only ESI and EDI. If the compiler uses
-+ * both ESI and EDI for the memory operand, compilation will fail if the error
-+ * is an input+output as there will be no register available for input.
-+ */
-+#define __try_cmpxchg64_user_asm(_ptr, _pold, _new, label) ({ \
-+ int __result; \
-+ __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \
-+ __typeof__(*(_ptr)) __old = *_old; \
-+ __typeof__(*(_ptr)) __new = (_new); \
-+ asm volatile("\n" \
-+ "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n" \
-+ "mov $0, %%ecx\n\t" \
-+ "setz %%cl\n" \
-+ "2:\n" \
-+ _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %%ecx) \
-+ : [result]"=c" (__result), \
-+ "+A" (__old), \
-+ [ptr] "+m" (*_ptr) \
-+ : "b" ((u32)__new), \
-+ "c" ((u32)((u64)__new >> 32)) \
-+ : "memory", "cc"); \
-+ if (unlikely(__result < 0)) \
-+ goto label; \
-+ if (unlikely(!__result)) \
-+ *_old = __old; \
-+ likely(__result); })
-+#endif // CONFIG_X86_32
-+#endif // CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
-+
- /* FIXME: this hack is definitely wrong -AK */
- struct __large_struct { unsigned long buf[100]; };
- #define __m(x) (*(struct __large_struct __user *)(x))
-@@ -826,6 +923,51 @@ extern void __try_cmpxchg_user_wrong_size(void);
- __try_cmpxchg_user_asm("q", "r", (_ptr), (_oldp), (_nval), _label)
- #endif
-
-+/*
-+ * Force the pointer to u<size> to match the size expected by the asm helper.
-+ * clang/LLVM compiles all cases and only discards the unused paths after
-+ * processing errors, which breaks i386 if the pointer is an 8-byte value.
-+ */
-+#define unsafe_try_cmpxchg_user(_ptr, _oldp, _nval, _label) ({ \
-+ bool __ret; \
-+ __chk_user_ptr(_ptr); \
-+ switch (sizeof(*(_ptr))) { \
-+ case 1: __ret = __try_cmpxchg_user_asm("b", "q", \
-+ (__force u8 *)(_ptr), (_oldp), \
-+ (_nval), _label); \
-+ break; \
-+ case 2: __ret = __try_cmpxchg_user_asm("w", "r", \
-+ (__force u16 *)(_ptr), (_oldp), \
-+ (_nval), _label); \
-+ break; \
-+ case 4: __ret = __try_cmpxchg_user_asm("l", "r", \
-+ (__force u32 *)(_ptr), (_oldp), \
-+ (_nval), _label); \
-+ break; \
-+ case 8: __ret = __try_cmpxchg64_user_asm((__force u64 *)(_ptr), (_oldp),\
-+ (_nval), _label); \
-+ break; \
-+ default: __try_cmpxchg_user_wrong_size(); \
-+ } \
-+ __ret; })
-+
-+/* "Returns" 0 on success, 1 on failure, -EFAULT if the access faults. */
-+#define __try_cmpxchg_user(_ptr, _oldp, _nval, _label) ({ \
-+ int __ret = -EFAULT; \
-+ __uaccess_begin_nospec(); \
-+ __ret = !unsafe_try_cmpxchg_user(_ptr, _oldp, _nval, _label); \
-+_label: \
-+ __uaccess_end(); \
-+ __ret; \
-+ })
-+
-+extern void __try_cmpxchg_user_wrong_size(void);
-+
-+#ifndef CONFIG_X86_32
-+#define __try_cmpxchg64_user_asm(_ptr, _oldp, _nval, _label) \
-+ __try_cmpxchg_user_asm("q", "r", (_ptr), (_oldp), (_nval), _label)
-+#endif
-+
- /*
- * Force the pointer to u<size> to match the size expected by the asm helper.
- * clang/LLVM compiles all cases and only discards the unused paths after
---
-2.35.1
-