x86: use cmov for user address masking
author    Linus Torvalds <torvalds@linux-foundation.org>
          Tue, 10 Dec 2024 18:25:04 +0000 (10:25 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Mon, 20 Jan 2025 16:51:44 +0000 (08:51 -0800)
This was a suggestion by David Laight, and while I was slightly worried
that some micro-architecture would predict cmov like a conditional
branch, there is little reason to actually believe any core would be
that broken.

Intel documents that their existing cores treat CMOVcc as a data
dependency that will constrain speculation in their "Speculative
Execution Side Channel Mitigations" whitepaper:

  "Other instructions such as CMOVcc, AND, ADC, SBB and SETcc can also
   be used to prevent bounds check bypass by constraining speculative
   execution on current family 6 processors (Intel® Core™, Intel® Atom™,
   Intel® Xeon® and Intel® Xeon Phi™ processors)"

and while that leaves the future uarch issues open, that's certainly
true of our traditional SBB usage too.
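
For reference, here is a minimal userspace sketch of that traditional
SBB masking (x86-64 GCC/Clang inline asm only; USER_PTR_MAX_SKETCH is
a hypothetical stand-in for the kernel's runtime-patched USER_PTR_MAX
constant): the compare sets the carry flag when the pointer exceeds
the limit, and SBB turns that carry into an all-ones mask, so an
out-of-range pointer becomes all-ones and is guaranteed to fault.

  #define USER_PTR_MAX_SKETCH 0x00007fffffffffffUL

  static inline unsigned long sbb_mask_address(unsigned long ptr)
  {
          unsigned long mask = USER_PTR_MAX_SKETCH;

          asm("cmp %1,%0\n\t"     /* max - ptr: sets CF if ptr > max */
              "sbb %0,%0"         /* mask = CF ? ~0UL : 0UL */
              : "+r" (mask)
              : "r" (ptr));
          return mask | ptr;      /* in range: ptr; out of range: all-ones */
  }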

Any core that predicts CMOV will be unusable for various crypto
algorithms that need data-independent timing stability, so let's just
treat CMOV as the safe choice: it simplifies the address masking,
avoids an extra instruction, and doesn't need a temporary register.
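
The new CMOVA form as a matching userspace sketch (again x86-64 only,
USER_PTR_MAX_SKETCH as above): a pointer that compares "above" the
limit is simply replaced by the limit itself, which the kernel
arranges to be a faulting address, so the clamp needs only two
instructions and no mask register.

  static inline unsigned long cmov_mask_address(unsigned long ptr)
  {
          asm("cmp %1,%0\n\t"     /* ptr - max: "above" if ptr > max */
              "cmova %1,%0"       /* if above, clamp ptr to max */
              : "+r" (ptr)
              : "r" (USER_PTR_MAX_SKETCH));
          return ptr;
  }

For example, cmov_mask_address(0x1000) returns 0x1000 unchanged, while
a kernel-half address like 0xffff800000000000 comes back clamped to
USER_PTR_MAX_SKETCH.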

Suggested-by: David Laight <David.Laight@aculab.com>
Link: https://www.intel.com/content/dam/develop/external/us/en/documents/336996-speculative-execution-side-channel-mitigations.pdf
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/x86/include/asm/uaccess_64.h
arch/x86/lib/getuser.S

diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index b0a887209400de33aa24e75f00af64d72600291d..c52f0133425b94f5bdb3bdb958474f3736e9d57d 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -63,13 +63,13 @@ static inline unsigned long __untagged_addr_remote(struct mm_struct *mm,
  */
 static inline void __user *mask_user_address(const void __user *ptr)
 {
-       unsigned long mask;
+       void __user *ret;
        asm("cmp %1,%0\n\t"
-           "sbb %0,%0"
-               :"=r" (mask)
-               :"r" (ptr),
-                "0" (runtime_const_ptr(USER_PTR_MAX)));
-       return (__force void __user *)(mask | (__force unsigned long)ptr);
+           "cmova %1,%0"
+               :"=r" (ret)
+               :"r" (runtime_const_ptr(USER_PTR_MAX)),
+                "0" (ptr));
+       return ret;
 }
 #define masked_user_access_begin(x) ({                         \
        __auto_type __masked_ptr = (x);                         \
diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
index 4357ec2a0bfc2c887f64ba6fe6dc1d47b6956a53..89ecd57c9d4234e58a4fb4f8dd23e93774b2106b 100644
--- a/arch/x86/lib/getuser.S
+++ b/arch/x86/lib/getuser.S
@@ -44,9 +44,8 @@
   .pushsection runtime_ptr_USER_PTR_MAX,"a"
        .long 1b - 8 - .
   .popsection
-       cmp %rax, %rdx
-       sbb %rdx, %rdx
-       or %rdx, %rax
+       cmp %rdx, %rax
+       cmova %rdx, %rax
 .else
        cmp $TASK_SIZE_MAX-\size+1, %eax
        jae .Lbad_get_user