arch/x86/include/asm/uaccess_64.h (thirdparty/kernel/stable.git, at "x86: don't use REP_GOOD or ERMS for user memory clearing")
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/lockdep.h>
#include <linux/kasan-checks.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_fast_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);

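/*
 * Both of these return the number of bytes that could not be copied
 * (0 on success); a fault in the middle of the copy is handled by the
 * exception tables in their out-of-line implementations.  The access_ok()
 * check is done by higher-level callers such as copy_from_user().
 */
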
static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
	unsigned ret;

	/*
	 * If the CPU has the FSRM feature, use copy_user_fast_string()
	 * ('rep movs'); otherwise, use copy_user_generic_unrolled().
	 */
	alternative_call(copy_user_generic_unrolled,
			 copy_user_fast_string,
			 X86_FEATURE_FSRM,
			 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
				     "=d" (len)),
			 "1" (to), "2" (from), "3" (len)
			 : "memory", "rcx", "r8", "r9", "r10", "r11");
	return ret;
}
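
/*
 * Note on the constraints above: 'to' is passed in %rdi, 'from' in %rsi
 * and 'len' in %edx, and the number of uncopied bytes comes back in %eax,
 * so on FSRM hardware the patched-in call behaves roughly like
 *
 *	ret = copy_user_fast_string(to, from, len);
 *
 * while CPUs without FSRM keep calling copy_user_generic_unrolled().
 */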

static __always_inline __must_check unsigned long
raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
{
	return copy_user_generic(dst, (__force void *)src, size);
}

static __always_inline __must_check unsigned long
raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
{
	return copy_user_generic((__force void *)dst, src, size);
}
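
/*
 * Typical (illustrative) usage: drivers normally go through
 * copy_from_user()/copy_to_user(), which perform access_ok() and then
 * end up in the raw_* helpers above, e.g.
 *
 *	char kbuf[64];
 *
 *	if (copy_from_user(kbuf, ubuf, sizeof(kbuf)))
 *		return -EFAULT;
 *
 * A non-zero return value is the number of bytes left uncopied.
 */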

extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
				   size_t len);

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_nocache(dst, src, size, 0);
}

static inline int
__copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, src, size);
}
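
/*
 * The nocache/flushcache variants are meant for destinations that should
 * not be left dirty in the CPU caches (e.g. copying into persistent
 * memory): the bulk of the data is written with non-temporal stores, and
 * the flushcache variant additionally flushes any partially written
 * cachelines.  As with the helpers above, the return value is the number
 * of bytes not copied.
 */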

/*
 * Zero Userspace.
 */

__must_check unsigned long
clear_user_original(void __user *addr, unsigned long len);

static __always_inline __must_check unsigned long __clear_user(void __user *addr, unsigned long size)
{
	might_fault();
	stac();

	/*
	 * No memory constraint because it doesn't change any memory gcc
	 * knows about.
	 */
	asm volatile(
		"1:\n\t"
		ALTERNATIVE("rep stosb",
			    "call clear_user_original", ALT_NOT(X86_FEATURE_FSRS))
		"2:\n"
		_ASM_EXTABLE_UA(1b, 2b)
		: "+c" (size), "+D" (addr), ASM_CALL_CONSTRAINT
		: "a" (0));

	clac();

	return size;
}
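
/*
 * How the asm above works: %rcx holds the byte count, %rdi the user
 * pointer (already opened with stac()) and %eax is zero, so on CPUs with
 * "fast short rep stos" (FSRS) the inlined 'rep stosb' clears the buffer
 * directly; everything else gets a patched-in call to
 * clear_user_original(), which uses the same register convention.  If a
 * write faults, the exception table entry resumes execution at label 2,
 * and whatever remains in %rcx is returned as the number of bytes that
 * were not cleared.
 */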

static __always_inline unsigned long clear_user(void __user *to, unsigned long n)
{
	if (access_ok(to, n))
		return __clear_user(to, n);
	return n;
}
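
/*
 * Typical (illustrative) usage: zero a user-supplied buffer and treat a
 * partial clear as a fault:
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 *
 * clear_user() performs the access_ok() check itself; __clear_user() is
 * for callers that have already validated the range.
 */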
#endif /* _ASM_X86_UACCESS_64_H */