#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__

#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/kasan-checks.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)

#include <asm/uaccess.h>

#ifdef CONFIG_ARCH_HAS_RAW_COPY_USER
/*
 * Architectures should provide two primitives (raw_copy_{to,from}_user()),
 * select ARCH_HAS_RAW_COPY_USER and get rid of their private instances
 * of copy_{to,from}_user() and __copy_{to,from}_user{,_inatomic}(). Once
 * all of them switch, this part of linux/uaccess.h will become unconditional.
 *
 * raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and
 * return the amount left to copy. They should assume that access_ok() has
 * already been checked (and succeeded); they should *not* zero-pad anything.
 * No KASAN or object size checks either - those belong here.
 *
 * Both of these functions should attempt to copy size bytes starting at from
 * into the area starting at to. They must not fetch or store anything
 * outside of those areas. Return value must be between 0 (everything
 * copied successfully) and size (nothing copied).
 *
 * If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting
 * at to must become equal to the bytes fetched from the corresponding area
 * starting at from. All data past to + size - N must be left unmodified.
 *
 * If copying succeeds, the return value must be 0. If some data cannot be
 * fetched, it is permitted to copy less than had been fetched; the only
 * hard requirement is that not storing anything at all (i.e. returning size)
 * should happen only when nothing could be copied. In other words, you don't
 * have to squeeze as much as possible - it is allowed, but not necessary.
 *
 * For raw_copy_from_user() 'to' always points to kernel memory and no faults
 * on store should happen. Interpretation of 'from' is affected by set_fs().
 * For raw_copy_to_user() it's the other way round.
 *
 * Both can be inlined - it's up to architectures whether they want to bother
 * with that. They should not be used directly; they are used to implement
 * the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user(),
 * __copy_{to,from}_user_inatomic()) that are used instead. Out of those,
 * the __... ones are inlined. Plain copy_{to,from}_user() might or might not
 * be inlined. If you want them inlined, have asm/uaccess.h define
 * INLINE_COPY_{TO,FROM}_USER.
 *
 * NOTE: only copy_from_user() zero-pads the destination in case of short copy.
 * Neither __copy_from_user() nor __copy_from_user_inatomic() zero anything
 * at all; their callers absolutely must check the return value.
 *
 * Biarch ones should also provide raw_copy_in_user() - similar to the above,
 * but both source and destination are __user pointers (affected by set_fs()
 * as usual) and both source and destination can trigger faults.
 */
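
/*
 * A minimal sketch of the contract above, not part of this header: a
 * hypothetical architecture could implement raw_copy_from_user() as a
 * byte-wise loop (real implementations use optimized assembly). Names
 * here are illustrative only.
 *
 *	static inline unsigned long
 *	raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 *	{
 *		char *dst = to;
 *		const char __user *src = from;
 *		unsigned long left = n;
 *
 *		while (left) {
 *			char c;
 *
 *			if (__get_user(c, src))	// fault: stop early, no zero-pad
 *				break;
 *			*dst++ = c;
 *			src++;
 *			left--;
 *		}
 *		return left;	// bytes NOT copied; 0 on full success
 *	}
 */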

static __always_inline unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	kasan_check_write(to, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}

static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();
	kasan_check_write(to, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}

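/*
 * Illustrative caller, not part of this header: since __copy_from_user()
 * does not zero-pad on a short copy, the return value must always be
 * checked. kbuf, ubuf and len are hypothetical.
 *
 *	if (__copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;	// short copy: tail of kbuf is stale
 */
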
/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to: Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space. The caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure the user space address is pinned,
 * so that we don't take a page fault and sleep.
 */
static __always_inline unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	kasan_check_read(from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}

static __always_inline unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	kasan_check_read(from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}

#ifdef INLINE_COPY_FROM_USER
static inline unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;
	if (likely(access_ok(VERIFY_READ, from, n)))
		res = raw_copy_from_user(to, from, n);
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}
#else
extern unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
#endif

#ifdef INLINE_COPY_TO_USER
static inline unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = raw_copy_to_user(to, from, n);
	return n;
}
#else
extern unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif
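
/*
 * Worked example of the zero-pad logic above: with n == 16 and
 * raw_copy_from_user() returning res == 6, bytes to[10..15] are cleared
 * by memset(to + 10, 0, 6), so copy_from_user() callers always see a
 * fully initialized destination buffer even after a short copy.
 */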

extern void __compiletime_error("usercopy buffer size is too small")
__bad_copy_user(void);

static inline void copy_user_overflow(int size, unsigned long count)
{
	WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
}

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	int sz = __compiletime_object_size(to);

	might_fault();
	kasan_check_write(to, n);

	if (likely(sz < 0 || sz >= n)) {
		check_object_size(to, n, false);
		n = _copy_from_user(to, from, n);
	} else if (!__builtin_constant_p(n))
		copy_user_overflow(sz, n);
	else
		__bad_copy_user();

	return n;
}
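
/*
 * Typical use, illustrative only; uarg and struct foo_args are
 * hypothetical:
 *
 *	struct foo_args args;
 *
 *	if (copy_from_user(&args, uarg, sizeof(args)))
 *		return -EFAULT;
 *
 * A non-zero return is the number of bytes that could not be copied;
 * callers conventionally turn any short copy into -EFAULT.
 */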

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	int sz = __compiletime_object_size(from);

	kasan_check_read(from, n);
	might_fault();

	if (likely(sz < 0 || sz >= n)) {
		check_object_size(from, n, true);
		n = _copy_to_user(to, from, n);
	} else if (!__builtin_constant_p(n))
		copy_user_overflow(sz, n);
	else
		__bad_copy_user();

	return n;
}
#ifdef CONFIG_COMPAT
static __always_inline unsigned long __must_check
__copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_fault();
	return raw_copy_in_user(to, from, n);
}
static __always_inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_fault();
	if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
		n = raw_copy_in_user(to, from, n);
	return n;
}
#endif
#endif
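
/*
 * Illustrative compat-layer use, with hypothetical pointers: copying a
 * block from one user address to another while translating a 32-bit
 * syscall's arguments:
 *
 *	if (copy_in_user(new_ubuf, old_ubuf, len))
 *		return -EFAULT;
 */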

static __always_inline void pagefault_disabled_inc(void)
{
	current->pagefault_disabled++;
}

static __always_inline void pagefault_disabled_dec(void)
{
	current->pagefault_disabled--;
	WARN_ON(current->pagefault_disabled < 0);
}

/*
 * These routines enable/disable the pagefault handler. If disabled, it will
 * not take any locks and go straight to the fixup table.
 *
 * User access methods will not sleep when called from a pagefault_disabled()
 * environment.
 */
static inline void pagefault_disable(void)
{
	pagefault_disabled_inc();
	/*
	 * make sure to have issued the store before a pagefault
	 * can hit.
	 */
	barrier();
}

static inline void pagefault_enable(void)
{
	/*
	 * make sure to issue those last loads/stores before enabling
	 * the pagefault handler again.
	 */
	barrier();
	pagefault_disabled_dec();
}
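
/*
 * Illustrative pattern, not part of this header: copying from user space
 * in a context that must not sleep; kbuf, ubuf and len are hypothetical.
 *
 *	unsigned long ret;
 *
 *	pagefault_disable();
 *	ret = __copy_from_user_inatomic(kbuf, ubuf, len);
 *	pagefault_enable();
 *	if (ret)
 *		ret = copy_from_user(kbuf, ubuf, len);	// sleeping fallback
 */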

/*
 * Is the pagefault handler disabled? If so, user access methods will not sleep.
 */
#define pagefault_disabled() (current->pagefault_disabled != 0)

/*
 * The pagefault handler is in general disabled by pagefault_disable() or
 * when in irq context (via in_atomic()).
 *
 * This function should only be used by the fault handlers. Other users should
 * stick to pagefault_disabled().
 * Please NEVER use preempt_disable() to disable the fault handler. With
 * !CONFIG_PREEMPT_COUNT it is a no-op, so the handler would not actually be
 * disabled; in_atomic() will report different values depending on
 * CONFIG_PREEMPT_COUNT.
 */
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())

#ifndef ARCH_HAS_NOCACHE_UACCESS

static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
				const void __user *from, unsigned long n)
{
	return __copy_from_user_inatomic(to, from, n);
}

#endif	/* ARCH_HAS_NOCACHE_UACCESS */

/*
 * probe_kernel_read(): safely attempt to read from a location
 * @dst: pointer to the buffer that shall take the data
 * @src: address to read from
 * @size: size of the data chunk
 *
 * Safely read from address @src to the buffer at @dst. If a kernel fault
 * happens, handle that and return -EFAULT.
 */
extern long probe_kernel_read(void *dst, const void *src, size_t size);
extern long __probe_kernel_read(void *dst, const void *src, size_t size);

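/*
 * Illustrative use, with a hypothetical pointer: reading a word that may
 * be unmapped, e.g. from a debugging or tracing context:
 *
 *	unsigned long val;
 *
 *	if (probe_kernel_read(&val, maybe_bad_ptr, sizeof(val)))
 *		pr_warn("address not readable\n");
 */
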
/*
 * probe_kernel_write(): safely attempt to write to a location
 * @dst: address to write to
 * @src: pointer to the data that shall be written
 * @size: size of the data chunk
 *
 * Safely write to address @dst from the buffer at @src. If a kernel fault
 * happens, handle that and return -EFAULT.
 */
extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);

extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);

/**
 * probe_kernel_address(): safely attempt to read from a location
 * @addr: address to read from
 * @retval: read into this variable
 *
 * Returns 0 on success, or -EFAULT.
 */
#define probe_kernel_address(addr, retval)	\
	probe_kernel_read(&retval, addr, sizeof(retval))

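/*
 * Example expansion, for illustration: probe_kernel_address(ptr, insn)
 * becomes probe_kernel_read(&insn, ptr, sizeof(insn)), so @retval must
 * be an lvalue of exactly the size to be fetched.
 */
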
#ifndef user_access_begin
#define user_access_begin() do { } while (0)
#define user_access_end() do { } while (0)
#define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
#endif
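
/*
 * Illustrative pattern for the unsafe accessors above, assuming a prior
 * access_ok() check; uptr, a and b are hypothetical:
 *
 *	user_access_begin();
 *	unsafe_get_user(a, &uptr[0], efault);
 *	unsafe_get_user(b, &uptr[1], efault);
 *	user_access_end();
 *	return 0;
 * efault:
 *	user_access_end();
 *	return -EFAULT;
 */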

#endif	/* __LINUX_UACCESS_H__ */