/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ARCH_POWERPC_UACCESS_H
#define _ARCH_POWERPC_UACCESS_H

#include <asm/ppc_asm.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/extable.h>
#include <asm/kup.h>
/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 *
 * The fs/ds values are now the highest legal address in the "segment".
 * This simplifies the checking in the routines below.
 */
#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(~0UL)
#ifdef __powerpc64__
/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_USER64 - 1)
#else
#define USER_DS		MAKE_MM_SEG(TASK_SIZE - 1)
#endif
#define get_fs()	(current->thread.addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current->thread.addr_limit = fs;
	/* On user-mode return check addr_limit (fs) is correct */
	set_thread_flag(TIF_FSCHECK);
}
#define segment_eq(a, b)	((a).seg == (b).seg)

#define user_addr_max()	(get_fs().seg)
#ifdef __powerpc64__
/*
 * This check is sufficient because there is a large enough
 * gap between user addresses and the kernel addresses.
 */
#define __access_ok(addr, size, segment)	\
	(((addr) <= (segment).seg) && ((size) <= (segment).seg))

#else

static inline int __access_ok(unsigned long addr, unsigned long size,
			mm_segment_t seg)
{
	if (addr > seg.seg)
		return 0;
	return (size == 0 || size - 1 <= seg.seg - addr);
}

#endif
#define access_ok(addr, size)		\
	(__chk_user_ptr(addr),		\
	 __access_ok((__force unsigned long)(addr), (size), get_fs()))
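/*
 * Usage sketch (illustrative): validate a user range once, then use the
 * non-checking __get_user()/__put_user() variants on it:
 *
 *	u32 __user *p = (u32 __user *)arg;
 *	u32 lo, hi;
 *
 *	if (!access_ok(p, 2 * sizeof(u32)))
 *		return -EFAULT;
 *	if (__get_user(lo, p) || __get_user(hi, p + 1))
 *		return -EFAULT;
 */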
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on the
 * PowerPC, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 */
#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
#define put_user(x, ptr) \
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), true)
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), true)

#define __get_user_allowed(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), false)
#define __put_user_allowed(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), false)

#define __get_user_inatomic(x, ptr) \
	__get_user_nosleep((x), (ptr), sizeof(*(ptr)))
#define __put_user_inatomic(x, ptr) \
	__put_user_nosleep((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
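/*
 * Usage sketch (illustrative): all of these return 0 on success and
 * -EFAULT on a faulting access, so callers test the return value:
 *
 *	int val;
 *
 *	if (get_user(val, (int __user *)uptr))
 *		return -EFAULT;
 *	if (put_user(val + 1, (int __user *)uptr))
 *		return -EFAULT;
 */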
extern long __put_user_bad(void);
/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 */
#define __put_user_asm(x, addr, err, op)			\
	__asm__ __volatile__(					\
		"1:	" op " %1,0(%2)	# put_user\n"		\
		"2:\n"						\
		".section .fixup,\"ax\"\n"			\
		"3:	li %0,%3\n"				\
		"	b 2b\n"					\
		".previous\n"					\
		EX_TABLE(1b, 3b)				\
		: "=r" (err)					\
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
#ifdef __powerpc64__
#define __put_user_asm2(x, ptr, retval)				\
	__put_user_asm(x, ptr, retval, "std")
#else /* __powerpc64__ */
#define __put_user_asm2(x, addr, err)				\
	__asm__ __volatile__(					\
		"1:	stw %1,0(%2)\n"				\
		"2:	stw %1+1,4(%2)\n"			\
		"3:\n"						\
		".section .fixup,\"ax\"\n"			\
		"4:	li %0,%3\n"				\
		"	b 3b\n"					\
		".previous\n"					\
		EX_TABLE(1b, 4b)				\
		EX_TABLE(2b, 4b)				\
		: "=r" (err)					\
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */
#define __put_user_size_allowed(x, ptr, size, retval)		\
do {								\
	retval = 0;						\
	switch (size) {						\
	case 1: __put_user_asm(x, ptr, retval, "stb"); break;	\
	case 2: __put_user_asm(x, ptr, retval, "sth"); break;	\
	case 4: __put_user_asm(x, ptr, retval, "stw"); break;	\
	case 8: __put_user_asm2(x, ptr, retval); break;		\
	default: __put_user_bad();				\
	}							\
} while (0)
#define __put_user_size(x, ptr, size, retval)			\
do {								\
	allow_write_to_user(ptr, size);				\
	__put_user_size_allowed(x, ptr, size, retval);		\
	prevent_write_to_user(ptr, size);			\
} while (0)
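/*
 * Note: the allow_*_user()/prevent_*_user() pairs used here and below come
 * from asm/kup.h; they open and close a Kernel Userspace Access Prevention
 * (KUAP) window around the actual user memory access.
 */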
#define __put_user_nocheck(x, ptr, size, do_allow)		\
({								\
	long __pu_err;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	if (!is_kernel_addr((unsigned long)__pu_addr))		\
		might_fault();					\
	__chk_user_ptr(ptr);					\
	if (do_allow)						\
		__put_user_size((x), __pu_addr, (size), __pu_err); \
	else							\
		__put_user_size_allowed((x), __pu_addr, (size), __pu_err); \
	__pu_err;						\
})
#define __put_user_check(x, ptr, size)				\
({								\
	long __pu_err = -EFAULT;				\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	might_fault();						\
	if (access_ok(__pu_addr, size))				\
		__put_user_size((x), __pu_addr, (size), __pu_err); \
	__pu_err;						\
})
#define __put_user_nosleep(x, ptr, size)			\
({								\
	long __pu_err;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	__chk_user_ptr(ptr);					\
	__put_user_size((x), __pu_addr, (size), __pu_err);	\
	__pu_err;						\
})
extern long __get_user_bad(void);
/*
 * This does an atomic 128 byte aligned load from userspace.
 * Up to the caller to do enable_kernel_vmx() before calling!
 */
#define __get_user_atomic_128_aligned(kaddr, uaddr, err)	\
	__asm__ __volatile__(					\
		"1:	lvx  0,0,%1	# get user\n"		\
		"	stvx 0,0,%2	# put kernel\n"		\
		"2:\n"						\
		".section .fixup,\"ax\"\n"			\
		"3:	li %0,%3\n"				\
		"	b 2b\n"					\
		".previous\n"					\
		EX_TABLE(1b, 3b)				\
		: "=r" (err)					\
		: "b" (uaddr), "b" (kaddr), "i" (-EFAULT), "0" (err))
#define __get_user_asm(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" %1,0(%2)	# get_user\n"	\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	b 2b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err), "=r" (x)			\
		: "b" (addr), "i" (-EFAULT), "0" (err))
#ifdef __powerpc64__
#define __get_user_asm2(x, addr, err)			\
	__get_user_asm(x, addr, err, "ld")
#else /* __powerpc64__ */
#define __get_user_asm2(x, addr, err)			\
	__asm__ __volatile__(				\
		"1:	lwz %1,0(%2)\n"			\
		"2:	lwz %1+1,4(%2)\n"		\
		"3:\n"					\
		".section .fixup,\"ax\"\n"		\
		"4:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	li %1+1,0\n"			\
		"	b 3b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 4b)			\
		EX_TABLE(2b, 4b)			\
		: "=r" (err), "=&r" (x)			\
		: "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */
#define __get_user_size_allowed(x, ptr, size, retval)		\
do {								\
	retval = 0;						\
	__chk_user_ptr(ptr);					\
	if (size > sizeof(x))					\
		(x) = __get_user_bad();				\
	switch (size) {						\
	case 1: __get_user_asm(x, ptr, retval, "lbz"); break;	\
	case 2: __get_user_asm(x, ptr, retval, "lhz"); break;	\
	case 4: __get_user_asm(x, ptr, retval, "lwz"); break;	\
	case 8: __get_user_asm2(x, ptr, retval); break;		\
	default: (x) = __get_user_bad();			\
	}							\
} while (0)
#define __get_user_size(x, ptr, size, retval)			\
do {								\
	allow_read_from_user(ptr, size);			\
	__get_user_size_allowed(x, ptr, size, retval);		\
	prevent_read_from_user(ptr, size);			\
} while (0)
/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __long_type(x) \
	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
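/*
 * For example (illustrative): __long_type(*(u32 __user *)p) evaluates to
 * unsigned long everywhere, while __long_type(*(u64 __user *)p) is
 * unsigned long long on 32-bit, where a u64 does not fit in unsigned long.
 */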
#define __get_user_nocheck(x, ptr, size, do_allow)		\
({								\
	long __gu_err;						\
	__long_type(*(ptr)) __gu_val;				\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	__chk_user_ptr(ptr);					\
	if (!is_kernel_addr((unsigned long)__gu_addr))		\
		might_fault();					\
	barrier_nospec();					\
	if (do_allow)						\
		__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
	else							\
		__get_user_size_allowed(__gu_val, __gu_addr, (size), __gu_err); \
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})
#define __get_user_check(x, ptr, size)				\
({								\
	long __gu_err = -EFAULT;				\
	__long_type(*(ptr)) __gu_val = 0;			\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	might_fault();						\
	if (access_ok(__gu_addr, (size))) {			\
		barrier_nospec();				\
		__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
	}							\
	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
	__gu_err;						\
})
#define __get_user_nosleep(x, ptr, size)			\
({								\
	long __gu_err;						\
	__long_type(*(ptr)) __gu_val;				\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	__chk_user_ptr(ptr);					\
	barrier_nospec();					\
	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
	__gu_err;						\
})
/* more complex routines */
extern unsigned long __copy_tofrom_user(void __user *to,
		const void __user *from, unsigned long size);
#ifdef __powerpc64__
static inline unsigned long
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	unsigned long ret;

	barrier_nospec();
	allow_read_write_user(to, from, n);
	ret = __copy_tofrom_user(to, from, n);
	prevent_read_write_user(to, from, n);
	return ret;
}
#endif /* __powerpc64__ */
static inline unsigned long raw_copy_from_user(void *to,
		const void __user *from, unsigned long n)
{
	unsigned long ret;

	if (__builtin_constant_p(n) && (n <= 8)) {
		ret = 1;

		switch (n) {
		case 1:
			barrier_nospec();
			__get_user_size(*(u8 *)to, from, 1, ret);
			break;
		case 2:
			barrier_nospec();
			__get_user_size(*(u16 *)to, from, 2, ret);
			break;
		case 4:
			barrier_nospec();
			__get_user_size(*(u32 *)to, from, 4, ret);
			break;
		case 8:
			barrier_nospec();
			__get_user_size(*(u64 *)to, from, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}

	barrier_nospec();
	allow_read_from_user(from, n);
	ret = __copy_tofrom_user((__force void __user *)to, from, n);
	prevent_read_from_user(from, n);
	return ret;
}
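/*
 * Illustrative consequence of the constant-size fast path above: a
 * fixed-size copy such as
 *
 *	u64 v;	(hypothetical local)
 *
 *	if (copy_from_user(&v, uptr, sizeof(v)))
 *		return -EFAULT;
 *
 * reduces to a single 8-byte __get_user_size() instead of a call to
 * __copy_tofrom_user().
 */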
static inline unsigned long
raw_copy_to_user_allowed(void __user *to, const void *from, unsigned long n)
{
	if (__builtin_constant_p(n) && (n <= 8)) {
		unsigned long ret = 1;

		switch (n) {
		case 1:
			__put_user_size_allowed(*(u8 *)from, (u8 __user *)to, 1, ret);
			break;
		case 2:
			__put_user_size_allowed(*(u16 *)from, (u16 __user *)to, 2, ret);
			break;
		case 4:
			__put_user_size_allowed(*(u32 *)from, (u32 __user *)to, 4, ret);
			break;
		case 8:
			__put_user_size_allowed(*(u64 *)from, (u64 __user *)to, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}

	return __copy_tofrom_user(to, (__force const void __user *)from, n);
}
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	unsigned long ret;

	allow_write_to_user(to, n);
	ret = raw_copy_to_user_allowed(to, from, n);
	prevent_write_to_user(to, n);
	return ret;
}
static __always_inline unsigned long __must_check
copy_to_user_mcsafe(void __user *to, const void *from, unsigned long n)
{
	if (likely(check_copy_size(from, n, true))) {
		if (access_ok(to, n)) {
			allow_write_to_user(to, n);
			n = memcpy_mcsafe((void *)to, from, n);
			prevent_write_to_user(to, n);
		}
	}

	return n;
}
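/*
 * Usage sketch (illustrative): as with copy_to_user(), the return value
 * is the number of bytes left uncopied, so zero means success:
 *
 *	if (copy_to_user_mcsafe(ubuf, kbuf, len))
 *		return -EFAULT;
 */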
unsigned long __arch_clear_user(void __user *addr, unsigned long size);
static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
	unsigned long ret = size;

	might_fault();
	if (likely(access_ok(addr, size))) {
		allow_write_to_user(addr, size);
		ret = __arch_clear_user(addr, size);
		prevent_write_to_user(addr, size);
	}
	return ret;
}
static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{
	return clear_user(addr, size);
}
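/*
 * Usage sketch (illustrative): zero the unused tail of a user buffer;
 * a non-zero return is the number of bytes that could not be cleared:
 *
 *	if (clear_user(ubuf + used, size - used))
 *		return -EFAULT;
 */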
extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);
extern long __copy_from_user_flushcache(void *dst, const void __user *src,
		size_t size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
		size_t len);
static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr, len)))
		return false;
	allow_read_write_user((void __user *)ptr, ptr, len);
	return true;
}
#define user_access_begin	user_access_begin
#define user_access_end		prevent_current_access_user
#define user_access_save	prevent_user_access_return
#define user_access_restore	restore_user_access
#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e)
#define unsafe_put_user(x, p, e) unsafe_op_wrap(__put_user_allowed(x, p), e)
#define unsafe_copy_to_user(d, s, l, e) \
	unsafe_op_wrap(raw_copy_to_user_allowed(d, s, l), e)
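/*
 * Usage sketch (illustrative): the unsafe_*() accessors must run inside a
 * user_access_begin()/user_access_end() section and branch to a local
 * label on fault:
 *
 *	if (!user_access_begin(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	unsafe_put_user(val, uptr, Efault);
 *	user_access_end();
 *	return 0;
 * Efault:
 *	user_access_end();
 *	return -EFAULT;
 */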
#endif	/* _ARCH_POWERPC_UACCESS_H */