/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

/*
 * User space memory access functions
 */

#include <linux/compiler.h>
#include <linux/string.h>
#include <asm/asi.h>
#include <asm/spitfire.h>
#include <asm/extable_64.h>

#include <asm/processor.h>

/*
 * Sparc64 is segmented, though more like the M68K than the I386.
 * We use the secondary ASI to address user memory, which references a
 * completely different VM map, thus there is zero chance of the user
 * doing something queer and tricking us into poking kernel memory.
 *
 * What is left here is basically what is needed for the other parts of
 * the kernel that expect to be able to manipulate, erum, "segments".
 * Or perhaps more properly, permissions.
 *
 * "For historical reasons, these macros are grossly misnamed." -Linus
 */

#define KERNEL_DS	((mm_segment_t) { ASI_P })
#define USER_DS		((mm_segment_t) { ASI_AIUS })	/* har har har */

#define get_fs()	((mm_segment_t){(current_thread_info()->current_ds)})
#define get_ds()	(KERNEL_DS)

#define segment_eq(a, b)	((a).seg == (b).seg)

#define set_fs(val)							\
do {									\
	current_thread_info()->current_ds = (val).seg;			\
	__asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg)); \
} while(0)

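/*
 * Illustrative sketch (not part of the original header): the classic pattern
 * kernel code historically used when it wanted the user-access routines to
 * address kernel memory. On sparc64 this simply flips the secondary %asi
 * between ASI_AIUS and ASI_P. The helper named below is hypothetical and
 * only shows how get_fs()/set_fs() pair up.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	ret = some_uaccess_helper(kernel_ptr, len);	// hypothetical helper
 *	set_fs(old_fs);
 */
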
/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	if (__builtin_constant_p(size))
		return addr > limit - size;

	addr += size;
	if (addr < size)
		return true;

	return addr > limit;
}

#define __range_not_ok(addr, size, limit)				\
({									\
	__chk_user_ptr(addr);						\
	__chk_range_not_ok((unsigned long __force)(addr), size, limit);\
})

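/*
 * Worked example (added note, not from the original source): the
 * "addr += size; if (addr < size)" test above catches address-space
 * wrap-around. With addr = ~0UL - 8 and size = 64, addr + size wraps to a
 * small value below size, so the range is rejected even though the final
 * "addr > limit" comparison alone would have accepted it.
 */
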
static inline int __access_ok(const void __user * addr, unsigned long size)
{
	return 1;
}

static inline int access_ok(int type, const void __user * addr, unsigned long size)
{
	return 1;
}

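/*
 * The unconditional "return 1" above is intentional: sparc64 performs user
 * accesses through the secondary ASI, so a bad user pointer simply faults
 * at access time and is resolved through the __ex_table fixups emitted by
 * the accessors below, rather than by an up-front range check.
 */
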
void __retl_efault(void);

/* Uh, these should become the main single-value transfer routines..
 * They automatically use the right size if we just have the right
 * pointer type..
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 */
#define put_user(x, ptr) ({					\
	unsigned long __pu_addr = (unsigned long)(ptr);		\
	__chk_user_ptr(ptr);					\
	__put_user_nocheck((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr)));\
})

#define get_user(x, ptr) ({					\
	unsigned long __gu_addr = (unsigned long)(ptr);		\
	__chk_user_ptr(ptr);					\
	__get_user_nocheck((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr)));\
})

#define __put_user(x, ptr) put_user(x, ptr)
#define __get_user(x, ptr) get_user(x, ptr)

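/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * caller showing how the two values "returned" by get_user()/put_user()
 * come back -- the datum through the first argument, the error code as
 * the macro's value (0 on success, -EFAULT on a faulting access).
 *
 *	static int example_copy_flag(int __user *uptr, int __user *dst)
 *	{
 *		int val;
 *
 *		if (get_user(val, uptr))	// faulting load -> -EFAULT
 *			return -EFAULT;
 *		return put_user(val, dst);	// 0 on success
 *	}
 */
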
struct __large_struct { unsigned long buf[100]; };
#define __m(x) ((struct __large_struct *)(x))

#define __put_user_nocheck(data, addr, size) ({				\
	register int __pu_ret;						\
	switch (size) {							\
	case 1: __put_user_asm(data, b, addr, __pu_ret); break;	\
	case 2: __put_user_asm(data, h, addr, __pu_ret); break;	\
	case 4: __put_user_asm(data, w, addr, __pu_ret); break;	\
	case 8: __put_user_asm(data, x, addr, __pu_ret); break;	\
	default: __pu_ret = __put_user_bad(); break;			\
	}								\
	__pu_ret;							\
})

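/*
 * Fixup scheme used by the accessors below: label "1:" is the access
 * instruction, executed through the secondary %asi selected by set_fs().
 * The __ex_table entry ".word 1b, 3b" tells the trap handler that a fault
 * at "1:" should resume at the "3:" stub, which loads -EFAULT into the
 * return register and branches back to "2:", just past the access. On the
 * non-faulting path, "clr %0" leaves the return value at 0.
 */
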
#define __put_user_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
		"/* Put user asm, inline. */\n"				\
	"1:\t"	"st"#size "a %1, [%2] %%asi\n\t"			\
		"clr	%0\n"						\
	"2:\n\n\t"							\
		".section .fixup,#alloc,#execinstr\n\t"			\
		".align	4\n"						\
	"3:\n\t"							\
		"sethi	%%hi(2b), %0\n\t"				\
		"jmpl	%0 + %%lo(2b), %%g0\n\t"			\
		" mov	%3, %0\n\n\t"					\
		".previous\n\t"						\
		".section __ex_table,\"a\"\n\t"				\
		".align	4\n\t"						\
		".word	1b, 3b\n\t"					\
		".previous\n\n\t"					\
	       : "=r" (ret) : "r" (x), "r" (__m(addr)),			\
		 "i" (-EFAULT))

int __put_user_bad(void);

#define __get_user_nocheck(data, addr, size, type) ({			\
	register int __gu_ret;						\
	register unsigned long __gu_val;				\
	switch (size) {							\
	case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break;	\
	case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break;	\
	case 4: __get_user_asm(__gu_val, uw, addr, __gu_ret); break;	\
	case 8: __get_user_asm(__gu_val, x, addr, __gu_ret); break;	\
	default:							\
		__gu_val = 0;						\
		__gu_ret = __get_user_bad();				\
		break;							\
	}								\
	data = (__force type) __gu_val;					\
	__gu_ret;							\
})

#define __get_user_asm(x, size, addr, ret)				\
__asm__ __volatile__(							\
		"/* Get user asm, inline. */\n"				\
	"1:\t"	"ld"#size "a [%2] %%asi, %1\n\t"			\
		"clr	%0\n"						\
	"2:\n\n\t"							\
		".section .fixup,#alloc,#execinstr\n\t"			\
		".align	4\n"						\
	"3:\n\t"							\
		"sethi	%%hi(2b), %0\n\t"				\
		"clr	%1\n\t"						\
		"jmpl	%0 + %%lo(2b), %%g0\n\t"			\
		" mov	%3, %0\n\n\t"					\
		".previous\n\t"						\
		".section __ex_table,\"a\"\n\t"				\
		".align	4\n\t"						\
		".word	1b, 3b\n\n\t"					\
		".previous\n\t"						\
	       : "=r" (ret), "=r" (x) : "r" (__m(addr)),		\
		 "i" (-EFAULT))

int __get_user_bad(void);

unsigned long __must_check raw_copy_from_user(void *to,
					      const void __user *from,
					      unsigned long size);

unsigned long __must_check raw_copy_to_user(void __user *to,
					    const void *from,
					    unsigned long size);
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER

unsigned long __must_check raw_copy_in_user(void __user *to,
					    const void __user *from,
					    unsigned long size);
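
/*
 * Illustrative sketch (not part of the original header): raw_copy_*_user()
 * back the generic copy_{from,to}_user() wrappers, which <linux/uaccess.h>
 * inlines here because of INLINE_COPY_{FROM,TO}_USER. A hypothetical caller:
 *
 *	struct example_req req;			// hypothetical structure
 *
 *	if (copy_from_user(&req, ubuf, sizeof(req)))
 *		return -EFAULT;			// nonzero = bytes NOT copied
 */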

unsigned long __must_check __clear_user(void __user *, unsigned long);

#define clear_user __clear_user

__must_check long strnlen_user(const char __user *str, long n);

struct pt_regs;
unsigned long compute_effective_address(struct pt_regs *,
					unsigned int insn,
					unsigned int rd);

#endif /* _ASM_UACCESS_H */