/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PARISC_UACCESS_H
#define __PARISC_UACCESS_H

/*
 * User space memory access functions
 */
#include <asm/page.h>
#include <asm/cache.h>

#include <linux/bug.h>
#include <linux/string.h>

#define KERNEL_DS	((mm_segment_t){0})
#define USER_DS		((mm_segment_t){1})

#define segment_eq(a, b) ((a).seg == (b).seg)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

/*
 * Note that since kernel addresses are in a separate address space on
 * parisc, we don't need to do anything for access_ok().
 * We just let the page fault handler do the right thing. This also means
 * that put_user is the same as __put_user, etc.
 */

#define access_ok(type, uaddr, size)	\
	( (uaddr) == (uaddr) )

#define put_user __put_user
#define get_user __get_user

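/*
 * Illustrative usage sketch (not part of this header; the function and
 * variable names are hypothetical): because access_ok() is a no-op on
 * parisc, get_user()/put_user() behave exactly like their
 * double-underscore variants and simply return 0 or -EFAULT.
 *
 *	static int example_copy_flag(u32 __user *uptr, u32 *kptr)
 *	{
 *		u32 val;
 *
 *		if (get_user(val, uptr))	// a fault resolves to -EFAULT
 *			return -EFAULT;
 *		*kptr = val;
 *		return put_user(val + 1, uptr);	// write the updated value back
 *	}
 */
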
#if !defined(CONFIG_64BIT)
#define LDD_USER(val, ptr)	__get_user_asm64(val, ptr)
#define STD_USER(x, ptr)	__put_user_asm64(x, ptr)
#else
#define LDD_USER(val, ptr)	__get_user_asm(val, "ldd", ptr)
#define STD_USER(x, ptr)	__put_user_asm("std", x, ptr)
#endif

/*
 * The exception table contains two values: the first is the relative offset to
 * the address of the instruction that is allowed to fault, and the second is
 * the relative offset to the address of the fixup routine. Since relative
 * addresses are used, 32bit values are sufficient even on a 64bit kernel.
 */

#define ARCH_HAS_RELATIVE_EXTABLE
struct exception_table_entry {
	int insn;	/* relative address of insn that is allowed to fault. */
	int fixup;	/* relative address of fixup routine */
};

#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
	".section __ex_table,\"aw\"\n"			   \
	".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
	".previous\n"

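/*
 * Sketch of how such a relative entry is typically resolved (based on the
 * generic ARCH_HAS_RELATIVE_EXTABLE convention, not code from this file;
 * the helper names are hypothetical): each stored 32-bit offset is added
 * back to the address of the field that holds it.
 *
 *	static unsigned long example_extable_insn(const struct exception_table_entry *x)
 *	{
 *		return (unsigned long)&x->insn + x->insn;
 *	}
 *
 *	static unsigned long example_extable_fixup(const struct exception_table_entry *x)
 *	{
 *		return (unsigned long)&x->fixup + x->fixup;
 *	}
 */
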
/*
 * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
 * (with lowest bit set) for which the fault handler in fixup_exception() will
 * load -EFAULT into %r8 for a read or write fault, and zero the target
 * register in case of a read fault in get_user().
 */
#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
	ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)

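/*
 * Rough sketch of how a fixup handler can consume the low-bit tag
 * (illustrative only; the real logic lives in fixup_exception()):
 *
 *	if (fix->fixup & 1)
 *		regs->gr[8] = -EFAULT;	// get_user()/put_user() return -EFAULT
 *	// for a tagged read fault, the target register is also zeroed
 *	// resume at the fixup address, with the tag bit masked off
 *	regs->iaoq[0] = ((unsigned long)&fix->fixup + fix->fixup) & ~1UL;
 */
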
/*
 * load_sr2() preloads the space register %%sr2 - based on the value of
 * get_fs() - with either a value of 0 to access kernel space (KERNEL_DS,
 * which is 0), or with the current value of %%sr3 to access user space
 * (USER_DS) memory. The following __get_user_asm() and __put_user_asm()
 * functions have %%sr2 hard-coded to access the requested memory.
 */
#define load_sr2() \
	__asm__(" or,= %0,%%r0,%%r0\n\t"	\
		" mfsp %%sr3,%0\n\t"		\
		" mtsp %0,%%sr2\n\t"		\
		: : "r"(get_fs()) : )

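/*
 * In C-like pseudocode the above amounts to the following (sketch only;
 * mfsp()/mtsp() stand for the space-register move instructions):
 *
 *	unsigned long space = get_fs().seg;	// 0 for KERNEL_DS, 1 for USER_DS
 *	if (space != 0)
 *		space = mfsp(3);		// user access: reuse the id in %sr3
 *	mtsp(space, 2);				// %sr2 now selects the target space
 */
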
#define __get_user_internal(val, ptr)			\
({							\
	register long __gu_err __asm__ ("r8") = 0;	\
							\
	switch (sizeof(*(ptr))) {			\
	case 1: __get_user_asm(val, "ldb", ptr); break; \
	case 2: __get_user_asm(val, "ldh", ptr); break; \
	case 4: __get_user_asm(val, "ldw", ptr); break; \
	case 8: LDD_USER(val, ptr); break;		\
	default: BUILD_BUG();				\
	}						\
							\
	__gu_err;					\
})

#define __get_user(val, ptr)				\
({							\
	load_sr2();					\
	__get_user_internal(val, ptr);			\
})

#define __get_user_asm(val, ldx, ptr)			\
{							\
	register long __gu_val;				\
							\
	__asm__("1: " ldx " 0(%%sr2,%2),%0\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "r"(ptr), "1"(__gu_err));		\
							\
	(val) = (__force __typeof__(*(ptr))) __gu_val;	\
}

#if !defined(CONFIG_64BIT)

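/*
 * On a 32-bit kernel a 64-bit user value has to be fetched as two 4-byte
 * loads; %0 and %R0 name the two halves of the register pair that gcc
 * allocates for the unsigned long long operand below.
 */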
#define __get_user_asm64(val, ptr)			\
{							\
	union {						\
		unsigned long long	l;		\
		__typeof__(*(ptr))	t;		\
	} __gu_tmp;					\
							\
	__asm__("   copy %%r0,%R0\n"			\
		"1: ldw 0(%%sr2,%2),%0\n"		\
		"2: ldw 4(%%sr2,%2),%R0\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)	\
		: "=&r"(__gu_tmp.l), "=r"(__gu_err)	\
		: "r"(ptr), "1"(__gu_err));		\
							\
	(val) = __gu_tmp.t;				\
}

#endif /* !defined(CONFIG_64BIT) */


#define __put_user_internal(x, ptr)				\
({								\
	register long __pu_err __asm__ ("r8") = 0;		\
	__typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x);	\
								\
	switch (sizeof(*(ptr))) {				\
	case 1: __put_user_asm("stb", __x, ptr); break;		\
	case 2: __put_user_asm("sth", __x, ptr); break;		\
	case 4: __put_user_asm("stw", __x, ptr); break;		\
	case 8: STD_USER(__x, ptr); break;			\
	default: BUILD_BUG();					\
	}							\
								\
	__pu_err;						\
})

#define __put_user(x, ptr)					\
({								\
	load_sr2();						\
	__put_user_internal(x, ptr);				\
})

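/*
 * Illustrative usage sketch (hypothetical function; not part of this
 * header): __put_user() picks the store width from the pointee type, so
 * an 8-byte store goes through STD_USER()/__put_user_asm64() on a
 * 32-bit kernel.
 *
 *	static int example_store_counter(u64 __user *uptr, u64 counter)
 *	{
 *		return put_user(counter, uptr);	// 0 on success, -EFAULT on fault
 *	}
 */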

/*
 * The "__put_user/kernel_asm()" macros tell gcc they read from memory
 * instead of writing. This is because they do not write to any memory
 * gcc knows about, so there are no aliasing issues. These macros must
 * also be aware that fixups are executed in the context of the fault,
 * and any registers used there must be listed as clobbers.
 * r8 is already listed as err.
 */

#define __put_user_asm(stx, x, ptr)				\
	__asm__ __volatile__ (					\
		"1: " stx " %2,0(%%sr2,%1)\n"			\
		"9:\n"						\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)		\
		: "=r"(__pu_err)				\
		: "r"(ptr), "r"(x), "0"(__pu_err))


#if !defined(CONFIG_64BIT)

#define __put_user_asm64(__val, ptr) do {			\
	__asm__ __volatile__ (					\
		"1: stw %2,0(%%sr2,%1)\n"			\
		"2: stw %R2,4(%%sr2,%1)\n"			\
		"9:\n"						\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)		\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)		\
		: "=r"(__pu_err)				\
		: "r"(ptr), "r"(__val), "0"(__pu_err));		\
} while (0)

#endif /* !defined(CONFIG_64BIT) */


/*
 * Complex access routines -- external declarations
 */

extern long strncpy_from_user(char *, const char __user *, long);
extern unsigned lclear_user(void __user *, unsigned long);
extern long lstrnlen_user(const char __user *, long);
/*
 * Complex access routines -- macros
 */
#define user_addr_max() (~0UL)

#define strnlen_user lstrnlen_user
#define clear_user lclear_user
#define __clear_user lclear_user

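/*
 * Illustrative usage sketch (hypothetical function; not part of this
 * header): clear_user() returns the number of bytes that could not be
 * cleared, and strnlen_user() returns the string length including the
 * terminating NUL, or 0 if the user buffer faults.
 *
 *	static int example_wipe_buffer(void __user *ubuf, unsigned long len)
 *	{
 *		return clear_user(ubuf, len) ? -EFAULT : 0;
 *	}
 */
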
unsigned long __must_check raw_copy_to_user(void __user *dst, const void *src,
					    unsigned long len);
unsigned long __must_check raw_copy_from_user(void *dst, const void __user *src,
					    unsigned long len);
unsigned long __must_check raw_copy_in_user(void __user *dst, const void __user *src,
					    unsigned long len);
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER
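
/*
 * These raw helpers are consumed by the generic copy_to_user() and
 * copy_from_user() wrappers in <linux/uaccess.h>; INLINE_COPY_*_USER
 * asks for those wrappers to be inlined. Illustrative usage sketch
 * (hypothetical struct and function):
 *
 *	struct example_req { u32 cmd; u32 arg; };
 *
 *	static int example_fetch_req(struct example_req *req,
 *				     const void __user *ubuf)
 *	{
 *		if (copy_from_user(req, ubuf, sizeof(*req)))
 *			return -EFAULT;
 *		return 0;
 *	}
 */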

struct pt_regs;
int fixup_exception(struct pt_regs *regs);

#endif /* __PARISC_UACCESS_H */