/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __M68K_UACCESS_H
#define __M68K_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/segment.h>

/* We let the MMU do all checking */
static inline int access_ok(int type, const void __user *addr,
			    unsigned long size)
{
	return 1;
}
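/*
 * With an MMU every user access below is backed by an exception-table
 * fixup, so a bad pointer simply faults when it is dereferenced and the
 * fixup code returns -EFAULT.  An up-front range check would be
 * redundant, which is why access_ok() can succeed unconditionally.
 */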

/*
 * Not all variants of the 68k family support the notion of address spaces.
 * The traditional 680x0 parts do, and they use the sfc/dfc registers and
 * the "moves" instruction to access user space from kernel space.  Other
 * family members like ColdFire don't support this, and only have a single
 * address space, and use the usual "move" instruction for user space access.
 *
 * Outside of this difference the user space access functions are the same.
 * So let's keep the code simple and just define what we need to use.
 */
#ifdef CONFIG_CPU_HAS_ADDRESS_SPACES
#define MOVES	"moves"
#else
#define MOVES	"move"
#endif
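/*
 * MOVES is pasted straight into the asm templates below, so a template
 * line such as "1:	"MOVES"."#bwl"	%2,%1\n" assembles as
 * moves.b/w/l on parts with separate address spaces and as plain
 * move.b/w/l on ColdFire.
 */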

extern int __put_user_bad(void);
extern int __get_user_bad(void);

#define __put_user_asm(res, x, ptr, bwl, reg, err)	\
asm volatile ("\n"					\
	"1:	"MOVES"."#bwl"	%2,%1\n"		\
	"2:\n"						\
	"	.section .fixup,\"ax\"\n"		\
	"	.even\n"				\
	"10:	moveq.l	%3,%0\n"			\
	"	jra	2b\n"				\
	"	.previous\n"				\
	"\n"						\
	"	.section __ex_table,\"a\"\n"		\
	"	.align	4\n"				\
	"	.long	1b,10b\n"			\
	"	.long	2b,10b\n"			\
	"	.previous"				\
	: "+d" (res), "=m" (*(ptr))			\
	: #reg (x), "i" (err))
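/*
 * __put_user_asm(): store x at the user pointer ptr with one MOVES
 * instruction.  bwl is the size suffix (b/w/l), reg the constraint used
 * for x, and err the value written into res when the store faults; the
 * __ex_table entries send a fault to the fixup at 10:, which loads err
 * into res and resumes at label 2.
 */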

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 */

#define __put_user(x, ptr)						\
({									\
	typeof(*(ptr)) __pu_val = (x);					\
	int __pu_err = 0;						\
	__chk_user_ptr(ptr);						\
	switch (sizeof (*(ptr))) {					\
	case 1:								\
		__put_user_asm(__pu_err, __pu_val, ptr, b, d, -EFAULT);	\
		break;							\
	case 2:								\
		__put_user_asm(__pu_err, __pu_val, ptr, w, r, -EFAULT);	\
		break;							\
	case 4:								\
		__put_user_asm(__pu_err, __pu_val, ptr, l, r, -EFAULT);	\
		break;							\
	case 8:								\
	    {								\
		const void __user *__pu_ptr = (ptr);			\
		asm volatile ("\n"					\
			"1:	"MOVES".l	%2,(%1)+\n"		\
			"2:	"MOVES".l	%R2,(%1)\n"		\
			"3:\n"						\
			"	.section .fixup,\"ax\"\n"		\
			"	.even\n"				\
			"10:	movel	%3,%0\n"			\
			"	jra	3b\n"				\
			"	.previous\n"				\
			"\n"						\
			"	.section __ex_table,\"a\"\n"		\
			"	.align	4\n"				\
			"	.long	1b,10b\n"			\
			"	.long	2b,10b\n"			\
			"	.long	3b,10b\n"			\
			"	.previous"				\
			: "+d" (__pu_err), "+a" (__pu_ptr)		\
			: "r" (__pu_val), "i" (-EFAULT)			\
			: "memory");					\
		break;							\
	    }								\
	default:							\
		__pu_err = __put_user_bad();				\
		break;							\
	}								\
	__pu_err;							\
})
#define put_user(x, ptr)	__put_user(x, ptr)
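/*
 * Illustration only (hypothetical caller): returning a value to user
 * space typically looks like
 *
 *	int __user *uresult;		(pointer supplied by user space)
 *	...
 *	if (put_user(val, uresult))	(evaluates to 0 or -EFAULT)
 *		return -EFAULT;
 */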


#define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({	\
	type __gu_val;						\
	asm volatile ("\n"					\
		"1:	"MOVES"."#bwl"	%2,%1\n"		\
		"2:\n"						\
		"	.section .fixup,\"ax\"\n"		\
		"	.even\n"				\
		"10:	move.l	%3,%0\n"			\
		"	sub.l	%1,%1\n"			\
		"	jra	2b\n"				\
		"	.previous\n"				\
		"\n"						\
		"	.section __ex_table,\"a\"\n"		\
		"	.align	4\n"				\
		"	.long	1b,10b\n"			\
		"	.previous"				\
		: "+d" (res), "=&" #reg (__gu_val)		\
		: "m" (*(ptr)), "i" (err));			\
	(x) = (__force typeof(*(ptr)))(__force unsigned long)__gu_val;	\
})
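/*
 * Note the fixup path: besides storing err in res it clears __gu_val
 * with "sub.l %1,%1", so a faulting get_user() hands back 0 rather than
 * stale register contents.
 */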

#define __get_user(x, ptr)						\
({									\
	int __gu_err = 0;						\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_asm(__gu_err, x, ptr, u8, b, d, -EFAULT);	\
		break;							\
	case 2:								\
		__get_user_asm(__gu_err, x, ptr, u16, w, r, -EFAULT);	\
		break;							\
	case 4:								\
		__get_user_asm(__gu_err, x, ptr, u32, l, r, -EFAULT);	\
		break;							\
/*	case 8:	disabled because gcc-4.1 has a broken typeof		\
	    {								\
		const void *__gu_ptr = (ptr);				\
		u64 __gu_val;						\
		asm volatile ("\n"					\
			"1:	"MOVES".l	(%2)+,%1\n"		\
			"2:	"MOVES".l	(%2),%R1\n"		\
			"3:\n"						\
			"	.section .fixup,\"ax\"\n"		\
			"	.even\n"				\
			"10:	move.l	%3,%0\n"			\
			"	sub.l	%1,%1\n"			\
			"	sub.l	%R1,%R1\n"			\
			"	jra	3b\n"				\
			"	.previous\n"				\
			"\n"						\
			"	.section __ex_table,\"a\"\n"		\
			"	.align	4\n"				\
			"	.long	1b,10b\n"			\
			"	.long	2b,10b\n"			\
			"	.previous"				\
			: "+d" (__gu_err), "=&r" (__gu_val),		\
			  "+a" (__gu_ptr)				\
			: "i" (-EFAULT)					\
			: "memory");					\
		(x) = (__force typeof(*(ptr)))__gu_val;			\
		break;							\
	    }	*/							\
	default:							\
		__gu_err = __get_user_bad();				\
		break;							\
	}								\
	__gu_err;							\
})
#define get_user(x, ptr) __get_user(x, ptr)
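/*
 * Illustration only (hypothetical caller): the mirror image of the
 * put_user() sketch above,
 *
 *	u32 val;
 *	const u32 __user *uptr;		(pointer supplied by user space)
 *	...
 *	if (get_user(val, uptr))
 *		return -EFAULT;		(val is zeroed on a fault)
 */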

unsigned long __generic_copy_from_user(void *to, const void __user *from, unsigned long n);
unsigned long __generic_copy_to_user(void __user *to, const void *from, unsigned long n);

#define __suffix0
#define __suffix1 b
#define __suffix2 w
#define __suffix4 l

#define ____constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)\
	asm volatile ("\n"						\
		"1:	"MOVES"."#s1"	(%2)+,%3\n"			\
		"	move."#s1"	%3,(%1)+\n"			\
		"	.ifnc	\""#s2"\",\"\"\n"			\
		"2:	"MOVES"."#s2"	(%2)+,%3\n"			\
		"	move."#s2"	%3,(%1)+\n"			\
		"	.ifnc	\""#s3"\",\"\"\n"			\
		"3:	"MOVES"."#s3"	(%2)+,%3\n"			\
		"	move."#s3"	%3,(%1)+\n"			\
		"	.endif\n"					\
		"	.endif\n"					\
		"4:\n"							\
		"	.section __ex_table,\"a\"\n"			\
		"	.align	4\n"					\
		"	.long	1b,10f\n"				\
		"	.ifnc	\""#s2"\",\"\"\n"			\
		"	.long	2b,20f\n"				\
		"	.ifnc	\""#s3"\",\"\"\n"			\
		"	.long	3b,30f\n"				\
		"	.endif\n"					\
		"	.endif\n"					\
		"	.previous\n"					\
		"\n"							\
		"	.section .fixup,\"ax\"\n"			\
		"	.even\n"					\
		"10:	addq.l	#"#n1",%0\n"				\
		"	.ifnc	\""#s2"\",\"\"\n"			\
		"20:	addq.l	#"#n2",%0\n"				\
		"	.ifnc	\""#s3"\",\"\"\n"			\
		"30:	addq.l	#"#n3",%0\n"				\
		"	.endif\n"					\
		"	.endif\n"					\
		"	jra	4b\n"					\
		"	.previous\n"					\
		: "+d" (res), "+&a" (to), "+a" (from), "=&d" (tmp)	\
		: : "memory")

#define ___constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)\
	____constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)
#define __constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3)	\
	___constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3,	\
					__suffix##n1, __suffix##n2, __suffix##n3)
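/*
 * __suffix##n maps a chunk size onto the matching move suffix (0 ->
 * none, 1 -> b, 2 -> w, 4 -> l); the intermediate ___ wrapper exists
 * only to expand __suffix##n before the suffixes are stringized.  A
 * 7-byte copy, for example, uses (n1,n2,n3) = (4,2,1) and so emits
 * long, word and byte moves, while the .ifnc directives discard the
 * steps whose suffix argument is empty.
 */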

static __always_inline unsigned long
__constant_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = 0, tmp;

	switch (n) {
	case 1:
		__constant_copy_from_user_asm(res, to, from, tmp, 1, 0, 0);
		break;
	case 2:
		__constant_copy_from_user_asm(res, to, from, tmp, 2, 0, 0);
		break;
	case 3:
		__constant_copy_from_user_asm(res, to, from, tmp, 2, 1, 0);
		break;
	case 4:
		__constant_copy_from_user_asm(res, to, from, tmp, 4, 0, 0);
		break;
	case 5:
		__constant_copy_from_user_asm(res, to, from, tmp, 4, 1, 0);
		break;
	case 6:
		__constant_copy_from_user_asm(res, to, from, tmp, 4, 2, 0);
		break;
	case 7:
		__constant_copy_from_user_asm(res, to, from, tmp, 4, 2, 1);
		break;
	case 8:
		__constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 0);
		break;
	case 9:
		__constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 1);
		break;
	case 10:
		__constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 2);
		break;
	case 12:
		__constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 4);
		break;
	default:
		/* we limit the inlined version to 3 moves */
		return __generic_copy_from_user(to, from, n);
	}

	return res;
}
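/*
 * res stays 0 on success.  On a fault the fixup chain adds the size of
 * the faulting chunk plus every chunk after it, so the caller gets back
 * the number of bytes left uncopied, as the raw_copy_from_user()
 * contract requires.
 */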

#define __constant_copy_to_user_asm(res, to, from, tmp, n, s1, s2, s3)	\
	asm volatile ("\n"						\
		"	move."#s1"	(%2)+,%3\n"			\
		"11:	"MOVES"."#s1"	%3,(%1)+\n"			\
		"12:	move."#s2"	(%2)+,%3\n"			\
		"21:	"MOVES"."#s2"	%3,(%1)+\n"			\
		"22:\n"							\
		"	.ifnc	\""#s3"\",\"\"\n"			\
		"	move."#s3"	(%2)+,%3\n"			\
		"31:	"MOVES"."#s3"	%3,(%1)+\n"			\
		"32:\n"							\
		"	.endif\n"					\
		"4:\n"							\
		"\n"							\
		"	.section __ex_table,\"a\"\n"			\
		"	.align	4\n"					\
		"	.long	11b,5f\n"				\
		"	.long	12b,5f\n"				\
		"	.long	21b,5f\n"				\
		"	.long	22b,5f\n"				\
		"	.ifnc	\""#s3"\",\"\"\n"			\
		"	.long	31b,5f\n"				\
		"	.long	32b,5f\n"				\
		"	.endif\n"					\
		"	.previous\n"					\
		"\n"							\
		"	.section .fixup,\"ax\"\n"			\
		"	.even\n"					\
		"5:	moveq.l	#"#n",%0\n"				\
		"	jra	4b\n"					\
		"	.previous\n"					\
		: "+d" (res), "+a" (to), "+a" (from), "=&d" (tmp)	\
		: : "memory")
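/*
 * Unlike the from-user variant, all faulting labels here share one
 * fixup (5:), which simply sets res to the full length n.  Writes need
 * no destination zeroing, so a conservative "nothing was copied"
 * residual count is enough.
 */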

static __always_inline unsigned long
__constant_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	unsigned long res = 0, tmp;

	switch (n) {
	case 1:
		__put_user_asm(res, *(u8 *)from, (u8 __user *)to, b, d, 1);
		break;
	case 2:
		__put_user_asm(res, *(u16 *)from, (u16 __user *)to, w, r, 2);
		break;
	case 3:
		__constant_copy_to_user_asm(res, to, from, tmp, 3, w, b,);
		break;
	case 4:
		__put_user_asm(res, *(u32 *)from, (u32 __user *)to, l, r, 4);
		break;
	case 5:
		__constant_copy_to_user_asm(res, to, from, tmp, 5, l, b,);
		break;
	case 6:
		__constant_copy_to_user_asm(res, to, from, tmp, 6, l, w,);
		break;
	case 7:
		__constant_copy_to_user_asm(res, to, from, tmp, 7, l, w, b);
		break;
	case 8:
		__constant_copy_to_user_asm(res, to, from, tmp, 8, l, l,);
		break;
	case 9:
		__constant_copy_to_user_asm(res, to, from, tmp, 9, l, l, b);
		break;
	case 10:
		__constant_copy_to_user_asm(res, to, from, tmp, 10, l, l, w);
		break;
	case 12:
		__constant_copy_to_user_asm(res, to, from, tmp, 12, l, l, l);
		break;
	default:
		/* limit the inlined version to 3 moves */
		return __generic_copy_to_user(to, from, n);
	}

	return res;
}
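/*
 * The single-move sizes reuse __put_user_asm() with the byte count (1,
 * 2 or 4) as its err argument instead of -EFAULT, so a fault again
 * leaves the number of uncopied bytes in res.
 */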

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (__builtin_constant_p(n))
		return __constant_copy_from_user(to, from, n);
	return __generic_copy_from_user(to, from, n);
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (__builtin_constant_p(n))
		return __constant_copy_to_user(to, from, n);
	return __generic_copy_to_user(to, from, n);
}
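/*
 * Illustration only (hypothetical caller, normally reached through the
 * generic copy_from_user() wrapper built on these): a constant-sized
 * copy such as
 *
 *	struct foo karg;		(hypothetical, sizeof <= 12)
 *	if (raw_copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 *
 * resolves to the inlined __constant_copy_from_user() at compile time.
 */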
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER

#define user_addr_max()					\
	(uaccess_kernel() ? ~0UL : TASK_SIZE)

extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __clear_user(void __user *to, unsigned long n);

#define clear_user	__clear_user
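/*
 * __clear_user() zeroes n bytes of user memory; like the copy helpers
 * above it returns the number of bytes that could not be cleared.
 */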

#endif /* __M68K_UACCESS_H */