// SPDX-License-Identifier: GPL-2.0+
/*
 * Author: Hanlu Li <lihanlu@loongson.cn>
 *         Huacai Chen <chenhuacai@loongson.cn>
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 *
 * Derived from MIPS:
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1994 - 2000 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2014, Imagination Technologies Ltd.
 */
#include <linux/audit.h>
#include <linux/cache.h>
#include <linux/context_tracking.h>
#include <linux/entry-common.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/personality.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/compiler.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>

#include <asm/asm.h>
#include <asm/cacheflush.h>
#include <asm/cpu-features.h>
#include <asm/fpu.h>
#include <asm/lbt.h>
#include <asm/ucontext.h>
#include <asm/vdso.h>

#ifdef DEBUG_SIG
# define DEBUGP(fmt, args...) printk("%s: " fmt, __func__, ##args)
#else
# define DEBUGP(fmt, args...)
#endif

/* Make sure we will not lose FPU ownership */
#define lock_fpu_owner()        ({ preempt_disable(); pagefault_disable(); })
#define unlock_fpu_owner()      ({ pagefault_enable(); preempt_enable(); })
/* Make sure we will not lose LBT ownership */
#define lock_lbt_owner()        ({ preempt_disable(); pagefault_disable(); })
#define unlock_lbt_owner()      ({ pagefault_enable(); preempt_enable(); })
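
/*
 * Both halves matter here. Disabling preemption keeps FPU/LBT ownership
 * stable while the context is copied, and disabling pagefaults turns a
 * fault on the user sigframe into an error return instead of a sleep,
 * which the protected_* routines below handle by touching the page with
 * pagefaults enabled and retrying.
 */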

/* Assembly functions to move context to/from the FPU */
extern asmlinkage int
_save_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
extern asmlinkage int
_restore_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
extern asmlinkage int
_save_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
extern asmlinkage int
_restore_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
extern asmlinkage int
_save_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
extern asmlinkage int
_restore_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);

#ifdef CONFIG_CPU_HAS_LBT
extern asmlinkage int _save_lbt_context(void __user *regs, void __user *eflags);
extern asmlinkage int _restore_lbt_context(void __user *regs, void __user *eflags);
extern asmlinkage int _save_ftop_context(void __user *ftop);
extern asmlinkage int _restore_ftop_context(void __user *ftop);
#endif

struct rt_sigframe {
        struct siginfo rs_info;
        struct ucontext rs_uctx;
};

struct _ctx_layout {
        struct sctx_info *addr;
        unsigned int size;
};

struct extctx_layout {
        unsigned long size;
        unsigned int flags;
        struct _ctx_layout fpu;
        struct _ctx_layout lsx;
        struct _ctx_layout lasx;
        struct _ctx_layout lbt;
        struct _ctx_layout end;
};

static void __user *get_ctx_through_ctxinfo(struct sctx_info *info)
{
        return (void __user *)((char *)info + sizeof(struct sctx_info));
}
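
/*
 * The extended context area below struct sigcontext is a chain of
 * records: each record is a struct sctx_info header (magic + size)
 * followed immediately by its payload, and a header with magic == 0
 * terminates the chain (see setup_sigcontext() and parse_extcontext()
 * below).
 */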

/*
 * Copy the thread's saved context to/from a signal context, which is
 * presumed to be on the user stack and is therefore accessed with the
 * appropriate macros from uaccess.h.
 */
static int copy_fpu_to_sigcontext(struct fpu_context __user *ctx)
{
        int i;
        int err = 0;
        uint64_t __user *regs = (uint64_t *)&ctx->regs;
        uint64_t __user *fcc = &ctx->fcc;
        uint32_t __user *fcsr = &ctx->fcsr;

        for (i = 0; i < NUM_FPU_REGS; i++) {
                err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
                                  &regs[i]);
        }
        err |= __put_user(current->thread.fpu.fcc, fcc);
        err |= __put_user(current->thread.fpu.fcsr, fcsr);

        return err;
}

static int copy_fpu_from_sigcontext(struct fpu_context __user *ctx)
{
        int i;
        int err = 0;
        u64 fpr_val;
        uint64_t __user *regs = (uint64_t *)&ctx->regs;
        uint64_t __user *fcc = &ctx->fcc;
        uint32_t __user *fcsr = &ctx->fcsr;

        for (i = 0; i < NUM_FPU_REGS; i++) {
                err |= __get_user(fpr_val, &regs[i]);
                set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
        }
        err |= __get_user(current->thread.fpu.fcc, fcc);
        err |= __get_user(current->thread.fpu.fcsr, fcsr);

        return err;
}

static int copy_lsx_to_sigcontext(struct lsx_context __user *ctx)
{
        int i;
        int err = 0;
        uint64_t __user *regs = (uint64_t *)&ctx->regs;
        uint64_t __user *fcc = &ctx->fcc;
        uint32_t __user *fcsr = &ctx->fcsr;

        for (i = 0; i < NUM_FPU_REGS; i++) {
                err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
                                  &regs[2*i]);
                err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
                                  &regs[2*i+1]);
        }
        err |= __put_user(current->thread.fpu.fcc, fcc);
        err |= __put_user(current->thread.fpu.fcsr, fcsr);

        return err;
}

static int copy_lsx_from_sigcontext(struct lsx_context __user *ctx)
{
        int i;
        int err = 0;
        u64 fpr_val;
        uint64_t __user *regs = (uint64_t *)&ctx->regs;
        uint64_t __user *fcc = &ctx->fcc;
        uint32_t __user *fcsr = &ctx->fcsr;

        for (i = 0; i < NUM_FPU_REGS; i++) {
                err |= __get_user(fpr_val, &regs[2*i]);
                set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
                err |= __get_user(fpr_val, &regs[2*i+1]);
                set_fpr64(&current->thread.fpu.fpr[i], 1, fpr_val);
        }
        err |= __get_user(current->thread.fpu.fcc, fcc);
        err |= __get_user(current->thread.fpu.fcsr, fcsr);

        return err;
}

static int copy_lasx_to_sigcontext(struct lasx_context __user *ctx)
{
        int i;
        int err = 0;
        uint64_t __user *regs = (uint64_t *)&ctx->regs;
        uint64_t __user *fcc = &ctx->fcc;
        uint32_t __user *fcsr = &ctx->fcsr;

        for (i = 0; i < NUM_FPU_REGS; i++) {
                err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
                                  &regs[4*i]);
                err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
                                  &regs[4*i+1]);
                err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 2),
                                  &regs[4*i+2]);
                err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 3),
                                  &regs[4*i+3]);
        }
        err |= __put_user(current->thread.fpu.fcc, fcc);
        err |= __put_user(current->thread.fpu.fcsr, fcsr);

        return err;
}

static int copy_lasx_from_sigcontext(struct lasx_context __user *ctx)
{
        int i;
        int err = 0;
        u64 fpr_val;
        uint64_t __user *regs = (uint64_t *)&ctx->regs;
        uint64_t __user *fcc = &ctx->fcc;
        uint32_t __user *fcsr = &ctx->fcsr;

        for (i = 0; i < NUM_FPU_REGS; i++) {
                err |= __get_user(fpr_val, &regs[4*i]);
                set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
                err |= __get_user(fpr_val, &regs[4*i+1]);
                set_fpr64(&current->thread.fpu.fpr[i], 1, fpr_val);
                err |= __get_user(fpr_val, &regs[4*i+2]);
                set_fpr64(&current->thread.fpu.fpr[i], 2, fpr_val);
                err |= __get_user(fpr_val, &regs[4*i+3]);
                set_fpr64(&current->thread.fpu.fpr[i], 3, fpr_val);
        }
        err |= __get_user(current->thread.fpu.fcc, fcc);
        err |= __get_user(current->thread.fpu.fcsr, fcsr);

        return err;
}

#ifdef CONFIG_CPU_HAS_LBT
static int copy_lbt_to_sigcontext(struct lbt_context __user *ctx)
{
        int err = 0;
        uint64_t __user *regs = (uint64_t *)&ctx->regs;
        uint32_t __user *eflags = (uint32_t *)&ctx->eflags;

        err |= __put_user(current->thread.lbt.scr0, &regs[0]);
        err |= __put_user(current->thread.lbt.scr1, &regs[1]);
        err |= __put_user(current->thread.lbt.scr2, &regs[2]);
        err |= __put_user(current->thread.lbt.scr3, &regs[3]);
        err |= __put_user(current->thread.lbt.eflags, eflags);

        return err;
}

static int copy_lbt_from_sigcontext(struct lbt_context __user *ctx)
{
        int err = 0;
        uint64_t __user *regs = (uint64_t *)&ctx->regs;
        uint32_t __user *eflags = (uint32_t *)&ctx->eflags;

        err |= __get_user(current->thread.lbt.scr0, &regs[0]);
        err |= __get_user(current->thread.lbt.scr1, &regs[1]);
        err |= __get_user(current->thread.lbt.scr2, &regs[2]);
        err |= __get_user(current->thread.lbt.scr3, &regs[3]);
        err |= __get_user(current->thread.lbt.eflags, eflags);

        return err;
}

static int copy_ftop_to_sigcontext(struct lbt_context __user *ctx)
{
        uint32_t __user *ftop = &ctx->ftop;

        return __put_user(current->thread.fpu.ftop, ftop);
}

static int copy_ftop_from_sigcontext(struct lbt_context __user *ctx)
{
        uint32_t __user *ftop = &ctx->ftop;

        return __get_user(current->thread.fpu.ftop, ftop);
}
#endif

/*
 * Wrappers for the assembly _{save,restore}_fp_context functions.
 */
static int save_hw_fpu_context(struct fpu_context __user *ctx)
{
        uint64_t __user *regs = (uint64_t *)&ctx->regs;
        uint64_t __user *fcc = &ctx->fcc;
        uint32_t __user *fcsr = &ctx->fcsr;

        return _save_fp_context(regs, fcc, fcsr);
}

static int restore_hw_fpu_context(struct fpu_context __user *ctx)
{
        uint64_t __user *regs = (uint64_t *)&ctx->regs;
        uint64_t __user *fcc = &ctx->fcc;
        uint32_t __user *fcsr = &ctx->fcsr;

        return _restore_fp_context(regs, fcc, fcsr);
}

static int save_hw_lsx_context(struct lsx_context __user *ctx)
{
        uint64_t __user *regs = (uint64_t *)&ctx->regs;
        uint64_t __user *fcc = &ctx->fcc;
        uint32_t __user *fcsr = &ctx->fcsr;

        return _save_lsx_context(regs, fcc, fcsr);
}

static int restore_hw_lsx_context(struct lsx_context __user *ctx)
{
        uint64_t __user *regs = (uint64_t *)&ctx->regs;
        uint64_t __user *fcc = &ctx->fcc;
        uint32_t __user *fcsr = &ctx->fcsr;

        return _restore_lsx_context(regs, fcc, fcsr);
}

static int save_hw_lasx_context(struct lasx_context __user *ctx)
{
        uint64_t __user *regs = (uint64_t *)&ctx->regs;
        uint64_t __user *fcc = &ctx->fcc;
        uint32_t __user *fcsr = &ctx->fcsr;

        return _save_lasx_context(regs, fcc, fcsr);
}

static int restore_hw_lasx_context(struct lasx_context __user *ctx)
{
        uint64_t __user *regs = (uint64_t *)&ctx->regs;
        uint64_t __user *fcc = &ctx->fcc;
        uint32_t __user *fcsr = &ctx->fcsr;

        return _restore_lasx_context(regs, fcc, fcsr);
}

/*
 * Wrappers for the assembly _{save,restore}_lbt_context functions.
 */
#ifdef CONFIG_CPU_HAS_LBT
static int save_hw_lbt_context(struct lbt_context __user *ctx)
{
        uint64_t __user *regs = (uint64_t *)&ctx->regs;
        uint32_t __user *eflags = (uint32_t *)&ctx->eflags;

        return _save_lbt_context(regs, eflags);
}

static int restore_hw_lbt_context(struct lbt_context __user *ctx)
{
        uint64_t __user *regs = (uint64_t *)&ctx->regs;
        uint32_t __user *eflags = (uint32_t *)&ctx->eflags;

        return _restore_lbt_context(regs, eflags);
}

static int save_hw_ftop_context(struct lbt_context __user *ctx)
{
        uint32_t __user *ftop = &ctx->ftop;

        return _save_ftop_context(ftop);
}

static int restore_hw_ftop_context(struct lbt_context __user *ctx)
{
        uint32_t __user *ftop = &ctx->ftop;

        return _restore_ftop_context(ftop);
}
#endif

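/*
 * fcsr_pending() relies on the FCSR layout assumed throughout this
 * file: FPU_CSR_ALL_E covers the exception enable bits, and the
 * matching cause bits sit 24 bits higher, so (enables << 24) masks
 * exactly the causes whose exceptions are enabled. A nonzero
 * (csr & enabled) therefore means the handler left an enabled
 * exception pending, which is turned into a SIGFPE below.
 */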
static int fcsr_pending(unsigned int __user *fcsr)
{
        int err, sig = 0;
        unsigned int csr, enabled;

        err = __get_user(csr, fcsr);
        enabled = ((csr & FPU_CSR_ALL_E) << 24);
        /*
         * If the signal handler set some FPU exceptions, clear them and
         * send SIGFPE.
         */
        if (csr & enabled) {
                csr &= ~enabled;
                err |= __put_user(csr, fcsr);
                sig = SIGFPE;
        }
        return err ?: sig;
}

/*
 * Helper routines. The protected_{save,restore}_* functions below run
 * the actual copy with pagefaults disabled; if a user access faults,
 * they touch the sigframe with pagefaults enabled and retry, so a
 * valid but not-yet-mapped frame is faulted in rather than failing.
 */
static int protected_save_fpu_context(struct extctx_layout *extctx)
{
        int err = 0;
        struct sctx_info __user *info = extctx->fpu.addr;
        struct fpu_context __user *fpu_ctx = (struct fpu_context *)get_ctx_through_ctxinfo(info);
        uint64_t __user *regs = (uint64_t *)&fpu_ctx->regs;
        uint64_t __user *fcc = &fpu_ctx->fcc;
        uint32_t __user *fcsr = &fpu_ctx->fcsr;

        while (1) {
                lock_fpu_owner();
                if (is_fpu_owner())
                        err = save_hw_fpu_context(fpu_ctx);
                else
                        err = copy_fpu_to_sigcontext(fpu_ctx);
                unlock_fpu_owner();

                err |= __put_user(FPU_CTX_MAGIC, &info->magic);
                err |= __put_user(extctx->fpu.size, &info->size);

                if (likely(!err))
                        break;
                /* Touch the FPU context and try again */
                err = __put_user(0, &regs[0]) |
                      __put_user(0, &regs[31]) |
                      __put_user(0, fcc) |
                      __put_user(0, fcsr);
                if (err)
                        return err;     /* really bad sigcontext */
        }

        return err;
}

static int protected_restore_fpu_context(struct extctx_layout *extctx)
{
        int err = 0, sig = 0, tmp __maybe_unused;
        struct sctx_info __user *info = extctx->fpu.addr;
        struct fpu_context __user *fpu_ctx = (struct fpu_context *)get_ctx_through_ctxinfo(info);
        uint64_t __user *regs = (uint64_t *)&fpu_ctx->regs;
        uint64_t __user *fcc = &fpu_ctx->fcc;
        uint32_t __user *fcsr = &fpu_ctx->fcsr;

        err = sig = fcsr_pending(fcsr);
        if (err < 0)
                return err;

        while (1) {
                lock_fpu_owner();
                if (is_fpu_owner())
                        err = restore_hw_fpu_context(fpu_ctx);
                else
                        err = copy_fpu_from_sigcontext(fpu_ctx);
                unlock_fpu_owner();

                if (likely(!err))
                        break;
                /* Touch the FPU context and try again */
                err = __get_user(tmp, &regs[0]) |
                      __get_user(tmp, &regs[31]) |
                      __get_user(tmp, fcc) |
                      __get_user(tmp, fcsr);
                if (err)
                        break;  /* really bad sigcontext */
        }

        return err ?: sig;
}

static int protected_save_lsx_context(struct extctx_layout *extctx)
{
        int err = 0;
        struct sctx_info __user *info = extctx->lsx.addr;
        struct lsx_context __user *lsx_ctx = (struct lsx_context *)get_ctx_through_ctxinfo(info);
        uint64_t __user *regs = (uint64_t *)&lsx_ctx->regs;
        uint64_t __user *fcc = &lsx_ctx->fcc;
        uint32_t __user *fcsr = &lsx_ctx->fcsr;

        while (1) {
                lock_fpu_owner();
                if (is_lsx_enabled())
                        err = save_hw_lsx_context(lsx_ctx);
                else {
                        if (is_fpu_owner())
                                save_fp(current);
                        err = copy_lsx_to_sigcontext(lsx_ctx);
                }
                unlock_fpu_owner();

                err |= __put_user(LSX_CTX_MAGIC, &info->magic);
                err |= __put_user(extctx->lsx.size, &info->size);

                if (likely(!err))
                        break;
                /* Touch the LSX context and try again */
                err = __put_user(0, &regs[0]) |
                      __put_user(0, &regs[32*2-1]) |
                      __put_user(0, fcc) |
                      __put_user(0, fcsr);
                if (err)
                        return err;     /* really bad sigcontext */
        }

        return err;
}

static int protected_restore_lsx_context(struct extctx_layout *extctx)
{
        int err = 0, sig = 0, tmp __maybe_unused;
        struct sctx_info __user *info = extctx->lsx.addr;
        struct lsx_context __user *lsx_ctx = (struct lsx_context *)get_ctx_through_ctxinfo(info);
        uint64_t __user *regs = (uint64_t *)&lsx_ctx->regs;
        uint64_t __user *fcc = &lsx_ctx->fcc;
        uint32_t __user *fcsr = &lsx_ctx->fcsr;

        err = sig = fcsr_pending(fcsr);
        if (err < 0)
                return err;

        while (1) {
                lock_fpu_owner();
                if (is_lsx_enabled())
                        err = restore_hw_lsx_context(lsx_ctx);
                else {
                        err = copy_lsx_from_sigcontext(lsx_ctx);
                        if (is_fpu_owner())
                                restore_fp(current);
                }
                unlock_fpu_owner();

                if (likely(!err))
                        break;
                /* Touch the LSX context and try again */
                err = __get_user(tmp, &regs[0]) |
                      __get_user(tmp, &regs[32*2-1]) |
                      __get_user(tmp, fcc) |
                      __get_user(tmp, fcsr);
                if (err)
                        break;  /* really bad sigcontext */
        }

        return err ?: sig;
}

static int protected_save_lasx_context(struct extctx_layout *extctx)
{
        int err = 0;
        struct sctx_info __user *info = extctx->lasx.addr;
        struct lasx_context __user *lasx_ctx =
                (struct lasx_context *)get_ctx_through_ctxinfo(info);
        uint64_t __user *regs = (uint64_t *)&lasx_ctx->regs;
        uint64_t __user *fcc = &lasx_ctx->fcc;
        uint32_t __user *fcsr = &lasx_ctx->fcsr;

        while (1) {
                lock_fpu_owner();
                if (is_lasx_enabled())
                        err = save_hw_lasx_context(lasx_ctx);
                else {
                        if (is_lsx_enabled())
                                save_lsx(current);
                        else if (is_fpu_owner())
                                save_fp(current);
                        err = copy_lasx_to_sigcontext(lasx_ctx);
                }
                unlock_fpu_owner();

                err |= __put_user(LASX_CTX_MAGIC, &info->magic);
                err |= __put_user(extctx->lasx.size, &info->size);

                if (likely(!err))
                        break;
                /* Touch the LASX context and try again */
                err = __put_user(0, &regs[0]) |
                      __put_user(0, &regs[32*4-1]) |
                      __put_user(0, fcc) |
                      __put_user(0, fcsr);
                if (err)
                        return err;     /* really bad sigcontext */
        }

        return err;
}

static int protected_restore_lasx_context(struct extctx_layout *extctx)
{
        int err = 0, sig = 0, tmp __maybe_unused;
        struct sctx_info __user *info = extctx->lasx.addr;
        struct lasx_context __user *lasx_ctx =
                (struct lasx_context *)get_ctx_through_ctxinfo(info);
        uint64_t __user *regs = (uint64_t *)&lasx_ctx->regs;
        uint64_t __user *fcc = &lasx_ctx->fcc;
        uint32_t __user *fcsr = &lasx_ctx->fcsr;

        err = sig = fcsr_pending(fcsr);
        if (err < 0)
                return err;

        while (1) {
                lock_fpu_owner();
                if (is_lasx_enabled())
                        err = restore_hw_lasx_context(lasx_ctx);
                else {
                        err = copy_lasx_from_sigcontext(lasx_ctx);
                        if (is_lsx_enabled())
                                restore_lsx(current);
                        else if (is_fpu_owner())
                                restore_fp(current);
                }
                unlock_fpu_owner();

                if (likely(!err))
                        break;
                /* Touch the LASX context and try again */
                err = __get_user(tmp, &regs[0]) |
                      __get_user(tmp, &regs[32*4-1]) |
                      __get_user(tmp, fcc) |
                      __get_user(tmp, fcsr);
                if (err)
                        break;  /* really bad sigcontext */
        }

        return err ?: sig;
}

#ifdef CONFIG_CPU_HAS_LBT
static int protected_save_lbt_context(struct extctx_layout *extctx)
{
        int err = 0;
        struct sctx_info __user *info = extctx->lbt.addr;
        struct lbt_context __user *lbt_ctx =
                (struct lbt_context *)get_ctx_through_ctxinfo(info);
        uint64_t __user *regs = (uint64_t *)&lbt_ctx->regs;
        uint32_t __user *eflags = (uint32_t *)&lbt_ctx->eflags;

        while (1) {
                lock_lbt_owner();
                if (is_lbt_owner())
                        err |= save_hw_lbt_context(lbt_ctx);
                else
                        err |= copy_lbt_to_sigcontext(lbt_ctx);
                if (is_fpu_owner())
                        err |= save_hw_ftop_context(lbt_ctx);
                else
                        err |= copy_ftop_to_sigcontext(lbt_ctx);
                unlock_lbt_owner();

                err |= __put_user(LBT_CTX_MAGIC, &info->magic);
                err |= __put_user(extctx->lbt.size, &info->size);

                if (likely(!err))
                        break;
                /* Touch the LBT context and try again */
                err = __put_user(0, &regs[0]) | __put_user(0, eflags);

                if (err)
                        return err;
        }

        return err;
}

static int protected_restore_lbt_context(struct extctx_layout *extctx)
{
        int err = 0, tmp __maybe_unused;
        struct sctx_info __user *info = extctx->lbt.addr;
        struct lbt_context __user *lbt_ctx =
                (struct lbt_context *)get_ctx_through_ctxinfo(info);
        uint64_t __user *regs = (uint64_t *)&lbt_ctx->regs;
        uint32_t __user *eflags = (uint32_t *)&lbt_ctx->eflags;

        while (1) {
                lock_lbt_owner();
                if (is_lbt_owner())
                        err |= restore_hw_lbt_context(lbt_ctx);
                else
                        err |= copy_lbt_from_sigcontext(lbt_ctx);
                if (is_fpu_owner())
                        err |= restore_hw_ftop_context(lbt_ctx);
                else
                        err |= copy_ftop_from_sigcontext(lbt_ctx);
                unlock_lbt_owner();

                if (likely(!err))
                        break;
                /* Touch the LBT context and try again */
                err = __get_user(tmp, &regs[0]) | __get_user(tmp, eflags);

                if (err)
                        return err;
        }

        return err;
}
#endif

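/*
 * Note the priority in the chains below: the LASX frame embeds the full
 * LSX state, which in turn embeds the scalar FP state, so only the
 * widest live context needs to be saved or restored. The LBT context is
 * independent and handled separately.
 */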
static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
                            struct extctx_layout *extctx)
{
        int i, err = 0;
        struct sctx_info __user *info;

        err |= __put_user(regs->csr_era, &sc->sc_pc);
        err |= __put_user(extctx->flags, &sc->sc_flags);

        err |= __put_user(0, &sc->sc_regs[0]);
        for (i = 1; i < 32; i++)
                err |= __put_user(regs->regs[i], &sc->sc_regs[i]);

        if (extctx->lasx.addr)
                err |= protected_save_lasx_context(extctx);
        else if (extctx->lsx.addr)
                err |= protected_save_lsx_context(extctx);
        else if (extctx->fpu.addr)
                err |= protected_save_fpu_context(extctx);

#ifdef CONFIG_CPU_HAS_LBT
        if (extctx->lbt.addr)
                err |= protected_save_lbt_context(extctx);
#endif

        /* Set the "end" magic */
        info = (struct sctx_info *)extctx->end.addr;
        err |= __put_user(0, &info->magic);
        err |= __put_user(0, &info->size);

        return err;
}

static int parse_extcontext(struct sigcontext __user *sc, struct extctx_layout *extctx)
{
        int err = 0;
        unsigned int magic, size;
        struct sctx_info __user *info = (struct sctx_info __user *)&sc->sc_extcontext;

        while (1) {
                err |= __get_user(magic, &info->magic);
                err |= __get_user(size, &info->size);
                if (err)
                        return err;

                switch (magic) {
                case 0: /* END */
                        goto done;

                case FPU_CTX_MAGIC:
                        if (size < (sizeof(struct sctx_info) +
                                    sizeof(struct fpu_context)))
                                goto invalid;
                        extctx->fpu.addr = info;
                        break;

                case LSX_CTX_MAGIC:
                        if (size < (sizeof(struct sctx_info) +
                                    sizeof(struct lsx_context)))
                                goto invalid;
                        extctx->lsx.addr = info;
                        break;

                case LASX_CTX_MAGIC:
                        if (size < (sizeof(struct sctx_info) +
                                    sizeof(struct lasx_context)))
                                goto invalid;
                        extctx->lasx.addr = info;
                        break;

                case LBT_CTX_MAGIC:
                        if (size < (sizeof(struct sctx_info) +
                                    sizeof(struct lbt_context)))
                                goto invalid;
                        extctx->lbt.addr = info;
                        break;

                default:
                        goto invalid;
                }

                info = (struct sctx_info *)((char *)info + size);
        }

done:
        return 0;

invalid:
        return -EINVAL;
}

static int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
        int i, err = 0;
        struct extctx_layout extctx;

        memset(&extctx, 0, sizeof(struct extctx_layout));

        err = __get_user(extctx.flags, &sc->sc_flags);
        if (err)
                goto bad;

        err = parse_extcontext(sc, &extctx);
        if (err)
                goto bad;

        conditional_used_math(extctx.flags & SC_USED_FP);

        /*
         * The signal handler may have used the FPU; give it up if the
         * program doesn't want it following sigreturn.
         */
        if (!(extctx.flags & SC_USED_FP))
                lose_fpu(0);

        /* Always make any pending restarted system calls return -EINTR */
        current->restart_block.fn = do_no_restart_syscall;

        err |= __get_user(regs->csr_era, &sc->sc_pc);
        for (i = 1; i < 32; i++)
                err |= __get_user(regs->regs[i], &sc->sc_regs[i]);

        if (extctx.lasx.addr)
                err |= protected_restore_lasx_context(&extctx);
        else if (extctx.lsx.addr)
                err |= protected_restore_lsx_context(&extctx);
        else if (extctx.fpu.addr)
                err |= protected_restore_fpu_context(&extctx);

#ifdef CONFIG_CPU_HAS_LBT
        if (extctx.lbt.addr)
                err |= protected_restore_lbt_context(&extctx);
#endif

bad:
        return err;
}

static unsigned int handle_flags(void)
{
        unsigned int flags = 0;

        flags = used_math() ? SC_USED_FP : 0;

        switch (current->thread.error_code) {
        case 1:
                flags |= SC_ADDRERR_RD;
                break;
        case 2:
                flags |= SC_ADDRERR_WR;
                break;
        }

        return flags;
}

static unsigned long extframe_alloc(struct extctx_layout *extctx,
                                    struct _ctx_layout *layout,
                                    size_t size, unsigned int align, unsigned long base)
{
        unsigned long new_base = base - size;

        new_base = round_down(new_base, (align < 16 ? 16 : align));
        new_base -= sizeof(struct sctx_info);

        layout->addr = (void *)new_base;
        layout->size = (unsigned int)(base - new_base);
        extctx->size += layout->size;

        return new_base;
}
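
/*
 * A worked example of the math above (illustrative values only):
 * carving a 1024-byte payload with 32-byte alignment out of
 * base = 0x7fff1000 puts the payload at
 * round_down(0x7fff1000 - 1024, 32) = 0x7fff0c00, with the sctx_info
 * header immediately below it; layout->addr points at the header, and
 * layout->size covers header + alignment padding + payload.
 */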

static unsigned long setup_extcontext(struct extctx_layout *extctx, unsigned long sp)
{
        unsigned long new_sp = sp;

        memset(extctx, 0, sizeof(struct extctx_layout));

        extctx->flags = handle_flags();

        /* The frame grows down, so allocate the "end" context info first. */
        new_sp -= sizeof(struct sctx_info);
        extctx->end.addr = (void *)new_sp;
        extctx->end.size = (unsigned int)sizeof(struct sctx_info);
        extctx->size += extctx->end.size;

        if (extctx->flags & SC_USED_FP) {
                if (cpu_has_lasx && thread_lasx_context_live())
                        new_sp = extframe_alloc(extctx, &extctx->lasx,
                                sizeof(struct lasx_context), LASX_CTX_ALIGN, new_sp);
                else if (cpu_has_lsx && thread_lsx_context_live())
                        new_sp = extframe_alloc(extctx, &extctx->lsx,
                                sizeof(struct lsx_context), LSX_CTX_ALIGN, new_sp);
                else if (cpu_has_fpu)
                        new_sp = extframe_alloc(extctx, &extctx->fpu,
                                sizeof(struct fpu_context), FPU_CTX_ALIGN, new_sp);
        }

#ifdef CONFIG_CPU_HAS_LBT
        if (cpu_has_lbt && thread_lbt_context_live()) {
                new_sp = extframe_alloc(extctx, &extctx->lbt,
                                sizeof(struct lbt_context), LBT_CTX_ALIGN, new_sp);
        }
#endif

        return new_sp;
}

static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
                                 struct extctx_layout *extctx)
{
        unsigned long sp;

        /* Default to using normal stack */
        sp = regs->regs[3];

        /*
         * If we are on the alternate signal stack and would overflow it, don't.
         * Return an always-bogus address instead so we will die with SIGSEGV.
         */
        if (on_sig_stack(sp) &&
            !likely(on_sig_stack(sp - sizeof(struct rt_sigframe))))
                return (void __user __force *)(-1UL);

        sp = sigsp(sp, ksig);
        sp = round_down(sp, 16);
        sp = setup_extcontext(extctx, sp);
        sp -= sizeof(struct rt_sigframe);

        if (!IS_ALIGNED(sp, 16))
                BUG();

        return (void __user *)sp;
}
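
/*
 * The resulting user-stack layout, from high to low addresses: the
 * "end" sctx_info terminator, then the live FP-family context (LASX,
 * LSX or FPU), then the LBT context if present, and finally struct
 * rt_sigframe itself at the returned, 16-byte-aligned sp.
 */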

/*
 * Restore the signal context and the saved signal mask, returning to
 * the code that was interrupted by the signal.
 */

SYSCALL_DEFINE0(rt_sigreturn)
{
        int sig;
        sigset_t set;
        struct pt_regs *regs;
        struct rt_sigframe __user *frame;

        regs = current_pt_regs();
        frame = (struct rt_sigframe __user *)regs->regs[3];
        if (!access_ok(frame, sizeof(*frame)))
                goto badframe;
        if (__copy_from_user(&set, &frame->rs_uctx.uc_sigmask, sizeof(set)))
                goto badframe;

        set_current_blocked(&set);

        sig = restore_sigcontext(regs, &frame->rs_uctx.uc_mcontext);
        if (sig < 0)
                goto badframe;
        else if (sig)
                force_sig(sig);

        regs->regs[0] = 0;      /* No syscall restarting */
        if (restore_altstack(&frame->rs_uctx.uc_stack))
                goto badframe;

        return regs->regs[4];

badframe:
        force_sig(SIGSEGV);
        return 0;
}

static int setup_rt_frame(void *sig_return, struct ksignal *ksig,
                          struct pt_regs *regs, sigset_t *set)
{
        int err = 0;
        struct extctx_layout extctx;
        struct rt_sigframe __user *frame;

        frame = get_sigframe(ksig, regs, &extctx);
        if (!access_ok(frame, sizeof(*frame) + extctx.size))
                return -EFAULT;

        /* Create siginfo. */
        err |= copy_siginfo_to_user(&frame->rs_info, &ksig->info);

        /* Create the ucontext. */
        err |= __put_user(0, &frame->rs_uctx.uc_flags);
        err |= __put_user(NULL, &frame->rs_uctx.uc_link);
        err |= __save_altstack(&frame->rs_uctx.uc_stack, regs->regs[3]);
        err |= setup_sigcontext(regs, &frame->rs_uctx.uc_mcontext, &extctx);
        err |= __copy_to_user(&frame->rs_uctx.uc_sigmask, set, sizeof(*set));

        if (err)
                return -EFAULT;

        /*
         * Arguments to signal handler:
         *
         *   a0 = signal number
         *   a1 = pointer to siginfo
         *   a2 = pointer to ucontext
         *
         * csr_era points to the signal handler; $r3 (sp) points to
         * the struct rt_sigframe.
         */
        regs->regs[4] = ksig->sig;
        regs->regs[5] = (unsigned long) &frame->rs_info;
        regs->regs[6] = (unsigned long) &frame->rs_uctx;
        regs->regs[3] = (unsigned long) frame;
        regs->regs[1] = (unsigned long) sig_return;
        regs->csr_era = (unsigned long) ksig->ka.sa.sa_handler;

        DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
               current->comm, current->pid,
               frame, regs->csr_era, regs->regs[1]);

        return 0;
}

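/*
 * Note on the restart machinery below: as the checks in this file
 * assume, the syscall path leaves a nonzero marker in regs->regs[0]
 * (otherwise unused, since $r0 is hardwired to zero) meaning "inside a
 * syscall", with the original a0 preserved in regs->orig_a0. Restarting
 * a syscall therefore means restoring a0 and stepping csr_era back over
 * the 4-byte syscall instruction; clearing regs[0] afterwards ensures
 * this happens only once.
 */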
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
        int ret;
        sigset_t *oldset = sigmask_to_save();
        void *vdso = current->mm->context.vdso;

        /* Are we from a system call? */
        if (regs->regs[0]) {
                switch (regs->regs[4]) {
                case -ERESTART_RESTARTBLOCK:
                case -ERESTARTNOHAND:
                        regs->regs[4] = -EINTR;
                        break;
                case -ERESTARTSYS:
                        if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
                                regs->regs[4] = -EINTR;
                                break;
                        }
                        fallthrough;
                case -ERESTARTNOINTR:
                        regs->regs[4] = regs->orig_a0;
                        regs->csr_era -= 4;
                }

                regs->regs[0] = 0;      /* Don't deal with this again. */
        }

        rseq_signal_deliver(ksig, regs);

        ret = setup_rt_frame(vdso + current->thread.vdso->offset_sigreturn, ksig, regs, oldset);

        signal_setup_done(ret, ksig, 0);
}

void arch_do_signal_or_restart(struct pt_regs *regs)
{
        struct ksignal ksig;

        if (get_signal(&ksig)) {
                /* Whee! Actually deliver the signal. */
                handle_signal(&ksig, regs);
                return;
        }

        /* Are we from a system call? */
        if (regs->regs[0]) {
                switch (regs->regs[4]) {
                case -ERESTARTNOHAND:
                case -ERESTARTSYS:
                case -ERESTARTNOINTR:
                        regs->regs[4] = regs->orig_a0;
                        regs->csr_era -= 4;
                        break;

                case -ERESTART_RESTARTBLOCK:
                        regs->regs[4] = regs->orig_a0;
                        regs->regs[11] = __NR_restart_syscall;
                        regs->csr_era -= 4;
                        break;
                }
                regs->regs[0] = 0;      /* Don't deal with this again. */
        }

        /*
         * If there's no signal to deliver, we just put the saved sigmask
         * back.
         */
        restore_saved_sigmask();
}
1075 }