// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/signal.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
9 #include <linux/cache.h>
10 #include <linux/compat.h>
11 #include <linux/errno.h>
12 #include <linux/kernel.h>
13 #include <linux/signal.h>
14 #include <linux/personality.h>
15 #include <linux/freezer.h>
16 #include <linux/stddef.h>
17 #include <linux/uaccess.h>
18 #include <linux/sizes.h>
19 #include <linux/string.h>
20 #include <linux/tracehook.h>
21 #include <linux/ratelimit.h>
22 #include <linux/syscalls.h>
24 #include <asm/daifflags.h>
25 #include <asm/debug-monitors.h>
27 #include <asm/cacheflush.h>
28 #include <asm/ucontext.h>
29 #include <asm/unistd.h>
30 #include <asm/fpsimd.h>
31 #include <asm/ptrace.h>
32 #include <asm/syscall.h>
33 #include <asm/signal32.h>
34 #include <asm/traps.h>
/*
 * Do a signal return; undo the signal stack. These are aligned to 128-bit.
 */
50 struct rt_sigframe_user_layout
{
51 struct rt_sigframe __user
*sigframe
;
52 struct frame_record __user
*next_frame
;
54 unsigned long size
; /* size of allocated sigframe data */
55 unsigned long limit
; /* largest allowed size */
57 unsigned long fpsimd_offset
;
58 unsigned long esr_offset
;
59 unsigned long sve_offset
;
60 unsigned long extra_offset
;
61 unsigned long end_offset
;
64 #define BASE_SIGFRAME_SIZE round_up(sizeof(struct rt_sigframe), 16)
65 #define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16)
66 #define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16)
68 static void init_user_layout(struct rt_sigframe_user_layout
*user
)
70 const size_t reserved_size
=
71 sizeof(user
->sigframe
->uc
.uc_mcontext
.__reserved
);
73 memset(user
, 0, sizeof(*user
));
74 user
->size
= offsetof(struct rt_sigframe
, uc
.uc_mcontext
.__reserved
);
76 user
->limit
= user
->size
+ reserved_size
;
78 user
->limit
-= TERMINATOR_SIZE
;
79 user
->limit
-= EXTRA_CONTEXT_SIZE
;
80 /* Reserve space for extension and terminator ^ */
83 static size_t sigframe_size(struct rt_sigframe_user_layout
const *user
)
85 return round_up(max(user
->size
, sizeof(struct rt_sigframe
)), 16);
/*
 * Sanity limit on the approximate maximum size of signal frame we'll
 * try to generate.  Stack alignment padding and the frame record are
 * not taken into account.  This limit is not a guarantee and is
 * NOT ABI.
 */
#define SIGFRAME_MAXSZ SZ_64K
96 static int __sigframe_alloc(struct rt_sigframe_user_layout
*user
,
97 unsigned long *offset
, size_t size
, bool extend
)
99 size_t padded_size
= round_up(size
, 16);
101 if (padded_size
> user
->limit
- user
->size
&&
102 !user
->extra_offset
&&
106 user
->limit
+= EXTRA_CONTEXT_SIZE
;
107 ret
= __sigframe_alloc(user
, &user
->extra_offset
,
108 sizeof(struct extra_context
), false);
110 user
->limit
-= EXTRA_CONTEXT_SIZE
;
114 /* Reserve space for the __reserved[] terminator */
115 user
->size
+= TERMINATOR_SIZE
;
118 * Allow expansion up to SIGFRAME_MAXSZ, ensuring space for
121 user
->limit
= SIGFRAME_MAXSZ
- TERMINATOR_SIZE
;
124 /* Still not enough space? Bad luck! */
125 if (padded_size
> user
->limit
- user
->size
)
128 *offset
= user
->size
;
129 user
->size
+= padded_size
;
135 * Allocate space for an optional record of <size> bytes in the user
136 * signal frame. The offset from the signal frame base address to the
137 * allocated block is assigned to *offset.
139 static int sigframe_alloc(struct rt_sigframe_user_layout
*user
,
140 unsigned long *offset
, size_t size
)
142 return __sigframe_alloc(user
, offset
, size
, true);
145 /* Allocate the null terminator record and prevent further allocations */
146 static int sigframe_alloc_end(struct rt_sigframe_user_layout
*user
)
150 /* Un-reserve the space reserved for the terminator: */
151 user
->limit
+= TERMINATOR_SIZE
;
153 ret
= sigframe_alloc(user
, &user
->end_offset
,
154 sizeof(struct _aarch64_ctx
));
158 /* Prevent further allocation: */
159 user
->limit
= user
->size
;
163 static void __user
*apply_user_offset(
164 struct rt_sigframe_user_layout
const *user
, unsigned long offset
)
166 char __user
*base
= (char __user
*)user
->sigframe
;
168 return base
+ offset
;
171 static int preserve_fpsimd_context(struct fpsimd_context __user
*ctx
)
173 struct user_fpsimd_state
const *fpsimd
=
174 ¤t
->thread
.uw
.fpsimd_state
;
177 /* copy the FP and status/control registers */
178 err
= __copy_to_user(ctx
->vregs
, fpsimd
->vregs
, sizeof(fpsimd
->vregs
));
179 __put_user_error(fpsimd
->fpsr
, &ctx
->fpsr
, err
);
180 __put_user_error(fpsimd
->fpcr
, &ctx
->fpcr
, err
);
182 /* copy the magic/size information */
183 __put_user_error(FPSIMD_MAGIC
, &ctx
->head
.magic
, err
);
184 __put_user_error(sizeof(struct fpsimd_context
), &ctx
->head
.size
, err
);
186 return err
? -EFAULT
: 0;
189 static int restore_fpsimd_context(struct fpsimd_context __user
*ctx
)
191 struct user_fpsimd_state fpsimd
;
195 /* check the magic/size information */
196 __get_user_error(magic
, &ctx
->head
.magic
, err
);
197 __get_user_error(size
, &ctx
->head
.size
, err
);
200 if (magic
!= FPSIMD_MAGIC
|| size
!= sizeof(struct fpsimd_context
))
203 /* copy the FP and status/control registers */
204 err
= __copy_from_user(fpsimd
.vregs
, ctx
->vregs
,
205 sizeof(fpsimd
.vregs
));
206 __get_user_error(fpsimd
.fpsr
, &ctx
->fpsr
, err
);
207 __get_user_error(fpsimd
.fpcr
, &ctx
->fpcr
, err
);
209 clear_thread_flag(TIF_SVE
);
211 /* load the hardware registers from the fpsimd_state structure */
213 fpsimd_update_current_state(&fpsimd
);
215 return err
? -EFAULT
: 0;
220 struct fpsimd_context __user
*fpsimd
;
221 struct sve_context __user
*sve
;
224 #ifdef CONFIG_ARM64_SVE
226 static int preserve_sve_context(struct sve_context __user
*ctx
)
229 u16 reserved
[ARRAY_SIZE(ctx
->__reserved
)];
230 unsigned int vl
= current
->thread
.sve_vl
;
233 if (test_thread_flag(TIF_SVE
))
234 vq
= sve_vq_from_vl(vl
);
236 memset(reserved
, 0, sizeof(reserved
));
238 __put_user_error(SVE_MAGIC
, &ctx
->head
.magic
, err
);
239 __put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq
), 16),
240 &ctx
->head
.size
, err
);
241 __put_user_error(vl
, &ctx
->vl
, err
);
242 BUILD_BUG_ON(sizeof(ctx
->__reserved
) != sizeof(reserved
));
243 err
|= __copy_to_user(&ctx
->__reserved
, reserved
, sizeof(reserved
));
247 * This assumes that the SVE state has already been saved to
248 * the task struct by calling the function
249 * fpsimd_signal_preserve_current_state().
251 err
|= __copy_to_user((char __user
*)ctx
+ SVE_SIG_REGS_OFFSET
,
252 current
->thread
.sve_state
,
253 SVE_SIG_REGS_SIZE(vq
));
256 return err
? -EFAULT
: 0;
259 static int restore_sve_fpsimd_context(struct user_ctxs
*user
)
263 struct user_fpsimd_state fpsimd
;
264 struct sve_context sve
;
266 if (__copy_from_user(&sve
, user
->sve
, sizeof(sve
)))
269 if (sve
.vl
!= current
->thread
.sve_vl
)
272 if (sve
.head
.size
<= sizeof(*user
->sve
)) {
273 clear_thread_flag(TIF_SVE
);
277 vq
= sve_vq_from_vl(sve
.vl
);
279 if (sve
.head
.size
< SVE_SIG_CONTEXT_SIZE(vq
))
283 * Careful: we are about __copy_from_user() directly into
284 * thread.sve_state with preemption enabled, so protection is
285 * needed to prevent a racing context switch from writing stale
286 * registers back over the new data.
289 fpsimd_flush_task_state(current
);
290 /* From now, fpsimd_thread_switch() won't touch thread.sve_state */
293 if (!current
->thread
.sve_state
) {
294 clear_thread_flag(TIF_SVE
);
298 err
= __copy_from_user(current
->thread
.sve_state
,
299 (char __user
const *)user
->sve
+
301 SVE_SIG_REGS_SIZE(vq
));
305 set_thread_flag(TIF_SVE
);
308 /* copy the FP and status/control registers */
309 /* restore_sigframe() already checked that user->fpsimd != NULL. */
310 err
= __copy_from_user(fpsimd
.vregs
, user
->fpsimd
->vregs
,
311 sizeof(fpsimd
.vregs
));
312 __get_user_error(fpsimd
.fpsr
, &user
->fpsimd
->fpsr
, err
);
313 __get_user_error(fpsimd
.fpcr
, &user
->fpsimd
->fpcr
, err
);
315 /* load the hardware registers from the fpsimd_state structure */
317 fpsimd_update_current_state(&fpsimd
);
319 return err
? -EFAULT
: 0;
#else /* ! CONFIG_ARM64_SVE */

/* Turn any non-optimised out attempts to use these into a link error: */
extern int preserve_sve_context(void __user *ctx);
extern int restore_sve_fpsimd_context(struct user_ctxs *user);

#endif /* ! CONFIG_ARM64_SVE */
331 static int parse_user_sigframe(struct user_ctxs
*user
,
332 struct rt_sigframe __user
*sf
)
334 struct sigcontext __user
*const sc
= &sf
->uc
.uc_mcontext
;
335 struct _aarch64_ctx __user
*head
;
336 char __user
*base
= (char __user
*)&sc
->__reserved
;
338 size_t limit
= sizeof(sc
->__reserved
);
339 bool have_extra_context
= false;
340 char const __user
*const sfp
= (char const __user
*)sf
;
345 if (!IS_ALIGNED((unsigned long)base
, 16))
351 char const __user
*userp
;
352 struct extra_context
const __user
*extra
;
355 struct _aarch64_ctx
const __user
*end
;
356 u32 end_magic
, end_size
;
358 if (limit
- offset
< sizeof(*head
))
361 if (!IS_ALIGNED(offset
, 16))
364 head
= (struct _aarch64_ctx __user
*)(base
+ offset
);
365 __get_user_error(magic
, &head
->magic
, err
);
366 __get_user_error(size
, &head
->size
, err
);
370 if (limit
- offset
< size
)
381 if (!system_supports_fpsimd())
386 if (size
< sizeof(*user
->fpsimd
))
389 user
->fpsimd
= (struct fpsimd_context __user
*)head
;
397 if (!system_supports_sve())
403 if (size
< sizeof(*user
->sve
))
406 user
->sve
= (struct sve_context __user
*)head
;
410 if (have_extra_context
)
413 if (size
< sizeof(*extra
))
416 userp
= (char const __user
*)head
;
418 extra
= (struct extra_context
const __user
*)userp
;
421 __get_user_error(extra_datap
, &extra
->datap
, err
);
422 __get_user_error(extra_size
, &extra
->size
, err
);
426 /* Check for the dummy terminator in __reserved[]: */
428 if (limit
- offset
- size
< TERMINATOR_SIZE
)
431 end
= (struct _aarch64_ctx
const __user
*)userp
;
432 userp
+= TERMINATOR_SIZE
;
434 __get_user_error(end_magic
, &end
->magic
, err
);
435 __get_user_error(end_size
, &end
->size
, err
);
439 if (end_magic
|| end_size
)
442 /* Prevent looping/repeated parsing of extra_context */
443 have_extra_context
= true;
445 base
= (__force
void __user
*)extra_datap
;
446 if (!IS_ALIGNED((unsigned long)base
, 16))
449 if (!IS_ALIGNED(extra_size
, 16))
455 /* Reject "unreasonably large" frames: */
456 if (extra_size
> sfp
+ SIGFRAME_MAXSZ
- userp
)
460 * Ignore trailing terminator in __reserved[]
461 * and start parsing extra data:
466 if (!access_ok(base
, limit
))
475 if (size
< sizeof(*head
))
478 if (limit
- offset
< size
)
491 static int restore_sigframe(struct pt_regs
*regs
,
492 struct rt_sigframe __user
*sf
)
496 struct user_ctxs user
;
498 err
= __copy_from_user(&set
, &sf
->uc
.uc_sigmask
, sizeof(set
));
500 set_current_blocked(&set
);
502 for (i
= 0; i
< 31; i
++)
503 __get_user_error(regs
->regs
[i
], &sf
->uc
.uc_mcontext
.regs
[i
],
505 __get_user_error(regs
->sp
, &sf
->uc
.uc_mcontext
.sp
, err
);
506 __get_user_error(regs
->pc
, &sf
->uc
.uc_mcontext
.pc
, err
);
507 __get_user_error(regs
->pstate
, &sf
->uc
.uc_mcontext
.pstate
, err
);
510 * Avoid sys_rt_sigreturn() restarting.
512 forget_syscall(regs
);
514 err
|= !valid_user_regs(®s
->user_regs
, current
);
516 err
= parse_user_sigframe(&user
, sf
);
518 if (err
== 0 && system_supports_fpsimd()) {
523 if (!system_supports_sve())
526 err
= restore_sve_fpsimd_context(&user
);
528 err
= restore_fpsimd_context(user
.fpsimd
);
535 SYSCALL_DEFINE0(rt_sigreturn
)
537 struct pt_regs
*regs
= current_pt_regs();
538 struct rt_sigframe __user
*frame
;
540 /* Always make any pending restarted system calls return -EINTR */
541 current
->restart_block
.fn
= do_no_restart_syscall
;
544 * Since we stacked the signal on a 128-bit boundary, then 'sp' should
545 * be word aligned here.
550 frame
= (struct rt_sigframe __user
*)regs
->sp
;
552 if (!access_ok(frame
, sizeof (*frame
)))
555 if (restore_sigframe(regs
, frame
))
558 if (restore_altstack(&frame
->uc
.uc_stack
))
561 return regs
->regs
[0];
564 arm64_notify_segfault(regs
->sp
);
569 * Determine the layout of optional records in the signal frame
571 * add_all: if true, lays out the biggest possible signal frame for
572 * this task; otherwise, generates a layout for the current state
575 static int setup_sigframe_layout(struct rt_sigframe_user_layout
*user
,
580 err
= sigframe_alloc(user
, &user
->fpsimd_offset
,
581 sizeof(struct fpsimd_context
));
585 /* fault information, if valid */
586 if (add_all
|| current
->thread
.fault_code
) {
587 err
= sigframe_alloc(user
, &user
->esr_offset
,
588 sizeof(struct esr_context
));
593 if (system_supports_sve()) {
596 if (add_all
|| test_thread_flag(TIF_SVE
)) {
600 vl
= current
->thread
.sve_vl
;
602 vq
= sve_vq_from_vl(vl
);
605 err
= sigframe_alloc(user
, &user
->sve_offset
,
606 SVE_SIG_CONTEXT_SIZE(vq
));
611 return sigframe_alloc_end(user
);
614 static int setup_sigframe(struct rt_sigframe_user_layout
*user
,
615 struct pt_regs
*regs
, sigset_t
*set
)
618 struct rt_sigframe __user
*sf
= user
->sigframe
;
620 /* set up the stack frame for unwinding */
621 __put_user_error(regs
->regs
[29], &user
->next_frame
->fp
, err
);
622 __put_user_error(regs
->regs
[30], &user
->next_frame
->lr
, err
);
624 for (i
= 0; i
< 31; i
++)
625 __put_user_error(regs
->regs
[i
], &sf
->uc
.uc_mcontext
.regs
[i
],
627 __put_user_error(regs
->sp
, &sf
->uc
.uc_mcontext
.sp
, err
);
628 __put_user_error(regs
->pc
, &sf
->uc
.uc_mcontext
.pc
, err
);
629 __put_user_error(regs
->pstate
, &sf
->uc
.uc_mcontext
.pstate
, err
);
631 __put_user_error(current
->thread
.fault_address
, &sf
->uc
.uc_mcontext
.fault_address
, err
);
633 err
|= __copy_to_user(&sf
->uc
.uc_sigmask
, set
, sizeof(*set
));
635 if (err
== 0 && system_supports_fpsimd()) {
636 struct fpsimd_context __user
*fpsimd_ctx
=
637 apply_user_offset(user
, user
->fpsimd_offset
);
638 err
|= preserve_fpsimd_context(fpsimd_ctx
);
641 /* fault information, if valid */
642 if (err
== 0 && user
->esr_offset
) {
643 struct esr_context __user
*esr_ctx
=
644 apply_user_offset(user
, user
->esr_offset
);
646 __put_user_error(ESR_MAGIC
, &esr_ctx
->head
.magic
, err
);
647 __put_user_error(sizeof(*esr_ctx
), &esr_ctx
->head
.size
, err
);
648 __put_user_error(current
->thread
.fault_code
, &esr_ctx
->esr
, err
);
651 /* Scalable Vector Extension state, if present */
652 if (system_supports_sve() && err
== 0 && user
->sve_offset
) {
653 struct sve_context __user
*sve_ctx
=
654 apply_user_offset(user
, user
->sve_offset
);
655 err
|= preserve_sve_context(sve_ctx
);
658 if (err
== 0 && user
->extra_offset
) {
659 char __user
*sfp
= (char __user
*)user
->sigframe
;
661 apply_user_offset(user
, user
->extra_offset
);
663 struct extra_context __user
*extra
;
664 struct _aarch64_ctx __user
*end
;
668 extra
= (struct extra_context __user
*)userp
;
669 userp
+= EXTRA_CONTEXT_SIZE
;
671 end
= (struct _aarch64_ctx __user
*)userp
;
672 userp
+= TERMINATOR_SIZE
;
675 * extra_datap is just written to the signal frame.
676 * The value gets cast back to a void __user *
679 extra_datap
= (__force u64
)userp
;
680 extra_size
= sfp
+ round_up(user
->size
, 16) - userp
;
682 __put_user_error(EXTRA_MAGIC
, &extra
->head
.magic
, err
);
683 __put_user_error(EXTRA_CONTEXT_SIZE
, &extra
->head
.size
, err
);
684 __put_user_error(extra_datap
, &extra
->datap
, err
);
685 __put_user_error(extra_size
, &extra
->size
, err
);
687 /* Add the terminator */
688 __put_user_error(0, &end
->magic
, err
);
689 __put_user_error(0, &end
->size
, err
);
692 /* set the "end" magic */
694 struct _aarch64_ctx __user
*end
=
695 apply_user_offset(user
, user
->end_offset
);
697 __put_user_error(0, &end
->magic
, err
);
698 __put_user_error(0, &end
->size
, err
);
704 static int get_sigframe(struct rt_sigframe_user_layout
*user
,
705 struct ksignal
*ksig
, struct pt_regs
*regs
)
707 unsigned long sp
, sp_top
;
710 init_user_layout(user
);
711 err
= setup_sigframe_layout(user
, false);
715 sp
= sp_top
= sigsp(regs
->sp
, ksig
);
717 sp
= round_down(sp
- sizeof(struct frame_record
), 16);
718 user
->next_frame
= (struct frame_record __user
*)sp
;
720 sp
= round_down(sp
, 16) - sigframe_size(user
);
721 user
->sigframe
= (struct rt_sigframe __user
*)sp
;
724 * Check that we can actually write to the signal frame.
726 if (!access_ok(user
->sigframe
, sp_top
- sp
))
732 static void setup_return(struct pt_regs
*regs
, struct k_sigaction
*ka
,
733 struct rt_sigframe_user_layout
*user
, int usig
)
735 __sigrestore_t sigtramp
;
737 regs
->regs
[0] = usig
;
738 regs
->sp
= (unsigned long)user
->sigframe
;
739 regs
->regs
[29] = (unsigned long)&user
->next_frame
->fp
;
740 regs
->pc
= (unsigned long)ka
->sa
.sa_handler
;
743 * Signal delivery is a (wacky) indirect function call in
744 * userspace, so simulate the same setting of BTYPE as a BLR
745 * <register containing the signal handler entry point>.
746 * Signal delivery to a location in a PROT_BTI guarded page
747 * that is not a function entry point will now trigger a
748 * SIGILL in userspace.
750 * If the signal handler entry point is not in a PROT_BTI
751 * guarded page, this is harmless.
753 if (system_supports_bti()) {
754 regs
->pstate
&= ~PSR_BTYPE_MASK
;
755 regs
->pstate
|= PSR_BTYPE_C
;
758 /* TCO (Tag Check Override) always cleared for signal handlers */
759 regs
->pstate
&= ~PSR_TCO_BIT
;
761 if (ka
->sa
.sa_flags
& SA_RESTORER
)
762 sigtramp
= ka
->sa
.sa_restorer
;
764 sigtramp
= VDSO_SYMBOL(current
->mm
->context
.vdso
, sigtramp
);
766 regs
->regs
[30] = (unsigned long)sigtramp
;
769 static int setup_rt_frame(int usig
, struct ksignal
*ksig
, sigset_t
*set
,
770 struct pt_regs
*regs
)
772 struct rt_sigframe_user_layout user
;
773 struct rt_sigframe __user
*frame
;
776 fpsimd_signal_preserve_current_state();
778 if (get_sigframe(&user
, ksig
, regs
))
781 frame
= user
.sigframe
;
783 __put_user_error(0, &frame
->uc
.uc_flags
, err
);
784 __put_user_error(NULL
, &frame
->uc
.uc_link
, err
);
786 err
|= __save_altstack(&frame
->uc
.uc_stack
, regs
->sp
);
787 err
|= setup_sigframe(&user
, regs
, set
);
789 setup_return(regs
, &ksig
->ka
, &user
, usig
);
790 if (ksig
->ka
.sa
.sa_flags
& SA_SIGINFO
) {
791 err
|= copy_siginfo_to_user(&frame
->info
, &ksig
->info
);
792 regs
->regs
[1] = (unsigned long)&frame
->info
;
793 regs
->regs
[2] = (unsigned long)&frame
->uc
;
800 static void setup_restart_syscall(struct pt_regs
*regs
)
802 if (is_compat_task())
803 compat_setup_restart_syscall(regs
);
805 regs
->regs
[8] = __NR_restart_syscall
;
809 * OK, we're invoking a handler
811 static void handle_signal(struct ksignal
*ksig
, struct pt_regs
*regs
)
813 sigset_t
*oldset
= sigmask_to_save();
814 int usig
= ksig
->sig
;
817 rseq_signal_deliver(ksig
, regs
);
820 * Set up the stack frame
822 if (is_compat_task()) {
823 if (ksig
->ka
.sa
.sa_flags
& SA_SIGINFO
)
824 ret
= compat_setup_rt_frame(usig
, ksig
, oldset
, regs
);
826 ret
= compat_setup_frame(usig
, ksig
, oldset
, regs
);
828 ret
= setup_rt_frame(usig
, ksig
, oldset
, regs
);
832 * Check that the resulting registers are actually sane.
834 ret
|= !valid_user_regs(®s
->user_regs
, current
);
836 /* Step into the signal handler if we are stepping */
837 signal_setup_done(ret
, ksig
, test_thread_flag(TIF_SINGLESTEP
));
841 * Note that 'init' is a special process: it doesn't get signals it doesn't
842 * want to handle. Thus you cannot kill init even with a SIGKILL even by
845 * Note that we go through the signals twice: once to check the signals that
846 * the kernel can handle, and then we build all the user-level signal handling
847 * stack-frames in one go after that.
849 static void do_signal(struct pt_regs
*regs
)
851 unsigned long continue_addr
= 0, restart_addr
= 0;
854 bool syscall
= in_syscall(regs
);
857 * If we were from a system call, check for system call restarting...
860 continue_addr
= regs
->pc
;
861 restart_addr
= continue_addr
- (compat_thumb_mode(regs
) ? 2 : 4);
862 retval
= regs
->regs
[0];
865 * Avoid additional syscall restarting via ret_to_user.
867 forget_syscall(regs
);
870 * Prepare for system call restart. We do this here so that a
871 * debugger will see the already changed PC.
874 case -ERESTARTNOHAND
:
876 case -ERESTARTNOINTR
:
877 case -ERESTART_RESTARTBLOCK
:
878 regs
->regs
[0] = regs
->orig_x0
;
879 regs
->pc
= restart_addr
;
885 * Get the signal to deliver. When running under ptrace, at this point
886 * the debugger may change all of our registers.
888 if (get_signal(&ksig
)) {
890 * Depending on the signal settings, we may need to revert the
891 * decision to restart the system call, but skip this if a
892 * debugger has chosen to restart at a different PC.
894 if (regs
->pc
== restart_addr
&&
895 (retval
== -ERESTARTNOHAND
||
896 retval
== -ERESTART_RESTARTBLOCK
||
897 (retval
== -ERESTARTSYS
&&
898 !(ksig
.ka
.sa
.sa_flags
& SA_RESTART
)))) {
899 syscall_set_return_value(current
, regs
, -EINTR
, 0);
900 regs
->pc
= continue_addr
;
903 handle_signal(&ksig
, regs
);
908 * Handle restarting a different system call. As above, if a debugger
909 * has chosen to restart at a different PC, ignore the restart.
911 if (syscall
&& regs
->pc
== restart_addr
) {
912 if (retval
== -ERESTART_RESTARTBLOCK
)
913 setup_restart_syscall(regs
);
914 user_rewind_single_step(current
);
917 restore_saved_sigmask();
920 void do_notify_resume(struct pt_regs
*regs
, unsigned long thread_flags
)
923 if (thread_flags
& _TIF_NEED_RESCHED
) {
924 /* Unmask Debug and SError for the next task */
925 local_daif_restore(DAIF_PROCCTX_NOIRQ
);
929 local_daif_restore(DAIF_PROCCTX
);
931 if (thread_flags
& _TIF_UPROBE
)
932 uprobe_notify_resume(regs
);
934 if (thread_flags
& _TIF_MTE_ASYNC_FAULT
) {
935 clear_thread_flag(TIF_MTE_ASYNC_FAULT
);
936 send_sig_fault(SIGSEGV
, SEGV_MTEAERR
,
937 (void __user
*)NULL
, current
);
940 if (thread_flags
& (_TIF_SIGPENDING
| _TIF_NOTIFY_SIGNAL
))
943 if (thread_flags
& _TIF_NOTIFY_RESUME
) {
944 tracehook_notify_resume(regs
);
945 rseq_handle_notify_resume(NULL
, regs
);
948 if (thread_flags
& _TIF_FOREIGN_FPSTATE
)
949 fpsimd_restore_current_state();
953 thread_flags
= READ_ONCE(current_thread_info()->flags
);
954 } while (thread_flags
& _TIF_WORK_MASK
);
957 unsigned long __ro_after_init signal_minsigstksz
;
960 * Determine the stack space required for guaranteed signal devliery.
961 * This function is used to populate AT_MINSIGSTKSZ at process startup.
962 * cpufeatures setup is assumed to be complete.
964 void __init
minsigstksz_setup(void)
966 struct rt_sigframe_user_layout user
;
968 init_user_layout(&user
);
971 * If this fails, SIGFRAME_MAXSZ needs to be enlarged. It won't
972 * be big enough, but it's our best guess:
974 if (WARN_ON(setup_sigframe_layout(&user
, true)))
977 signal_minsigstksz
= sigframe_size(&user
) +
978 round_up(sizeof(struct frame_record
), 16) +
979 16; /* max alignment padding */