/*
 * Emulation of Linux signals
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "qemu/cutils.h"
#include "gdbstub/user.h"
#include "exec/page-protection.h"
#include "accel/tcg/cpu-ops.h"

#include <sys/ucontext.h>
#include <sys/resource.h>

#include "qemu.h"
#include "user-internals.h"
#include "strace.h"
#include "loader.h"
#include "trace.h"
#include "signal-common.h"
#include "host-signal.h"
#include "user/cpu_loop.h"
#include "user/page-protection.h"
#include "user/safe-syscall.h"
#include "user/signal.h"
/* target_siginfo_t must fit in gdbstub's siginfo save area. */
QEMU_BUILD_BUG_ON(sizeof(target_siginfo_t) > MAX_SIGINFO_LENGTH);
static struct target_sigaction sigact_table[TARGET_NSIG];

static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);
/* Fallback addresses into sigtramp page. */
abi_ulong default_sigreturn;
abi_ulong default_rt_sigreturn;
/*
 * System includes define _NSIG as SIGRTMAX + 1, but qemu (like the kernel)
 * defines TARGET_NSIG as TARGET_SIGRTMAX and the first signal is 1.
 * Signal number 0 is reserved for use as kill(pid, 0), to test whether
 * a process exists without sending it a signal.
 */
QEMU_BUILD_BUG_ON(__SIGRTMAX + 1 != _NSIG);
static uint8_t host_to_target_signal_table[_NSIG] = {
#define MAKE_SIG_ENTRY(sig)     [sig] = TARGET_##sig,
    MAKE_SIGNAL_LIST
#undef MAKE_SIG_ENTRY
};

static uint8_t target_to_host_signal_table[TARGET_NSIG + 1];
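
/*
 * A note on the invariant (inferred from signal_table_init() below):
 * the two tables are inverses over the mapped range, so
 * target_to_host_signal(host_to_target_signal(sig)) == sig for every
 * mapped signal.  Unmapped entries are pushed out of range
 * (TARGET_NSIG + 1 or _NSIG) so the range checks in the accessors
 * below reject them.
 */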

/* valid sig is between 1 and _NSIG - 1 */
int host_to_target_signal(int sig)
{
    if (sig < 1) {
        return sig;
    }
    if (sig >= _NSIG) {
        return TARGET_NSIG + 1;
    }
    return host_to_target_signal_table[sig];
}

/* valid sig is between 1 and TARGET_NSIG */
int target_to_host_signal(int sig)
{
    if (sig < 1) {
        return sig;
    }
    if (sig > TARGET_NSIG) {
        return _NSIG;
    }
    return target_to_host_signal_table[sig];
}

static inline void target_sigaddset(target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    set->sig[signum / TARGET_NSIG_BPW] |= mask;
}

static inline int target_sigismember(const target_sigset_t *set, int signum)
{
    signum--;
    abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
    return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
}
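
/*
 * Example of the bit layout used above: after the signum-- bias,
 * target signal 1 occupies bit 0 of sig[0], and target signal
 * TARGET_NSIG_BPW + 1 occupies bit 0 of sig[1].
 */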

void host_to_target_sigset_internal(target_sigset_t *d,
                                    const sigset_t *s)
{
    int host_sig, target_sig;

    target_sigemptyset(d);

    for (host_sig = 1; host_sig < _NSIG; host_sig++) {
        target_sig = host_to_target_signal(host_sig);
        if (target_sig < 1 || target_sig > TARGET_NSIG) {
            continue;
        }
        if (sigismember(s, host_sig)) {
            target_sigaddset(d, target_sig);
        }
    }
}

void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
{
    target_sigset_t d1;
    int i;

    host_to_target_sigset_internal(&d1, s);
    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        d->sig[i] = tswapal(d1.sig[i]);
    }
}

void target_to_host_sigset_internal(sigset_t *d,
                                    const target_sigset_t *s)
{
    int host_sig, target_sig;

    sigemptyset(d);

    for (target_sig = 1; target_sig <= TARGET_NSIG; target_sig++) {
        host_sig = target_to_host_signal(target_sig);
        if (host_sig < 1 || host_sig >= _NSIG) {
            continue;
        }
        if (target_sigismember(s, target_sig)) {
            sigaddset(d, host_sig);
        }
    }
}

void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
{
    target_sigset_t s1;
    int i;

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        s1.sig[i] = tswapal(s->sig[i]);
    }
    target_to_host_sigset_internal(d, &s1);
}
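
/*
 * The _internal variants above operate on host-endian words; the
 * non-internal wrappers add the tswapal() swap on each word so the
 * guest sees its own byte order.
 */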

void host_to_target_old_sigset(abi_ulong *old_sigset,
                               const sigset_t *sigset)
{
    target_sigset_t d;

    host_to_target_sigset(&d, sigset);
    *old_sigset = d.sig[0];
}

void target_to_host_old_sigset(sigset_t *sigset,
                               const abi_ulong *old_sigset)
{
    target_sigset_t d;
    int i;

    d.sig[0] = *old_sigset;
    for (i = 1; i < TARGET_NSIG_WORDS; i++) {
        d.sig[i] = 0;
    }
    target_to_host_sigset(sigset, &d);
}

int block_signals(void)
{
    TaskState *ts = get_task_state(thread_cpu);
    sigset_t set;

    /* It's OK to block everything including SIGSEGV, because we won't
     * run any further guest code before unblocking signals in
     * process_pending_signals().
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    return qatomic_xchg(&ts->signal_pending, 1);
}
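
/*
 * Typical caller pattern (see do_sigprocmask() and do_sigaction()
 * below): a non-zero return means a signal was already pending and
 * the emulated syscall must be restarted:
 *
 *     if (block_signals()) {
 *         return -QEMU_ERESTARTSYS;
 *     }
 */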

/* Wrapper for sigprocmask function
 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
 * are host signal sets, not guest ones. Returns -QEMU_ERESTARTSYS if
 * a signal was already pending and the syscall must be restarted, or
 * 0 on success.
 * If set is NULL, this is guaranteed not to fail.
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = get_task_state(thread_cpu);

    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to change blocking status of KILL or STOP */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}

/* Just set the guest's signal mask to the specified value; the
 * caller is assumed to have called block_signals() already.
 */
void set_sigmask(const sigset_t *set)
{
    TaskState *ts = get_task_state(thread_cpu);

    ts->signal_mask = *set;
}

/* sigaltstack management */

int on_sig_stack(unsigned long sp)
{
    TaskState *ts = get_task_state(thread_cpu);

    return (sp - ts->sigaltstack_used.ss_sp
            < ts->sigaltstack_used.ss_size);
}

int sas_ss_flags(unsigned long sp)
{
    TaskState *ts = get_task_state(thread_cpu);

    return (ts->sigaltstack_used.ss_size == 0 ? SS_DISABLE
            : on_sig_stack(sp) ? SS_ONSTACK : 0);
}

abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka)
{
    /*
     * This is the X/Open sanctioned signal stack switching.
     */
    TaskState *ts = get_task_state(thread_cpu);

    if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
        return ts->sigaltstack_used.ss_sp + ts->sigaltstack_used.ss_size;
    }
    return sp;
}
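
/*
 * Note: ss_sp + ss_size is the high end of the alternate stack; on
 * targets whose stacks grow downward, the per-arch frame setup code
 * then builds the handler frame down from there (applying any further
 * alignment itself).
 */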

void target_save_altstack(target_stack_t *uss, CPUArchState *env)
{
    TaskState *ts = get_task_state(thread_cpu);

    __put_user(ts->sigaltstack_used.ss_sp, &uss->ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &uss->ss_flags);
    __put_user(ts->sigaltstack_used.ss_size, &uss->ss_size);
}

abi_long target_restore_altstack(target_stack_t *uss, CPUArchState *env)
{
    TaskState *ts = get_task_state(thread_cpu);
    size_t minstacksize = TARGET_MINSIGSTKSZ;
    target_stack_t ss;

#if defined(TARGET_PPC64)
    /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
    struct image_info *image = ts->info;
    if (get_ppc64_abi(image) > 1) {
        minstacksize = 4096;
    }
#endif

    __get_user(ss.ss_sp, &uss->ss_sp);
    __get_user(ss.ss_size, &uss->ss_size);
    __get_user(ss.ss_flags, &uss->ss_flags);

    if (on_sig_stack(get_sp_from_cpustate(env))) {
        return -TARGET_EPERM;
    }

    switch (ss.ss_flags) {
    default:
        return -TARGET_EINVAL;

    case TARGET_SS_DISABLE:
        ss.ss_size = 0;
        ss.ss_sp = 0;
        break;

    case TARGET_SS_ONSTACK:
    case 0:
        if (ss.ss_size < minstacksize) {
            return -TARGET_ENOMEM;
        }
        break;
    }

    ts->sigaltstack_used.ss_sp = ss.ss_sp;
    ts->sigaltstack_used.ss_size = ss.ss_size;
    return 0;
}

/* siginfo conversion */

static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;

    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    /* This memset serves two purposes:
     * (1) ensure we don't leak random junk to the guest later
     * (2) placate false positives from gcc about fields
     *     being used uninitialized if it chooses to inline both this
     *     function and tswap_siginfo() into host_to_target_siginfo().
     */
    memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));

    /* This is awkward, because we have to use a combination of
     * the si_code and si_signo to figure out which of the union's
     * members are valid. (Within the host kernel it is always possible
     * to tell, but the kernel carefully avoids giving userspace the
     * high 16 bits of si_code, so we don't have the information to
     * do this the easy way...) We therefore make our best guess,
     * bearing in mind that a guest can spoof most of the si_codes
     * via rt_sigqueueinfo() if it likes.
     *
     * Once we have made our guess, we record it in the top 16 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */
    switch (si_code) {
    case SI_USER:
    case SI_TKILL:
    case SI_KERNEL:
        /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
         * These are the only unspoofable si_code values.
         */
        tinfo->_sifields._kill._pid = info->si_pid;
        tinfo->_sifields._kill._uid = info->si_uid;
        si_type = QEMU_SI_KILL;
        break;
    default:
        /* Everything else is spoofable. Make best guess based on signal */
        switch (sig) {
        case TARGET_SIGCHLD:
            tinfo->_sifields._sigchld._pid = info->si_pid;
            tinfo->_sifields._sigchld._uid = info->si_uid;
            if (si_code == CLD_EXITED) {
                tinfo->_sifields._sigchld._status = info->si_status;
            } else {
                tinfo->_sifields._sigchld._status
                    = host_to_target_signal(info->si_status & 0x7f)
                        | (info->si_status & ~0x7f);
            }
            tinfo->_sifields._sigchld._utime = info->si_utime;
            tinfo->_sifields._sigchld._stime = info->si_stime;
            si_type = QEMU_SI_CHLD;
            break;
        case TARGET_SIGIO:
            tinfo->_sifields._sigpoll._band = info->si_band;
            tinfo->_sifields._sigpoll._fd = info->si_fd;
            si_type = QEMU_SI_POLL;
            break;
        default:
            /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
            tinfo->_sifields._rt._pid = info->si_pid;
            tinfo->_sifields._rt._uid = info->si_uid;
            /* XXX: potential problem if 64 bit */
            tinfo->_sifields._rt._sigval.sival_ptr
                = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
            si_type = QEMU_SI_RT;
            break;
        }
        break;
    }

    tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}
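
/*
 * Layout of the packed si_code produced above (internal only, never
 * exposed to the guest):
 *
 *   bits 31..16  QEMU_SI_* type marker
 *   bits 15..0   the real si_code
 *
 * tswap_siginfo() recovers the two halves with extract32() and
 * sextract32() respectively.
 */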

static void tswap_siginfo(target_siginfo_t *tinfo,
                          const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /* We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
    switch (si_type) {
    case QEMU_SI_KILL:
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);
        break;
    case QEMU_SI_POLL:
        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);
        break;
    case QEMU_SI_CHLD:
        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);
        break;
    case QEMU_SI_RT:
        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);
        break;
    default:
        g_assert_not_reached();
    }
}

void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
{
    target_siginfo_t tgt_tmp;
    host_to_target_siginfo_noswap(&tgt_tmp, info);
    tswap_siginfo(tinfo, &tgt_tmp);
}

/* XXX: we only support POSIX RT signals here. */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    /* This conversion is used only for the rt_sigqueueinfo syscall,
     * and so we know that the _rt fields are the valid ones.
     */
    abi_ulong sival_ptr;

    __get_user(info->si_signo, &tinfo->si_signo);
    __get_user(info->si_errno, &tinfo->si_errno);
    __get_user(info->si_code, &tinfo->si_code);
    __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
    __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
    __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
    info->si_value.sival_ptr = (void *)(long)sival_ptr;
}

/* returns 1 if given signal should dump core if not handled */
static int core_dump_signal(int sig)
{
    switch (sig) {
    case TARGET_SIGABRT:
    case TARGET_SIGFPE:
    case TARGET_SIGILL:
    case TARGET_SIGQUIT:
    case TARGET_SIGSEGV:
    case TARGET_SIGTRAP:
    case TARGET_SIGBUS:
        return 1;
    default:
        return 0;
    }
}

int host_interrupt_signal;

static void signal_table_init(const char *rtsig_map)
{
    int hsig, tsig, count;

    if (rtsig_map) {
        /*
         * Map host RT signals to target RT signals according to the
         * user-provided specification.
         */
        const char *s = rtsig_map;

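        /*
         * Expected format, as assumed from the parsing below:
         *   QEMU_RTSIG_MAP="tsig hsig count[,tsig hsig count...]"
         * e.g. "34 40 4" maps target rt signals 34..37 to host
         * signals 40..43.
         */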
        while (true) {
            int i;

            if (qemu_strtoi(s, &s, 10, &tsig) || *s++ != ' ') {
                fprintf(stderr, "Malformed target signal in QEMU_RTSIG_MAP\n");
                exit(EXIT_FAILURE);
            }
            if (qemu_strtoi(s, &s, 10, &hsig) || *s++ != ' ') {
                fprintf(stderr, "Malformed host signal in QEMU_RTSIG_MAP\n");
                exit(EXIT_FAILURE);
            }
            if (qemu_strtoi(s, &s, 10, &count) || (*s && *s != ',')) {
                fprintf(stderr, "Malformed signal count in QEMU_RTSIG_MAP\n");
                exit(EXIT_FAILURE);
            }

            for (i = 0; i < count; i++, tsig++, hsig++) {
                if (tsig < TARGET_SIGRTMIN || tsig > TARGET_NSIG) {
                    fprintf(stderr, "%d is not a target rt signal\n", tsig);
                    exit(EXIT_FAILURE);
                }
                if (hsig < SIGRTMIN || hsig > SIGRTMAX) {
                    fprintf(stderr, "%d is not a host rt signal\n", hsig);
                    exit(EXIT_FAILURE);
                }
                if (host_to_target_signal_table[hsig]) {
                    fprintf(stderr, "%d already maps %d\n",
                            hsig, host_to_target_signal_table[hsig]);
                    exit(EXIT_FAILURE);
                }
                host_to_target_signal_table[hsig] = tsig;
            }

            if (*s) {
                s++;
            } else {
                break;
            }
        }
    } else {
        /*
         * Default host-to-target RT signal mapping.
         *
         * Signals are supported starting from TARGET_SIGRTMIN and going up
         * until we run out of host realtime signals. Glibc uses the lower 2
         * RT signals and (hopefully) nobody uses the upper ones.
         * This is why SIGRTMIN (34) is generally greater than __SIGRTMIN (32).
         * To fix this properly we would need to do manual signal delivery
         * multiplexed over a single host signal.
         *
         * Attempts to configure "missing" signals via sigaction will be
         * silently ignored.
         *
         * Reserve two signals for internal usage (see below).
         */
        hsig = SIGRTMIN + 2;
        for (tsig = TARGET_SIGRTMIN;
             hsig <= SIGRTMAX && tsig <= TARGET_NSIG;
             hsig++, tsig++) {
            host_to_target_signal_table[hsig] = tsig;
        }
    }

    /*
     * Remap the target SIGABRT, so that we can distinguish host abort
     * from guest abort. When the guest registers a signal handler or
     * calls raise(SIGABRT), the host will raise SIG_RTn. If the guest
     * arrives at dump_core_and_abort(), we will map back to host SIGABRT
     * so that the parent (native or emulated) sees the correct signal.
     * Finally, also map host to guest SIGABRT so that the emulated
     * parent sees the correct mapping from wait status.
     */

    host_to_target_signal_table[SIGABRT] = 0;
    for (hsig = SIGRTMIN; hsig <= SIGRTMAX; hsig++) {
        if (!host_to_target_signal_table[hsig]) {
            if (host_interrupt_signal) {
                host_to_target_signal_table[hsig] = TARGET_SIGABRT;
                break;
            }
            host_interrupt_signal = hsig;
        }
    }
    if (hsig > SIGRTMAX) {
        fprintf(stderr,
                "No rt signals left for interrupt and SIGABRT mapping\n");
        exit(EXIT_FAILURE);
    }

    /* Invert the mapping that has already been assigned. */
    for (hsig = 1; hsig < _NSIG; hsig++) {
        tsig = host_to_target_signal_table[hsig];
        if (tsig) {
            if (target_to_host_signal_table[tsig]) {
                fprintf(stderr, "%d is already mapped to %d\n",
                        tsig, target_to_host_signal_table[tsig]);
                exit(EXIT_FAILURE);
            }
            target_to_host_signal_table[tsig] = hsig;
        }
    }

    host_to_target_signal_table[SIGABRT] = TARGET_SIGABRT;

    /* Map everything else out-of-bounds. */
    for (hsig = 1; hsig < _NSIG; hsig++) {
        if (host_to_target_signal_table[hsig] == 0) {
            host_to_target_signal_table[hsig] = TARGET_NSIG + 1;
        }
    }
    for (count = 0, tsig = 1; tsig <= TARGET_NSIG; tsig++) {
        if (target_to_host_signal_table[tsig] == 0) {
            target_to_host_signal_table[tsig] = _NSIG;
            count++;
        }
    }

    trace_signal_table_init(count);
}

void signal_init(const char *rtsig_map)
{
    TaskState *ts = get_task_state(thread_cpu);
    struct sigaction act, oact;

    /* initialize signal conversion tables */
    signal_table_init(rtsig_map);

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;

    /*
     * A parent process may configure ignored signals, but all other
     * signals are default. For any target signals that have no host
     * mapping, set to ignore. For all core_dump_signal, install our
     * host signal handler so that we may invoke dump_core_and_abort.
     * This includes SIGSEGV and SIGBUS, which also need our signal
     * handler for paging and exceptions.
     */
    for (int tsig = 1; tsig <= TARGET_NSIG; tsig++) {
        int hsig = target_to_host_signal(tsig);
        abi_ptr thand = TARGET_SIG_IGN;

        if (hsig >= _NSIG) {
            continue;
        }

        /* As we force remap SIGABRT, cannot probe and install in one step. */
        if (tsig == TARGET_SIGABRT) {
            sigaction(SIGABRT, NULL, &oact);
            sigaction(hsig, &act, NULL);
        } else {
            struct sigaction *iact = core_dump_signal(tsig) ? &act : NULL;
            sigaction(hsig, iact, &oact);
        }

        if (oact.sa_sigaction != (void *)SIG_IGN) {
            thand = TARGET_SIG_DFL;
        }
        sigact_table[tsig - 1]._sa_handler = thand;
    }

    sigaction(host_interrupt_signal, &act, NULL);
}

/* Force a synchronously taken signal. The kernel force_sig() function
 * also forces the signal to "not blocked, not ignored", but for QEMU
 * that work is done in process_pending_signals().
 */
void force_sig(int sig)
{
    CPUState *cpu = thread_cpu;
    target_siginfo_t info = {};

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._kill._pid = 0;
    info._sifields._kill._uid = 0;
    queue_signal(cpu_env(cpu), info.si_signo, QEMU_SI_KILL, &info);
}

/*
 * Force a synchronously taken QEMU_SI_FAULT signal. For QEMU the
 * 'force' part is handled in process_pending_signals().
 */
void force_sig_fault(int sig, int code, abi_ulong addr)
{
    CPUState *cpu = thread_cpu;
    target_siginfo_t info = {};

    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = code;
    info._sifields._sigfault._addr = addr;
    queue_signal(cpu_env(cpu), sig, QEMU_SI_FAULT, &info);
}

/* Force a SIGSEGV if we couldn't write to memory trying to set
 * up the signal frame. oldsig is the signal we were trying to handle
 * at the point of failure.
 */
#if !defined(TARGET_RISCV)
void force_sigsegv(int oldsig)
{
    if (oldsig == SIGSEGV) {
        /* Make sure we don't try to deliver the signal again; this will
         * end up with handle_pending_signal() calling dump_core_and_abort().
         */
        sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV);
}
#endif

void cpu_loop_exit_sigsegv(CPUState *cpu, vaddr addr,
                           MMUAccessType access_type, bool maperr, uintptr_t ra)
{
    const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;

    if (tcg_ops->record_sigsegv) {
        tcg_ops->record_sigsegv(cpu, addr, access_type, maperr, ra);
    }

    force_sig_fault(TARGET_SIGSEGV,
                    maperr ? TARGET_SEGV_MAPERR : TARGET_SEGV_ACCERR,
                    addr);
    cpu->exception_index = EXCP_INTERRUPT;
    cpu_loop_exit_restore(cpu, ra);
}

void cpu_loop_exit_sigbus(CPUState *cpu, vaddr addr,
                          MMUAccessType access_type, uintptr_t ra)
{
    const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;

    if (tcg_ops->record_sigbus) {
        tcg_ops->record_sigbus(cpu, addr, access_type, ra);
    }

    force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, addr);
    cpu->exception_index = EXCP_INTERRUPT;
    cpu_loop_exit_restore(cpu, ra);
}

/* abort execution with signal */
static G_NORETURN
void die_with_signal(int host_sig)
{
    struct sigaction act = {
        .sa_handler = SIG_DFL,
    };

    /*
     * The proper exit code for dying from an uncaught signal is -<signal>.
     * The kernel doesn't allow exit() or _exit() to pass a negative value.
     * To get the proper exit code we need to actually die from an uncaught
     * signal. Here the default signal handler is installed, we send
     * the signal and we wait for it to arrive.
     */
    sigfillset(&act.sa_mask);
    sigaction(host_sig, &act, NULL);

    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (reusing the mask inside of act). */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    _exit(EXIT_FAILURE);
}

G_NORETURN
void dump_core_and_abort(CPUArchState *env, int target_sig)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = get_task_state(cpu);
    int host_sig, core_dumped = 0;

    /* On exit, undo the remapping of SIGABRT. */
    if (target_sig == TARGET_SIGABRT) {
        host_sig = SIGABRT;
    } else {
        host_sig = target_to_host_signal(target_sig);
    }
    trace_user_dump_core_and_abort(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* we already dumped the core of target process, we don't want
         * a coredump of qemu itself */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur = 0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
                       target_sig, strsignal(host_sig), "core dumped");
    }

    preexit_cleanup(env, 128 + target_sig);
    die_with_signal(host_sig);
}

/* queue a signal so that it will be sent to the virtual CPU as soon
 * as possible */
void queue_signal(CPUArchState *env, int sig, int si_type,
                  target_siginfo_t *info)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = get_task_state(cpu);

    trace_user_queue_signal(env, sig);

    info->si_code = deposit32(info->si_code, 16, 16, si_type);

    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* signal that a new signal is pending */
    qatomic_set(&ts->signal_pending, 1);
}

/* Adjust the signal context to rewind out of safe-syscall if we're in it */
static inline void rewind_if_in_safe_syscall(void *puc)
{
    host_sigcontext *uc = (host_sigcontext *)puc;
    uintptr_t pcreg = host_signal_pc(uc);

    if (pcreg > (uintptr_t)safe_syscall_start
        && pcreg < (uintptr_t)safe_syscall_end) {
        host_signal_set_pc(uc, (uintptr_t)safe_syscall_start);
    }
}
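
/*
 * Rationale, in brief: between safe_syscall_start and safe_syscall_end
 * the host syscall has not yet been entered, so winding the PC back to
 * safe_syscall_start lets the safe-syscall path re-check signal_pending
 * and fail with QEMU_ERESTARTSYS instead of losing the wakeup.
 */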

G_NORETURN
void die_from_signal(siginfo_t *info)
{
    char sigbuf[4], codebuf[12];
    const char *sig, *code = NULL;

    switch (info->si_signo) {
    case SIGSEGV:
        sig = "SEGV";
        switch (info->si_code) {
        case SEGV_MAPERR:
            code = "MAPERR";
            break;
        case SEGV_ACCERR:
            code = "ACCERR";
            break;
        }
        break;
    case SIGBUS:
        sig = "BUS";
        switch (info->si_code) {
        case BUS_ADRALN:
            code = "ADRALN";
            break;
        case BUS_ADRERR:
            code = "ADRERR";
            break;
        }
        break;
    case SIGILL:
        sig = "ILL";
        switch (info->si_code) {
        case ILL_ILLOPC:
            code = "ILLOPC";
            break;
        case ILL_ILLOPN:
            code = "ILLOPN";
            break;
        case ILL_ILLADR:
            code = "ILLADR";
            break;
        case ILL_PRVOPC:
            code = "PRVOPC";
            break;
        case ILL_PRVREG:
            code = "PRVREG";
            break;
        case ILL_COPROC:
            code = "COPROC";
            break;
        }
        break;
    case SIGFPE:
        sig = "FPE";
        switch (info->si_code) {
        case FPE_INTDIV:
            code = "INTDIV";
            break;
        case FPE_INTOVF:
            code = "INTOVF";
            break;
        }
        break;
    case SIGTRAP:
        sig = "TRAP";
        break;
    default:
        snprintf(sigbuf, sizeof(sigbuf), "%d", info->si_signo);
        sig = sigbuf;
        break;
    }
    if (code == NULL) {
        snprintf(codebuf, sizeof(codebuf), "%d", info->si_code);
        code = codebuf;
    }

    error_report("QEMU internal SIG%s {code=%s, addr=%p}",
                 sig, code, info->si_addr);
    die_with_signal(info->si_signo);
}

static void host_sigsegv_handler(CPUState *cpu, siginfo_t *info,
                                 host_sigcontext *uc)
{
    uintptr_t host_addr = (uintptr_t)info->si_addr;
    /*
     * Convert forcefully to guest address space: addresses outside
     * reserved_va are still valid to report via SEGV_MAPERR.
     */
    bool is_valid = h2g_valid(host_addr);
    abi_ptr guest_addr = h2g_nocheck(host_addr);
    uintptr_t pc = host_signal_pc(uc);
    bool is_write = host_signal_write(info, uc);
    MMUAccessType access_type = adjust_signal_pc(&pc, is_write);
    bool maperr;

    /* If this was a write to a TB protected page, restart. */
    if (is_write
        && is_valid
        && info->si_code == SEGV_ACCERR
        && handle_sigsegv_accerr_write(cpu, host_signal_mask(uc),
                                       pc, guest_addr)) {
        return;
    }

    /*
     * If the access was not on behalf of the guest, within the executable
     * mapping of the generated code buffer, then it is a host bug.
     */
    if (access_type != MMU_INST_FETCH
        && !in_code_gen_buffer((void *)(pc - tcg_splitwx_diff))) {
        die_from_signal(info);
    }

    maperr = true;
    if (is_valid && info->si_code == SEGV_ACCERR) {
        /*
         * With reserved_va, the whole address space is PROT_NONE,
         * which means that we may get ACCERR when we want MAPERR.
         */
        if (page_get_flags(guest_addr) & PAGE_VALID) {
            maperr = false;
        } else {
            info->si_code = SEGV_MAPERR;
        }
    }

    sigprocmask(SIG_SETMASK, host_signal_mask(uc), NULL);
    cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, maperr, pc);
}

static uintptr_t host_sigbus_handler(CPUState *cpu, siginfo_t *info,
                                     host_sigcontext *uc)
{
    uintptr_t pc = host_signal_pc(uc);
    bool is_write = host_signal_write(info, uc);
    MMUAccessType access_type = adjust_signal_pc(&pc, is_write);

    /*
     * If the access was not on behalf of the guest, within the executable
     * mapping of the generated code buffer, then it is a host bug.
     */
    if (!in_code_gen_buffer((void *)(pc - tcg_splitwx_diff))) {
        die_from_signal(info);
    }

    if (info->si_code == BUS_ADRALN) {
        uintptr_t host_addr = (uintptr_t)info->si_addr;
        abi_ptr guest_addr = h2g_nocheck(host_addr);

        sigprocmask(SIG_SETMASK, host_signal_mask(uc), NULL);
        cpu_loop_exit_sigbus(cpu, guest_addr, access_type, pc);
    }
    return pc;
}

static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu_env(cpu);
    TaskState *ts = get_task_state(cpu);
    target_siginfo_t tinfo;
    host_sigcontext *uc = puc;
    struct emulated_sigtable *k;
    int guest_sig;
    uintptr_t pc = 0;
    bool sync_sig = false;
    sigset_t *sigmask;

    if (host_sig == host_interrupt_signal) {
        ts->signal_pending = 1;
        cpu_exit(thread_cpu);
        return;
    }

    /*
     * Non-spoofed SIGSEGV and SIGBUS are synchronous, and need special
     * handling wrt signal blocking and unwinding.  Non-spoofed SIGILL,
     * SIGFPE, SIGTRAP are always host bugs.
     */
    if (info->si_code > 0) {
        switch (host_sig) {
        case SIGSEGV:
            /* Only returns on handle_sigsegv_accerr_write success. */
            host_sigsegv_handler(cpu, info, uc);
            return;
        case SIGBUS:
            pc = host_sigbus_handler(cpu, info, uc);
            sync_sig = true;
            break;
        case SIGILL:
        case SIGFPE:
        case SIGTRAP:
            die_from_signal(info);
        }
    }

    /* get target signal number */
    guest_sig = host_to_target_signal(host_sig);
    if (guest_sig < 1 || guest_sig > TARGET_NSIG) {
        return;
    }
    trace_user_host_signal(env, host_sig, guest_sig);

    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[guest_sig - 1];
    k->info = tinfo;
    k->pending = guest_sig;
    ts->signal_pending = 1;

    /*
     * For synchronous signals, unwind the cpu state to the faulting
     * insn and then exit back to the main loop so that the signal
     * is delivered immediately.
     */
    if (sync_sig) {
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit_restore(cpu, pc);
    }

    rewind_if_in_safe_syscall(puc);

    /*
     * Block host signals until target signal handler entered. We
     * can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     *
     * WARNING: we cannot use sigfillset() here because the sigmask
     * field is a kernel sigset_t, which is much smaller than the
     * libc sigset_t which sigfillset() operates on. Using sigfillset()
     * would write 0xff bytes off the end of the structure and trash
     * data on the struct.
     */
    sigmask = host_signal_mask(uc);
    memset(sigmask, 0xff, SIGSET_T_SIZE);
    sigdelset(sigmask, SIGSEGV);
    sigdelset(sigmask, SIGBUS);

    /* interrupt the virtual CPU as soon as possible */
    cpu_exit(thread_cpu);
}

/* do_sigaltstack() returns target values and errnos. */
/* compare linux/kernel/signal.c:do_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr,
                        CPUArchState *env)
{
    target_stack_t oss, *uoss = NULL;
    abi_long ret = -TARGET_EFAULT;

    if (uoss_addr) {
        /* Verify writability now, but do not alter user memory yet. */
        if (!lock_user_struct(VERIFY_WRITE, uoss, uoss_addr, 0)) {
            goto out;
        }
        target_save_altstack(&oss, env);
    }

    if (uss_addr) {
        target_stack_t *uss;

        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        ret = target_restore_altstack(uss, env);
        if (ret) {
            goto out;
        }
    }

    if (uoss_addr) {
        memcpy(uoss, &oss, sizeof(oss));
        unlock_user_struct(uoss, uoss_addr, 1);
        uoss = NULL;
    }
    ret = 0;

 out:
    if (uoss) {
        unlock_user_struct(uoss, uoss_addr, 0);
    }
    return ret;
}

/* do_sigaction() return target values and host errnos */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact, abi_ulong ka_restorer)
{
    struct target_sigaction *k;
    int host_sig;
    int ret = 0;

    trace_signal_do_sigaction_guest(sig, TARGET_NSIG);

    if (sig < 1 || sig > TARGET_NSIG) {
        return -TARGET_EINVAL;
    }

    if (act && (sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP)) {
        return -TARGET_EINVAL;
    }

    if (block_signals()) {
        return -QEMU_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped. */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
#ifdef TARGET_ARCH_HAS_KA_RESTORER
        k->ka_restorer = ka_restorer;
#endif
        /* To be swapped in target_to_host_sigset. */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
        trace_signal_do_sigaction_host(host_sig, TARGET_NSIG);
        if (host_sig > SIGRTMAX) {
            /* we don't have enough host signals to map all target signals */
            qemu_log_mask(LOG_UNIMP, "Unsupported target signal #%d, ignored\n",
                          sig);
            /*
             * we don't return an error here because some programs try to
             * register a handler for all possible rt signals even if they
             * don't need it.
             * An error here can abort them whereas there can be no problem
             * to not have the signal available later.
             * This is the case for golang,
             *   See https://github.com/golang/go/issues/33746
             * So we silently ignore the error.
             */
            return 0;
        }
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            struct sigaction act1;

            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->_sa_handler == TARGET_SIG_IGN) {
                /*
                 * It is important to update the host kernel signal ignore
                 * state to avoid getting unexpected interrupted syscalls.
                 */
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (core_dump_signal(sig)) {
                    act1.sa_sigaction = host_signal_handler;
                } else {
                    act1.sa_sigaction = (void *)SIG_DFL;
                }
            } else {
                act1.sa_sigaction = host_signal_handler;
                if (k->sa_flags & TARGET_SA_RESTART) {
                    act1.sa_flags |= SA_RESTART;
                }
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}

static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_ulong handler;
    sigset_t set;
    target_siginfo_t unswapped;
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    TaskState *ts = get_task_state(cpu);

    trace_user_handle_signal(cpu_env, sig);
    /* dequeue signal */
    k->pending = 0;

    /*
     * Writes out siginfo values byteswapped, according to the target.
     * It also cleans the si_type from si_code making it correct for
     * the target. We must hold on to the original unswapped copy for
     * strace below, because si_type is still required there.
     */
    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        unswapped = k->info;
    }
    tswap_siginfo(&k->info, &k->info);

    sig = gdb_handlesig(cpu, sig, NULL, &k->info, sizeof(k->info));
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_taken_signal(sig, &unswapped);
    }

    if (handler == TARGET_SIG_DFL) {
        /* default handler: ignore some signals. The others are job control or fatal. */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
            kill(getpid(), SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            dump_core_and_abort(cpu_env, sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        dump_core_and_abort(cpu_env, sig);
    } else {
        /* compute the blocked signals during the handler execution */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /* SA_NODEFER indicates that the current signal should not be
           blocked during the handler */
        if (!(sa->sa_flags & TARGET_SA_NODEFER)) {
            sigaddset(&set, target_to_host_signal(sig));
        }

        /* save the previous blocked signal state to restore it at the
           end of the signal execution (see do_sigreturn) */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        /* block signals in the handler */
        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = 0;

        /* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK) {
                save_v86_state(env);
            }
        }
#endif
        /* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ARCH_HAS_SETUP_FRAME)
        if (sa->sa_flags & TARGET_SA_SIGINFO) {
            setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
        } else {
            setup_frame(sig, sa, &target_old_set, cpu_env);
        }
#else
        /* These targets do not have traditional signals. */
        setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#endif
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}

void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = env_cpu(cpu_env);
    int sig;
    TaskState *ts = get_task_state(cpu);
    sigset_t set;
    sigset_t *blocked_set;

    while (qatomic_read(&ts->signal_pending)) {
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

    restart_scan:
        sig = ts->sync_signal.pending;
        if (sig) {
            /* Synchronous signals are forced,
             * see force_sig_info() and callers in Linux
             * Note that not all of our queue_signal() calls in QEMU correspond
             * to force_sig_info() calls in Linux (some are send_sig_info()).
             * However it seems like a kernel bug to me to allow the process
             * to block a synchronous signal since it could then just end up
             * looping round and round indefinitely.
             */
            if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);
        }

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /* Restart scan from the beginning, as handle_pending_signal
                 * might have resulted in a new synchronous signal (eg SIGSEGV).
                 */
                goto restart_scan;
            }
        }

        /* if no signal is pending, unblock signals and recheck (the act
         * of unblocking might cause us to take another host signal which
         * will set signal_pending again).
         */
        qatomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}

int process_sigsuspend_mask(sigset_t **pset, target_ulong sigset,
                            target_ulong sigsize)
{
    TaskState *ts = get_task_state(thread_cpu);
    sigset_t *host_set = &ts->sigsuspend_mask;
    target_sigset_t *target_sigset;

    if (sigsize != sizeof(*target_sigset)) {
        /* Like the kernel, we enforce correct size sigsets */
        return -TARGET_EINVAL;
    }

    target_sigset = lock_user(VERIFY_READ, sigset, sigsize, 1);
    if (!target_sigset) {
        return -TARGET_EFAULT;
    }
    target_to_host_sigset(host_set, target_sigset);
    unlock_user(target_sigset, sigset, 0);

    *pset = host_set;
    return 0;
}
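
/*
 * Usage sketch (assumed caller behavior): sigsuspend-style syscalls use
 * this to validate and convert the guest mask, then hand *pset to the
 * blocking host syscall; ts->sigsuspend_mask is consulted afterwards by
 * process_pending_signals() while in_sigsuspend is set.
 */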