]> git.ipfire.org Git - thirdparty/qemu.git/blame - linux-user/signal.c
Merge tag 'pull-riscv-to-apply-20250704' of https://github.com/alistair23/qemu into...
[thirdparty/qemu.git] / linux-user / signal.c
CommitLineData
31e31b8a 1/*
66fb9763 2 * Emulation of Linux signals
5fafdf24 3 *
31e31b8a
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
8167ee88 17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
31e31b8a 18 */
d39594e9 19#include "qemu/osdep.h"
a70dadc7 20#include "qemu/bitops.h"
c107521e 21#include "qemu/cutils.h"
d96bf49b 22#include "gdbstub/user.h"
74781c08 23#include "exec/page-protection.h"
15017436 24#include "accel/tcg/cpu-ops.h"
85b4fa0c 25
31e31b8a 26#include <sys/ucontext.h>
edf8e2af 27#include <sys/resource.h>
31e31b8a 28
3ef693a0 29#include "qemu.h"
3b249d26 30#include "user-internals.h"
a44d57a3 31#include "strace.h"
3ad0a769 32#include "loader.h"
c8ee0a44 33#include "trace.h"
befb7447 34#include "signal-common.h"
e6037d04 35#include "host-signal.h"
1bf0d6e4 36#include "user/cpu_loop.h"
f47dcf51 37#include "user/page-protection.h"
bbf15aaf 38#include "user/safe-syscall.h"
08916fd4 39#include "user/signal.h"
7dfd3ca8 40#include "tcg/tcg.h"
66fb9763 41
f84e313e
GR
42/* target_siginfo_t must fit in gdbstub's siginfo save area. */
43QEMU_BUILD_BUG_ON(sizeof(target_siginfo_t) > MAX_SIGINFO_LENGTH);
44
624f7979 45static struct target_sigaction sigact_table[TARGET_NSIG];
31e31b8a 46
5fafdf24 47static void host_signal_handler(int host_signum, siginfo_t *info,
66fb9763
FB
48 void *puc);
49
db2af69d
RH
50/* Fallback addresses into sigtramp page. */
51abi_ulong default_sigreturn;
52abi_ulong default_rt_sigreturn;
9fcff3a6
LV
53
54/*
b60b91aa
RH
55 * System includes define _NSIG as SIGRTMAX + 1, but qemu (like the kernel)
56 * defines TARGET_NSIG as TARGET_SIGRTMAX and the first signal is 1.
9fcff3a6
LV
57 * Signal number 0 is reserved for use as kill(pid, 0), to test whether
58 * a process exists without sending it a signal.
59 */
144bff03 60#ifdef __SIGRTMAX
9fcff3a6 61QEMU_BUILD_BUG_ON(__SIGRTMAX + 1 != _NSIG);
144bff03 62#endif
3ca05588 63static uint8_t host_to_target_signal_table[_NSIG] = {
7b72aa1d
HD
64#define MAKE_SIG_ENTRY(sig) [sig] = TARGET_##sig,
65 MAKE_SIGNAL_LIST
66#undef MAKE_SIG_ENTRY
9e5f5284 67};
9e5f5284 68
9fcff3a6
LV
69static uint8_t target_to_host_signal_table[TARGET_NSIG + 1];
70
71/* valid sig is between 1 and _NSIG - 1 */
1d9d8b55 72int host_to_target_signal(int sig)
31e31b8a 73{
b60b91aa 74 if (sig < 1) {
4cb05961 75 return sig;
9fcff3a6 76 }
b60b91aa
RH
77 if (sig >= _NSIG) {
78 return TARGET_NSIG + 1;
79 }
9e5f5284 80 return host_to_target_signal_table[sig];
31e31b8a
FB
81}
82
9fcff3a6 83/* valid sig is between 1 and TARGET_NSIG */
4cb05961 84int target_to_host_signal(int sig)
31e31b8a 85{
b60b91aa 86 if (sig < 1) {
4cb05961 87 return sig;
9fcff3a6 88 }
b60b91aa
RH
89 if (sig > TARGET_NSIG) {
90 return _NSIG;
91 }
9e5f5284 92 return target_to_host_signal_table[sig];
31e31b8a
FB
93}
94
c227f099 95static inline void target_sigaddset(target_sigset_t *set, int signum)
f5545b5c
PB
96{
97 signum--;
98 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
99 set->sig[signum / TARGET_NSIG_BPW] |= mask;
100}
101
c227f099 102static inline int target_sigismember(const target_sigset_t *set, int signum)
f5545b5c
PB
103{
104 signum--;
105 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
106 return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
107}
108
befb7447
LV
109void host_to_target_sigset_internal(target_sigset_t *d,
110 const sigset_t *s)
66fb9763 111{
9fcff3a6 112 int host_sig, target_sig;
f5545b5c 113 target_sigemptyset(d);
9fcff3a6
LV
114 for (host_sig = 1; host_sig < _NSIG; host_sig++) {
115 target_sig = host_to_target_signal(host_sig);
116 if (target_sig < 1 || target_sig > TARGET_NSIG) {
117 continue;
118 }
119 if (sigismember(s, host_sig)) {
120 target_sigaddset(d, target_sig);
f5545b5c 121 }
66fb9763
FB
122 }
123}
124
c227f099 125void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
9231944d 126{
c227f099 127 target_sigset_t d1;
9231944d
FB
128 int i;
129
130 host_to_target_sigset_internal(&d1, s);
131 for(i = 0;i < TARGET_NSIG_WORDS; i++)
cbb21eed 132 d->sig[i] = tswapal(d1.sig[i]);
9231944d
FB
133}
134
befb7447
LV
135void target_to_host_sigset_internal(sigset_t *d,
136 const target_sigset_t *s)
66fb9763 137{
9fcff3a6 138 int host_sig, target_sig;
f5545b5c 139 sigemptyset(d);
9fcff3a6
LV
140 for (target_sig = 1; target_sig <= TARGET_NSIG; target_sig++) {
141 host_sig = target_to_host_signal(target_sig);
142 if (host_sig < 1 || host_sig >= _NSIG) {
143 continue;
144 }
145 if (target_sigismember(s, target_sig)) {
146 sigaddset(d, host_sig);
f5545b5c 147 }
da7c8647 148 }
66fb9763
FB
149}
150
c227f099 151void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
9231944d 152{
c227f099 153 target_sigset_t s1;
9231944d
FB
154 int i;
155
156 for(i = 0;i < TARGET_NSIG_WORDS; i++)
cbb21eed 157 s1.sig[i] = tswapal(s->sig[i]);
9231944d
FB
158 target_to_host_sigset_internal(d, &s1);
159}
3b46e624 160
/* Convert host sigset @sigset to the old-style single-word target mask. */
void host_to_target_old_sigset(abi_ulong *old_sigset,
                               const sigset_t *sigset)
{
    target_sigset_t d;
    host_to_target_sigset(&d, sigset);
    /* Only the first word fits in the old-style mask; higher words drop. */
    *old_sigset = d.sig[0];
}
168
5fafdf24 169void target_to_host_old_sigset(sigset_t *sigset,
992f48a0 170 const abi_ulong *old_sigset)
66fb9763 171{
c227f099 172 target_sigset_t d;
9e5f5284
FB
173 int i;
174
175 d.sig[0] = *old_sigset;
176 for(i = 1;i < TARGET_NSIG_WORDS; i++)
177 d.sig[i] = 0;
178 target_to_host_sigset(sigset, &d);
66fb9763
FB
179}
180
3d3efba0
PM
/*
 * Block all host signals for the current thread and return the previous
 * value of signal_pending (non-zero if a guest signal was already queued).
 */
int block_signals(void)
{
    TaskState *ts = get_task_state(thread_cpu);
    sigset_t set;

    /* It's OK to block everything including SIGSEGV, because we won't
     * run any further guest code before unblocking signals in
     * process_pending_signals().
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    return qatomic_xchg(&ts->signal_pending, 1);
}
195
1c275925
AB
/* Wrapper for sigprocmask function
 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
 * are host signal set, not guest ones. Returns -QEMU_ERESTARTSYS if
 * a signal was already pending and the syscall must be restarted, or
 * 0 on success.
 * If set is NULL, this is guaranteed not to fail.
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = get_task_state(thread_cpu);

    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        /* Block everything so the mask update below cannot race with
         * signal delivery; a pending signal forces a syscall restart.
         */
        if (block_signals()) {
            return -QEMU_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to change blocking status of KILL or STOP */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}
242
3d3efba0
PM
243/* Just set the guest's signal mask to the specified value; the
244 * caller is assumed to have called block_signals() already.
245 */
befb7447 246void set_sigmask(const sigset_t *set)
9eede5b6 247{
e4e5cb4a 248 TaskState *ts = get_task_state(thread_cpu);
3d3efba0
PM
249
250 ts->signal_mask = *set;
9eede5b6 251}
9eede5b6 252
465e237b
LV
253/* sigaltstack management */
254
/*
 * Return non-zero if @sp lies within the current thread's sigaltstack.
 * NOTE(review): the single comparison appears to rely on unsigned
 * wraparound to also reject sp below ss_sp — confirm ss_sp/ss_size are
 * unsigned types.
 */
int on_sig_stack(unsigned long sp)
{
    TaskState *ts = get_task_state(thread_cpu);

    return (sp - ts->sigaltstack_used.ss_sp
            < ts->sigaltstack_used.ss_size);
}
262
263int sas_ss_flags(unsigned long sp)
264{
e4e5cb4a 265 TaskState *ts = get_task_state(thread_cpu);
5bfce0b7
PM
266
267 return (ts->sigaltstack_used.ss_size == 0 ? SS_DISABLE
465e237b
LV
268 : on_sig_stack(sp) ? SS_ONSTACK : 0);
269}
270
271abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka)
272{
273 /*
274 * This is the X/Open sanctioned signal stack switching.
275 */
e4e5cb4a 276 TaskState *ts = get_task_state(thread_cpu);
5bfce0b7 277
465e237b 278 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
5bfce0b7 279 return ts->sigaltstack_used.ss_sp + ts->sigaltstack_used.ss_size;
465e237b
LV
280 }
281 return sp;
282}
283
/* Copy the current thread's sigaltstack state into guest-visible @uss. */
void target_save_altstack(target_stack_t *uss, CPUArchState *env)
{
    TaskState *ts = get_task_state(thread_cpu);

    __put_user(ts->sigaltstack_used.ss_sp, &uss->ss_sp);
    /* ss_flags is computed relative to the guest's current stack pointer. */
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &uss->ss_flags);
    __put_user(ts->sigaltstack_used.ss_size, &uss->ss_size);
}
292
/*
 * Validate and install a guest-supplied sigaltstack.
 * Returns 0 on success, -TARGET_EPERM if currently executing on the
 * alternate stack, -TARGET_EINVAL for unrecognized ss_flags, or
 * -TARGET_ENOMEM if the new stack is smaller than the minimum size.
 */
abi_long target_restore_altstack(target_stack_t *uss, CPUArchState *env)
{
    TaskState *ts = get_task_state(thread_cpu);
    size_t minstacksize = TARGET_MINSIGSTKSZ;
    target_stack_t ss;

#if defined(TARGET_PPC64)
    /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
    struct image_info *image = ts->info;
    if (get_ppc64_abi(image) > 1) {
        minstacksize = 4096;
    }
#endif

    __get_user(ss.ss_sp, &uss->ss_sp);
    __get_user(ss.ss_size, &uss->ss_size);
    __get_user(ss.ss_flags, &uss->ss_flags);

    /* The alternate stack cannot be changed while it is in use. */
    if (on_sig_stack(get_sp_from_cpustate(env))) {
        return -TARGET_EPERM;
    }

    switch (ss.ss_flags) {
    default:
        return -TARGET_EINVAL;

    case TARGET_SS_DISABLE:
        ss.ss_size = 0;
        ss.ss_sp = 0;
        break;

    case TARGET_SS_ONSTACK:
    case 0:
        if (ss.ss_size < minstacksize) {
            return -TARGET_ENOMEM;
        }
        break;
    }

    ts->sigaltstack_used.ss_sp = ss.ss_sp;
    ts->sigaltstack_used.ss_size = ss.ss_size;
    return 0;
}
336
9de5e440
FB
337/* siginfo conversion */
338
/*
 * Convert host siginfo @info to target layout in @tinfo, leaving all
 * multi-byte fields in host byte order; tswap_siginfo() performs the
 * swap into guest order.
 */
static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;
    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    /* This memset serves two purposes:
     * (1) ensure we don't leak random junk to the guest later
     * (2) placate false positives from gcc about fields
     *     being used uninitialized if it chooses to inline both this
     *     function and tswap_siginfo() into host_to_target_siginfo().
     */
    memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));

    /* This is awkward, because we have to use a combination of
     * the si_code and si_signo to figure out which of the union's
     * members are valid. (Within the host kernel it is always possible
     * to tell, but the kernel carefully avoids giving userspace the
     * high 16 bits of si_code, so we don't have the information to
     * do this the easy way...) We therefore make our best guess,
     * bearing in mind that a guest can spoof most of the si_codes
     * via rt_sigqueueinfo() if it likes.
     *
     * Once we have made our guess, we record it in the top 16 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */

    switch (si_code) {
    case SI_USER:
    case SI_TKILL:
    case SI_KERNEL:
        /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
         * These are the only unspoofable si_code values.
         */
        tinfo->_sifields._kill._pid = info->si_pid;
        tinfo->_sifields._kill._uid = info->si_uid;
        si_type = QEMU_SI_KILL;
        break;
    default:
        /* Everything else is spoofable. Make best guess based on signal */
        switch (sig) {
        case TARGET_SIGCHLD:
            tinfo->_sifields._sigchld._pid = info->si_pid;
            tinfo->_sifields._sigchld._uid = info->si_uid;
            /* For non-exit statuses, the low 7 bits carry a signal number
             * that itself must be translated to the target numbering. */
            if (si_code == CLD_EXITED)
                tinfo->_sifields._sigchld._status = info->si_status;
            else
                tinfo->_sifields._sigchld._status
                    = host_to_target_signal(info->si_status & 0x7f)
                        | (info->si_status & ~0x7f);
            tinfo->_sifields._sigchld._utime = info->si_utime;
            tinfo->_sifields._sigchld._stime = info->si_stime;
            si_type = QEMU_SI_CHLD;
            break;
        case TARGET_SIGIO:
            tinfo->_sifields._sigpoll._band = info->si_band;
            tinfo->_sifields._sigpoll._fd = info->si_fd;
            si_type = QEMU_SI_POLL;
            break;
        default:
            /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
            tinfo->_sifields._rt._pid = info->si_pid;
            tinfo->_sifields._rt._uid = info->si_uid;
            /* XXX: potential problem if 64 bit */
            tinfo->_sifields._rt._sigval.sival_ptr
                = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
            si_type = QEMU_SI_RT;
            break;
        }
        break;
    }

    tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}
419
4d6d8a05
GR
/*
 * Swap a target_siginfo_t from host order into guest order, stripping
 * the QEMU_SI_* marker from the top 16 bits of si_code and
 * sign-extending the remaining low bits.
 */
static void tswap_siginfo(target_siginfo_t *tinfo,
                          const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /* We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
    switch (si_type) {
    case QEMU_SI_KILL:
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);
        break;
    case QEMU_SI_POLL:
        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);
        break;
    case QEMU_SI_CHLD:
        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);
        break;
    case QEMU_SI_RT:
        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);
        break;
    default:
        g_assert_not_reached();
    }
}
477
c227f099 478void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
9de5e440 479{
55d72a7e
PM
480 target_siginfo_t tgt_tmp;
481 host_to_target_siginfo_noswap(&tgt_tmp, info);
482 tswap_siginfo(tinfo, &tgt_tmp);
66fb9763
FB
483}
484
/* XXX: we support only POSIX RT signals are used. */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
/* Convert guest-order target siginfo @tinfo into host siginfo @info. */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    /* This conversion is used only for the rt_sigqueueinfo syscall,
     * and so we know that the _rt fields are the valid ones.
     */
    abi_ulong sival_ptr;

    __get_user(info->si_signo, &tinfo->si_signo);
    __get_user(info->si_errno, &tinfo->si_errno);
    __get_user(info->si_code, &tinfo->si_code);
    __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
    __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
    __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
    info->si_value.sival_ptr = (void *)(long)sival_ptr;
}
502
edf8e2af
MW
503/* returns 1 if given signal should dump core if not handled */
504static int core_dump_signal(int sig)
505{
506 switch (sig) {
507 case TARGET_SIGABRT:
508 case TARGET_SIGFPE:
509 case TARGET_SIGILL:
510 case TARGET_SIGQUIT:
511 case TARGET_SIGSEGV:
512 case TARGET_SIGTRAP:
513 case TARGET_SIGBUS:
514 return (1);
515 default:
516 return (0);
517 }
518}
519
2b3ccf5f
IL
520int host_interrupt_signal;
521
/*
 * Build host_to_target_signal_table[] and target_to_host_signal_table[],
 * either from the user-provided @rtsig_map specification
 * ("tsig hsig count[,tsig hsig count...]") or from the default mapping.
 * Exits with an error message on malformed input or mapping conflicts.
 */
static void signal_table_init(const char *rtsig_map)
{
    int hsig, tsig, count;

    if (rtsig_map) {
        /*
         * Map host RT signals to target RT signals according to the
         * user-provided specification.
         */
        const char *s = rtsig_map;

        while (true) {
            int i;

            if (qemu_strtoi(s, &s, 10, &tsig) || *s++ != ' ') {
                fprintf(stderr, "Malformed target signal in QEMU_RTSIG_MAP\n");
                exit(EXIT_FAILURE);
            }
            if (qemu_strtoi(s, &s, 10, &hsig) || *s++ != ' ') {
                fprintf(stderr, "Malformed host signal in QEMU_RTSIG_MAP\n");
                exit(EXIT_FAILURE);
            }
            if (qemu_strtoi(s, &s, 10, &count) || (*s && *s != ',')) {
                fprintf(stderr, "Malformed signal count in QEMU_RTSIG_MAP\n");
                exit(EXIT_FAILURE);
            }

            for (i = 0; i < count; i++, tsig++, hsig++) {
                if (tsig < TARGET_SIGRTMIN || tsig > TARGET_NSIG) {
                    fprintf(stderr, "%d is not a target rt signal\n", tsig);
                    exit(EXIT_FAILURE);
                }
                if (hsig < SIGRTMIN || hsig > SIGRTMAX) {
                    fprintf(stderr, "%d is not a host rt signal\n", hsig);
                    exit(EXIT_FAILURE);
                }
                if (host_to_target_signal_table[hsig]) {
                    fprintf(stderr, "%d already maps %d\n",
                            hsig, host_to_target_signal_table[hsig]);
                    exit(EXIT_FAILURE);
                }
                host_to_target_signal_table[hsig] = tsig;
            }

            if (*s) {
                s++;
            } else {
                break;
            }
        }
    } else {
        /*
         * Default host-to-target RT signal mapping.
         *
         * Signals are supported starting from TARGET_SIGRTMIN and going up
         * until we run out of host realtime signals. Glibc uses the lower 2
         * RT signals and (hopefully) nobody uses the upper ones.
         * This is why SIGRTMIN (34) is generally greater than __SIGRTMIN (32).
         * To fix this properly we would need to do manual signal delivery
         * multiplexed over a single host signal.
         * Attempts for configure "missing" signals via sigaction will be
         * silently ignored.
         *
         * Reserve two signals for internal usage (see below).
         */

        hsig = SIGRTMIN + 2;
        for (tsig = TARGET_SIGRTMIN;
             hsig <= SIGRTMAX && tsig <= TARGET_NSIG;
             hsig++, tsig++) {
            host_to_target_signal_table[hsig] = tsig;
        }
    }

    /*
     * Remap the target SIGABRT, so that we can distinguish host abort
     * from guest abort. When the guest registers a signal handler or
     * calls raise(SIGABRT), the host will raise SIG_RTn. If the guest
     * arrives at dump_core_and_abort(), we will map back to host SIGABRT
     * so that the parent (native or emulated) sees the correct signal.
     * Finally, also map host to guest SIGABRT so that the emulated
     * parent sees the correct mapping from wait status.
     */

    host_to_target_signal_table[SIGABRT] = 0;
    /* Claim the first two free host RT signals: one for internal
     * interruption of the emulator, one to carry the guest's SIGABRT. */
    for (hsig = SIGRTMIN; hsig <= SIGRTMAX; hsig++) {
        if (!host_to_target_signal_table[hsig]) {
            if (host_interrupt_signal) {
                host_to_target_signal_table[hsig] = TARGET_SIGABRT;
                break;
            } else {
                host_interrupt_signal = hsig;
            }
        }
    }
    if (hsig > SIGRTMAX) {
        fprintf(stderr,
                "No rt signals left for interrupt and SIGABRT mapping\n");
        exit(EXIT_FAILURE);
    }

    /* Invert the mapping that has already been assigned. */
    for (hsig = 1; hsig < _NSIG; hsig++) {
        tsig = host_to_target_signal_table[hsig];
        if (tsig) {
            if (target_to_host_signal_table[tsig]) {
                fprintf(stderr, "%d is already mapped to %d\n",
                        tsig, target_to_host_signal_table[tsig]);
                exit(EXIT_FAILURE);
            }
            target_to_host_signal_table[tsig] = hsig;
        }
    }

    host_to_target_signal_table[SIGABRT] = TARGET_SIGABRT;

    /* Map everything else out-of-bounds. */
    for (hsig = 1; hsig < _NSIG; hsig++) {
        if (host_to_target_signal_table[hsig] == 0) {
            host_to_target_signal_table[hsig] = TARGET_NSIG + 1;
        }
    }
    for (count = 0, tsig = 1; tsig <= TARGET_NSIG; tsig++) {
        if (target_to_host_signal_table[tsig] == 0) {
            target_to_host_signal_table[tsig] = _NSIG;
            count++;
        }
    }

    trace_signal_table_init(count);
}
653
/*
 * One-time signal subsystem setup: build the translation tables,
 * capture the inherited host signal mask, and install the QEMU host
 * signal handler for the signals that must be intercepted.
 */
void signal_init(const char *rtsig_map)
{
    TaskState *ts = get_task_state(thread_cpu);
    struct sigaction act, oact;

    /* initialize signal conversion tables */
    signal_table_init(rtsig_map);

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;

    /*
     * A parent process may configure ignored signals, but all other
     * signals are default. For any target signals that have no host
     * mapping, set to ignore. For all core_dump_signal, install our
     * host signal handler so that we may invoke dump_core_and_abort.
     * This includes SIGSEGV and SIGBUS, which are also need our signal
     * handler for paging and exceptions.
     */
    for (int tsig = 1; tsig <= TARGET_NSIG; tsig++) {
        int hsig = target_to_host_signal(tsig);
        abi_ptr thand = TARGET_SIG_IGN;

        if (hsig >= _NSIG) {
            continue;
        }

        /* As we force remap SIGABRT, cannot probe and install in one step. */
        if (tsig == TARGET_SIGABRT) {
            sigaction(SIGABRT, NULL, &oact);
            sigaction(hsig, &act, NULL);
        } else {
            struct sigaction *iact = core_dump_signal(tsig) ? &act : NULL;
            sigaction(hsig, iact, &oact);
        }

        /* Inherit the parent's disposition: ignored stays ignored. */
        if (oact.sa_sigaction != (void *)SIG_IGN) {
            thand = TARGET_SIG_DFL;
        }
        sigact_table[tsig - 1]._sa_handler = thand;
    }

    /* The reserved internal interrupt signal always uses our handler. */
    sigaction(host_interrupt_signal, &act, NULL);
}
702
c599d4d6
PM
703/* Force a synchronously taken signal. The kernel force_sig() function
704 * also forces the signal to "not blocked, not ignored", but for QEMU
705 * that work is done in process_pending_signals().
706 */
befb7447 707void force_sig(int sig)
c599d4d6
PM
708{
709 CPUState *cpu = thread_cpu;
819121b9 710 target_siginfo_t info = {};
c599d4d6
PM
711
712 info.si_signo = sig;
713 info.si_errno = 0;
714 info.si_code = TARGET_SI_KERNEL;
715 info._sifields._kill._pid = 0;
716 info._sifields._kill._uid = 0;
42e62aad 717 queue_signal(cpu_env(cpu), info.si_signo, QEMU_SI_KILL, &info);
c599d4d6 718}
09391669 719
af796960
PM
720/*
721 * Force a synchronously taken QEMU_SI_FAULT signal. For QEMU the
722 * 'force' part is handled in process_pending_signals().
723 */
724void force_sig_fault(int sig, int code, abi_ulong addr)
725{
726 CPUState *cpu = thread_cpu;
af796960
PM
727 target_siginfo_t info = {};
728
729 info.si_signo = sig;
730 info.si_errno = 0;
731 info.si_code = code;
732 info._sifields._sigfault._addr = addr;
42e62aad 733 queue_signal(cpu_env(cpu), sig, QEMU_SI_FAULT, &info);
af796960
PM
734}
735
09391669
PM
/* Force a SIGSEGV if we couldn't write to memory trying to set
 * up the signal frame. oldsig is the signal we were trying to handle
 * at the point of failure.
 */
/* NOTE(review): TARGET_RISCV presumably provides its own definition
 * elsewhere — confirm before relying on this guard. */
#if !defined(TARGET_RISCV)
void force_sigsegv(int oldsig)
{
    if (oldsig == SIGSEGV) {
        /* Make sure we don't try to deliver the signal again; this will
         * end up with handle_pending_signal() calling dump_core_and_abort().
         */
        sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV);
}
#endif
752
/*
 * Deliver a guest SIGSEGV for a fault at guest address @addr and exit
 * the cpu loop, restoring guest state via unwind info at host pc @ra.
 */
void cpu_loop_exit_sigsegv(CPUState *cpu, vaddr addr,
                           MMUAccessType access_type, bool maperr, uintptr_t ra)
{
    const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;

    /* Give the target a chance to record arch-specific fault details. */
    if (tcg_ops->record_sigsegv) {
        tcg_ops->record_sigsegv(cpu, addr, access_type, maperr, ra);
    }

    force_sig_fault(TARGET_SIGSEGV,
                    maperr ? TARGET_SEGV_MAPERR : TARGET_SEGV_ACCERR,
                    addr);
    cpu->exception_index = EXCP_INTERRUPT;
    cpu_loop_exit_restore(cpu, ra);
}
768
/*
 * Deliver a guest SIGBUS (alignment fault) for guest address @addr and
 * exit the cpu loop, restoring guest state via unwind info at @ra.
 */
void cpu_loop_exit_sigbus(CPUState *cpu, vaddr addr,
                          MMUAccessType access_type, uintptr_t ra)
{
    const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;

    /* Give the target a chance to record arch-specific fault details. */
    if (tcg_ops->record_sigbus) {
        tcg_ops->record_sigbus(cpu, addr, access_type, ra);
    }

    force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, addr);
    cpu->exception_index = EXCP_INTERRUPT;
    cpu_loop_exit_restore(cpu, ra);
}
782
9de5e440 783/* abort execution with signal */
b8b50f1e
RH
/* Terminate this process by taking @host_sig with its default action,
 * so that our parent observes the correct wait status. Never returns. */
static G_NORETURN
void die_with_signal(int host_sig)
{
    struct sigaction act = {
        .sa_handler = SIG_DFL,
    };

    /*
     * The proper exit code for dying from an uncaught signal is -<signal>.
     * The kernel doesn't allow exit() or _exit() to pass a negative value.
     * To get the proper exit code we need to actually die from an uncaught
     * signal. Here the default signal handler is installed, we send
     * the signal and we wait for it to arrive.
     */
    sigfillset(&act.sa_mask);
    sigaction(host_sig, &act, NULL);

    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (reusing the mask inside of act). */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    _exit(EXIT_FAILURE);
}
810
/*
 * Dump a guest core (when the binary format supports it) and terminate
 * the process with the host signal corresponding to @target_sig.
 */
static G_NORETURN
void dump_core_and_abort(CPUArchState *env, int target_sig)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = get_task_state(cpu);
    int host_sig, core_dumped = 0;

    /* On exit, undo the remapping of SIGABRT. */
    if (target_sig == TARGET_SIGABRT) {
        host_sig = SIGABRT;
    } else {
        host_sig = target_to_host_signal(target_sig);
    }
    trace_user_dump_core_and_abort(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* we already dumped the core of target process, we don't want
         * a coredump of qemu itself */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur=0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
            target_sig, strsignal(host_sig), "core dumped" );
    }

    /* 128 + signal matches the shell convention for signal deaths. */
    preexit_cleanup(env, 128 + target_sig);
    die_with_signal(host_sig);
}
847
9de5e440
FB
/* Queue a signal so that it will be sent to the virtual CPU as soon
   as possible. */
void queue_signal(CPUArchState *env, int sig, int si_type,
                  target_siginfo_t *info)
{
    CPUState *cpu = env_cpu(env);
    TaskState *ts = get_task_state(cpu);

    trace_user_queue_signal(env, sig);

    /* Record the QEMU_SI_* marker in the top 16 bits of si_code. */
    info->si_code = deposit32(info->si_code, 16, 16, si_type);

    /* There is a single slot for a synchronous signal; a later call
     * overwrites any previously stored one. */
    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* signal that a new signal is pending */
    qatomic_set(&ts->signal_pending, 1);
}
865
07637888
WL
866
867/* Adjust the signal context to rewind out of safe-syscall if we're in it */
4d330cee
TB
868static inline void rewind_if_in_safe_syscall(void *puc)
869{
9940799b 870 host_sigcontext *uc = (host_sigcontext *)puc;
07637888
WL
871 uintptr_t pcreg = host_signal_pc(uc);
872
873 if (pcreg > (uintptr_t)safe_syscall_start
874 && pcreg < (uintptr_t)safe_syscall_end) {
875 host_signal_set_pc(uc, (uintptr_t)safe_syscall_start);
876 }
07637888 877}
4d330cee 878
7dfd3ca8
HD
879static G_NORETURN
880void die_from_signal(siginfo_t *info)
881{
882 char sigbuf[4], codebuf[12];
883 const char *sig, *code = NULL;
884
885 switch (info->si_signo) {
886 case SIGSEGV:
887 sig = "SEGV";
888 switch (info->si_code) {
889 case SEGV_MAPERR:
890 code = "MAPERR";
891 break;
892 case SEGV_ACCERR:
893 code = "ACCERR";
894 break;
895 }
896 break;
897 case SIGBUS:
898 sig = "BUS";
899 switch (info->si_code) {
900 case BUS_ADRALN:
901 code = "ADRALN";
902 break;
903 case BUS_ADRERR:
904 code = "ADRERR";
905 break;
906 }
907 break;
4a6ebc19
RH
908 case SIGILL:
909 sig = "ILL";
910 switch (info->si_code) {
911 case ILL_ILLOPC:
912 code = "ILLOPC";
913 break;
914 case ILL_ILLOPN:
915 code = "ILLOPN";
916 break;
917 case ILL_ILLADR:
918 code = "ILLADR";
919 break;
920 case ILL_PRVOPC:
921 code = "PRVOPC";
922 break;
923 case ILL_PRVREG:
924 code = "PRVREG";
925 break;
926 case ILL_COPROC:
927 code = "COPROC";
928 break;
929 }
930 break;
931 case SIGFPE:
932 sig = "FPE";
933 switch (info->si_code) {
934 case FPE_INTDIV:
935 code = "INTDIV";
936 break;
937 case FPE_INTOVF:
938 code = "INTOVF";
939 break;
940 }
941 break;
942 case SIGTRAP:
943 sig = "TRAP";
944 break;
7dfd3ca8
HD
945 default:
946 snprintf(sigbuf, sizeof(sigbuf), "%d", info->si_signo);
947 sig = sigbuf;
948 break;
949 }
950 if (code == NULL) {
951 snprintf(codebuf, sizeof(sigbuf), "%d", info->si_code);
952 code = codebuf;
953 }
954
955 error_report("QEMU internal SIG%s {code=%s, addr=%p}",
956 sig, code, info->si_addr);
957 die_with_signal(info->si_signo);
958}
959
f4e11681
RH
/*
 * Handle a host SIGSEGV on behalf of the guest: either restart a write
 * to a TB-protected page, diagnose a host bug, or deliver a guest
 * SIGSEGV via the cpu loop.
 */
static void host_sigsegv_handler(CPUState *cpu, siginfo_t *info,
                                 host_sigcontext *uc)
{
    uintptr_t host_addr = (uintptr_t)info->si_addr;
    /*
     * Convert forcefully to guest address space: addresses outside
     * reserved_va are still valid to report via SEGV_MAPERR.
     */
    bool is_valid = h2g_valid(host_addr);
    abi_ptr guest_addr = h2g_nocheck(host_addr);
    uintptr_t pc = host_signal_pc(uc);
    bool is_write = host_signal_write(info, uc);
    MMUAccessType access_type = adjust_signal_pc(&pc, is_write);
    bool maperr;

    /* If this was a write to a TB protected page, restart. */
    if (is_write
        && is_valid
        && info->si_code == SEGV_ACCERR
        && handle_sigsegv_accerr_write(cpu, host_signal_mask(uc),
                                       pc, guest_addr)) {
        return;
    }

    /*
     * If the access was not on behalf of the guest, within the executable
     * mapping of the generated code buffer, then it is a host bug.
     */
    if (access_type != MMU_INST_FETCH
        && !in_code_gen_buffer((void *)(pc - tcg_splitwx_diff))) {
        die_from_signal(info);
    }

    maperr = true;
    if (is_valid && info->si_code == SEGV_ACCERR) {
        /*
         * With reserved_va, the whole address space is PROT_NONE,
         * which means that we may get ACCERR when we want MAPERR.
         */
        if (page_get_flags(guest_addr) & PAGE_VALID) {
            maperr = false;
        } else {
            info->si_code = SEGV_MAPERR;
        }
    }

    /* Restore the pre-signal mask before leaving the handler context. */
    sigprocmask(SIG_SETMASK, host_signal_mask(uc), NULL);
    cpu_loop_exit_sigsegv(cpu, guest_addr, access_type, maperr, pc);
}
1009
/*
 * Handle a synchronous (non-spoofed) host SIGBUS on behalf of the guest.
 *
 * Aborts QEMU if the fault was not raised from generated code (host bug).
 * For alignment faults (BUS_ADRALN), exits the cpu loop to deliver the
 * signal to the guest and does not return.  Otherwise returns the
 * (possibly unwound) fault pc so the caller can deliver the signal as a
 * synchronous guest signal.
 */
static uintptr_t host_sigbus_handler(CPUState *cpu, siginfo_t *info,
                                     host_sigcontext *uc)
{
    uintptr_t pc = host_signal_pc(uc);
    bool is_write = host_signal_write(info, uc);
    MMUAccessType access_type = adjust_signal_pc(&pc, is_write);

    /*
     * If the access was not on behalf of the guest, within the executable
     * mapping of the generated code buffer, then it is a host bug.
     */
    if (!in_code_gen_buffer((void *)(pc - tcg_splitwx_diff))) {
        die_from_signal(info);
    }

    if (info->si_code == BUS_ADRALN) {
        uintptr_t host_addr = (uintptr_t)info->si_addr;
        abi_ptr guest_addr = h2g_nocheck(host_addr);

        /* Restore the pre-signal mask before leaving the handler context. */
        sigprocmask(SIG_SETMASK, host_signal_mask(uc), NULL);
        cpu_loop_exit_sigbus(cpu, guest_addr, access_type, pc);
    }
    return pc;
}
1034
/*
 * The host SA_SIGINFO handler installed by do_sigaction() for every
 * signal routed through QEMU.
 *
 * Dispatches synchronous faults (SEGV/BUS) to the dedicated handlers
 * above, treats non-spoofed ILL/FPE/TRAP as host bugs, and queues
 * everything else into the per-task emulated sigtable for delivery by
 * process_pending_signals().
 */
static void host_signal_handler(int host_sig, siginfo_t *info, void *puc)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu_env(cpu);
    TaskState *ts = get_task_state(cpu);
    target_siginfo_t tinfo;
    host_sigcontext *uc = puc;
    struct emulated_sigtable *k;
    int guest_sig;
    uintptr_t pc = 0;
    bool sync_sig = false;
    void *sigmask;

    /*
     * The internal interrupt signal is not forwarded to the guest at
     * all; it only kicks the vCPU out of the execution loop.
     */
    if (host_sig == host_interrupt_signal) {
        ts->signal_pending = 1;
        cpu_exit(thread_cpu);
        return;
    }

    /*
     * Non-spoofed SIGSEGV and SIGBUS are synchronous, and need special
     * handling wrt signal blocking and unwinding.  Non-spoofed SIGILL,
     * SIGFPE, SIGTRAP are always host bugs.
     * (si_code > 0 means kernel-generated, i.e. not sent via kill(2).)
     */
    if (info->si_code > 0) {
        switch (host_sig) {
        case SIGSEGV:
            /* Only returns on handle_sigsegv_accerr_write success. */
            host_sigsegv_handler(cpu, info, uc);
            return;
        case SIGBUS:
            pc = host_sigbus_handler(cpu, info, uc);
            sync_sig = true;
            break;
        case SIGILL:
        case SIGFPE:
        case SIGTRAP:
            die_from_signal(info);
        }
    }

    /* get target signal number */
    guest_sig = host_to_target_signal(host_sig);
    if (guest_sig < 1 || guest_sig > TARGET_NSIG) {
        return;
    }
    trace_user_host_signal(env, host_sig, guest_sig);

    /* Queue the signal (unswapped; byte-swapped later at delivery). */
    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[guest_sig - 1];
    k->info = tinfo;
    k->pending = guest_sig;
    ts->signal_pending = 1;

    /*
     * For synchronous signals, unwind the cpu state to the faulting
     * insn and then exit back to the main loop so that the signal
     * is delivered immediately.
     */
    if (sync_sig) {
        cpu->exception_index = EXCP_INTERRUPT;
        cpu_loop_exit_restore(cpu, pc);
    }

    /* For asynchronous signals, do not interrupt a safe syscall midway. */
    rewind_if_in_safe_syscall(puc);

    /*
     * Block host signals until target signal handler entered. We
     * can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     *
     * WARNING: we cannot use sigfillset() here because the sigmask
     * field is a kernel sigset_t, which is much smaller than the
     * libc sigset_t which sigfillset() operates on. Using sigfillset()
     * would write 0xff bytes off the end of the structure and trash
     * data on the struct.
     */
    sigmask = host_signal_mask(uc);
    memset(sigmask, 0xff, SIGSET_T_SIZE);
    sigdelset(sigmask, SIGSEGV);
    sigdelset(sigmask, SIGBUS);

    /* interrupt the virtual CPU as soon as possible */
    cpu_exit(thread_cpu);
}
1122
/* do_sigaltstack() returns target values and errnos. */
/* compare linux/kernel/signal.c:do_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr,
                        CPUArchState *env)
{
    target_stack_t oss, *uoss = NULL;
    abi_long ret = -TARGET_EFAULT;

    if (uoss_addr) {
        /* Verify writability now, but do not alter user memory yet. */
        if (!lock_user_struct(VERIFY_WRITE, uoss, uoss_addr, 0)) {
            goto out;
        }
        /* Snapshot the current altstack into a local; copied out below. */
        target_save_altstack(&oss, env);
    }

    if (uss_addr) {
        target_stack_t *uss;

        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        /* Validates flags/size and installs the new altstack. */
        ret = target_restore_altstack(uss, env);
        if (ret) {
            goto out;
        }
    }

    if (uoss_addr) {
        /* Only now, after all checks passed, write the old state back. */
        memcpy(uoss, &oss, sizeof(oss));
        unlock_user_struct(uoss, uoss_addr, 1);
        uoss = NULL;
    }
    ret = 0;

out:
    if (uoss) {
        /* Error path: release the lock without copying anything out. */
        unlock_user_struct(uoss, uoss_addr, 0);
    }
    return ret;
}
1164
/* do_sigaction() return target values and host errnos */
/*
 * Guest sigaction(2): update the per-signal emulation table and, where
 * a host signal exists for the guest signal, mirror the disposition
 * into the host kernel so host-side delivery behaves correctly.
 */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact, abi_ulong ka_restorer)
{
    struct target_sigaction *k;
    int host_sig;
    int ret = 0;

    trace_signal_do_sigaction_guest(sig, TARGET_NSIG);

    if (sig < 1 || sig > TARGET_NSIG) {
        return -TARGET_EINVAL;
    }

    /* Like the kernel: SIGKILL/SIGSTOP dispositions cannot be changed. */
    if (act && (sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP)) {
        return -TARGET_EINVAL;
    }

    if (block_signals()) {
        return -QEMU_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped. */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
#ifdef TARGET_ARCH_HAS_KA_RESTORER
        k->ka_restorer = ka_restorer;
#endif
        /* To be swapped in target_to_host_sigset. */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
        trace_signal_do_sigaction_host(host_sig, TARGET_NSIG);
        if (host_sig > SIGRTMAX) {
            /* we don't have enough host signals to map all target signals */
            qemu_log_mask(LOG_UNIMP, "Unsupported target signal #%d, ignored\n",
                          sig);
            /*
             * we don't return an error here because some programs try to
             * register an handler for all possible rt signals even if they
             * don't need it.
             * An error here can abort them whereas there can be no problem
             * to not have the signal available later.
             * This is the case for golang,
             * See https://github.com/golang/go/issues/33746
             * So we silently ignore the error.
             */
            return 0;
        }
        /*
         * SIGSEGV and SIGBUS stay permanently routed through
         * host_signal_handler (installed at startup) so guest faults
         * keep working; never override them here.
         */
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            struct sigaction act1;

            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->_sa_handler == TARGET_SIG_IGN) {
                /*
                 * It is important to update the host kernel signal ignore
                 * state to avoid getting unexpected interrupted syscalls.
                 */
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                /*
                 * Core-dumping signals still go through our handler so
                 * we control the dump; others can take the host default.
                 */
                if (core_dump_signal(sig)) {
                    act1.sa_sigaction = host_signal_handler;
                } else {
                    act1.sa_sigaction = (void *)SIG_DFL;
                }
            } else {
                act1.sa_sigaction = host_signal_handler;
                if (k->sa_flags & TARGET_SA_RESTART) {
                    act1.sa_flags |= SA_RESTART;
                }
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}
1256
/*
 * Deliver one queued guest signal: consult gdb, then either apply the
 * default/ignore disposition or build the guest signal frame so the
 * guest handler runs next time the vCPU executes.
 */
static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = env_cpu(cpu_env);
    abi_ulong handler;
    sigset_t set;
    target_siginfo_t unswapped;
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    TaskState *ts = get_task_state(cpu);

    trace_user_handle_signal(cpu_env, sig);
    /* dequeue signal */
    k->pending = 0;

    /*
     * Writes out siginfo values byteswapped, accordingly to the target.
     * It also cleans the si_type from si_code making it correct for
     * the target. We must hold on to the original unswapped copy for
     * strace below, because si_type is still required there.
     */
    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        unswapped = k->info;
    }
    tswap_siginfo(&k->info, &k->info);

    /* gdb may swallow the signal (returns 0) or change its number. */
    sig = gdb_handlesig(cpu, sig, NULL, &k->info, sizeof(k->info));
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (unlikely(qemu_loglevel_mask(LOG_STRACE))) {
        print_taken_signal(sig, &unswapped);
    }

    if (handler == TARGET_SIG_DFL) {
        /* default handler : ignore some signal. The other are job control or fatal */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
            kill(getpid(),SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            dump_core_and_abort(cpu_env, sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        dump_core_and_abort(cpu_env, sig);
    } else {
        /* compute the blocked signals during the handler execution */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /* SA_NODEFER indicates that the current signal should not be
           blocked during the handler */
        if (!(sa->sa_flags & TARGET_SA_NODEFER))
            sigaddset(&set, target_to_host_signal(sig));

        /* save the previous blocked signal state to restore it at the
           end of the signal execution (see do_sigreturn) */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        /* block signals in the handler */
        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = 0;

        /* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK)
                save_v86_state(env);
        }
#endif
        /* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ARCH_HAS_SETUP_FRAME)
        if (sa->sa_flags & TARGET_SA_SIGINFO) {
            setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
        } else {
            setup_frame(sig, sa, &target_old_set, cpu_env);
        }
#else
        /* These targets do not have traditional signals. */
        setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#endif
        /* SA_RESETHAND: one-shot handler reverts to default disposition. */
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}
/*
 * Drain all deliverable pending guest signals for this thread.
 * Runs with all host signals blocked while scanning/delivering, then
 * unblocks (except SIGSEGV/SIGBUS) and rechecks, since unblocking may
 * immediately queue new signals.
 */
void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = env_cpu(cpu_env);
    int sig;
    TaskState *ts = get_task_state(cpu);
    sigset_t set;
    sigset_t *blocked_set;

    while (qatomic_read(&ts->signal_pending)) {
        /* Block everything while we manipulate the queues. */
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

    restart_scan:
        /* The synchronous (fault) signal always takes priority. */
        sig = ts->sync_signal.pending;
        if (sig) {
            /* Synchronous signals are forced,
             * see force_sig_info() and callers in Linux
             * Note that not all of our queue_signal() calls in QEMU correspond
             * to force_sig_info() calls in Linux (some are send_sig_info()).
             * However it seems like a kernel bug to me to allow the process
             * to block a synchronous signal since it could then just end up
             * looping round and round indefinitely.
             */
            if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);
        }

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            /* Recompute each iteration: delivery can clear in_sigsuspend. */
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /* Restart scan from the beginning, as handle_pending_signal
                 * might have resulted in a new synchronous signal (eg SIGSEGV).
                 */
                goto restart_scan;
            }
        }

        /* if no signal is pending, unblock signals and recheck (the act
         * of unblocking might cause us to take another host signal which
         * will set signal_pending again).
         */
        qatomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        /* Never block SIGSEGV/SIGBUS: guest code may fault at any time. */
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}
0a99f093
RH
1415
1416int process_sigsuspend_mask(sigset_t **pset, target_ulong sigset,
1417 target_ulong sigsize)
1418{
e4e5cb4a 1419 TaskState *ts = get_task_state(thread_cpu);
0a99f093
RH
1420 sigset_t *host_set = &ts->sigsuspend_mask;
1421 target_sigset_t *target_sigset;
1422
1423 if (sigsize != sizeof(*target_sigset)) {
1424 /* Like the kernel, we enforce correct size sigsets */
1425 return -TARGET_EINVAL;
1426 }
1427
1428 target_sigset = lock_user(VERIFY_READ, sigset, sigsize, 1);
1429 if (!target_sigset) {
1430 return -TARGET_EFAULT;
1431 }
1432 target_to_host_sigset(host_set, target_sigset);
1433 unlock_user(target_sigset, sigset, 0);
1434
1435 *pset = host_set;
1436 return 0;
1437}