]> git.ipfire.org Git - thirdparty/qemu.git/blame - linux-user/signal.c
configure: fix pam test warning
[thirdparty/qemu.git] / linux-user / signal.c
CommitLineData
31e31b8a 1/*
66fb9763 2 * Emulation of Linux signals
5fafdf24 3 *
31e31b8a
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
8167ee88 17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
31e31b8a 18 */
d39594e9 19#include "qemu/osdep.h"
a70dadc7 20#include "qemu/bitops.h"
31e31b8a 21#include <sys/ucontext.h>
edf8e2af 22#include <sys/resource.h>
31e31b8a 23
3ef693a0 24#include "qemu.h"
7d99a001 25#include "qemu-common.h"
c8ee0a44 26#include "trace.h"
befb7447 27#include "signal-common.h"
66fb9763 28
/* Guest-visible sigaltstack state; starts out disabled with no stack
 * installed, matching a freshly exec'd Linux process. */
struct target_sigaltstack target_sigaltstack_used = {
    .ss_sp = 0,
    .ss_size = 0,
    .ss_flags = TARGET_SS_DISABLE,
};

/* Emulated guest sigaction state, indexed by (guest signal number - 1). */
static struct target_sigaction sigact_table[TARGET_NSIG];
31e31b8a 36
static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc);

/* Host -> guest signal number mapping.  Entries left zero here are
 * filled in as identity mappings by signal_init(). */
static uint8_t host_to_target_signal_table[_NSIG] = {
    [SIGHUP] = TARGET_SIGHUP,
    [SIGINT] = TARGET_SIGINT,
    [SIGQUIT] = TARGET_SIGQUIT,
    [SIGILL] = TARGET_SIGILL,
    [SIGTRAP] = TARGET_SIGTRAP,
    [SIGABRT] = TARGET_SIGABRT,
/*    [SIGIOT] = TARGET_SIGIOT,*/
    [SIGBUS] = TARGET_SIGBUS,
    [SIGFPE] = TARGET_SIGFPE,
    [SIGKILL] = TARGET_SIGKILL,
    [SIGUSR1] = TARGET_SIGUSR1,
    [SIGSEGV] = TARGET_SIGSEGV,
    [SIGUSR2] = TARGET_SIGUSR2,
    [SIGPIPE] = TARGET_SIGPIPE,
    [SIGALRM] = TARGET_SIGALRM,
    [SIGTERM] = TARGET_SIGTERM,
#ifdef SIGSTKFLT
    [SIGSTKFLT] = TARGET_SIGSTKFLT,
#endif
    [SIGCHLD] = TARGET_SIGCHLD,
    [SIGCONT] = TARGET_SIGCONT,
    [SIGSTOP] = TARGET_SIGSTOP,
    [SIGTSTP] = TARGET_SIGTSTP,
    [SIGTTIN] = TARGET_SIGTTIN,
    [SIGTTOU] = TARGET_SIGTTOU,
    [SIGURG] = TARGET_SIGURG,
    [SIGXCPU] = TARGET_SIGXCPU,
    [SIGXFSZ] = TARGET_SIGXFSZ,
    [SIGVTALRM] = TARGET_SIGVTALRM,
    [SIGPROF] = TARGET_SIGPROF,
    [SIGWINCH] = TARGET_SIGWINCH,
    [SIGIO] = TARGET_SIGIO,
    [SIGPWR] = TARGET_SIGPWR,
    [SIGSYS] = TARGET_SIGSYS,
    /* next signals stay the same */
    /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
       host libpthread signals.  This assumes no one actually uses SIGRTMAX :-/
       To fix this properly we need to do manual signal delivery multiplexed
       over a single host signal.  */
    [__SIGRTMIN] = __SIGRTMAX,
    [__SIGRTMAX] = __SIGRTMIN,
};

/* Inverse of the table above; built by signal_init(). */
static uint8_t target_to_host_signal_table[_NSIG];
9e5f5284 84
1d9d8b55 85int host_to_target_signal(int sig)
31e31b8a 86{
167c50d8 87 if (sig < 0 || sig >= _NSIG)
4cb05961 88 return sig;
9e5f5284 89 return host_to_target_signal_table[sig];
31e31b8a
FB
90}
91
4cb05961 92int target_to_host_signal(int sig)
31e31b8a 93{
167c50d8 94 if (sig < 0 || sig >= _NSIG)
4cb05961 95 return sig;
9e5f5284 96 return target_to_host_signal_table[sig];
31e31b8a
FB
97}
98
c227f099 99static inline void target_sigaddset(target_sigset_t *set, int signum)
f5545b5c
PB
100{
101 signum--;
102 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
103 set->sig[signum / TARGET_NSIG_BPW] |= mask;
104}
105
c227f099 106static inline int target_sigismember(const target_sigset_t *set, int signum)
f5545b5c
PB
107{
108 signum--;
109 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
110 return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
111}
112
befb7447
LV
113void host_to_target_sigset_internal(target_sigset_t *d,
114 const sigset_t *s)
66fb9763
FB
115{
116 int i;
f5545b5c
PB
117 target_sigemptyset(d);
118 for (i = 1; i <= TARGET_NSIG; i++) {
119 if (sigismember(s, i)) {
120 target_sigaddset(d, host_to_target_signal(i));
121 }
66fb9763
FB
122 }
123}
124
c227f099 125void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
9231944d 126{
c227f099 127 target_sigset_t d1;
9231944d
FB
128 int i;
129
130 host_to_target_sigset_internal(&d1, s);
131 for(i = 0;i < TARGET_NSIG_WORDS; i++)
cbb21eed 132 d->sig[i] = tswapal(d1.sig[i]);
9231944d
FB
133}
134
befb7447
LV
135void target_to_host_sigset_internal(sigset_t *d,
136 const target_sigset_t *s)
66fb9763
FB
137{
138 int i;
f5545b5c
PB
139 sigemptyset(d);
140 for (i = 1; i <= TARGET_NSIG; i++) {
141 if (target_sigismember(s, i)) {
142 sigaddset(d, target_to_host_signal(i));
143 }
da7c8647 144 }
66fb9763
FB
145}
146
c227f099 147void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
9231944d 148{
c227f099 149 target_sigset_t s1;
9231944d
FB
150 int i;
151
152 for(i = 0;i < TARGET_NSIG_WORDS; i++)
cbb21eed 153 s1.sig[i] = tswapal(s->sig[i]);
9231944d
FB
154 target_to_host_sigset_internal(d, &s1);
155}
3b46e624 156
992f48a0 157void host_to_target_old_sigset(abi_ulong *old_sigset,
66fb9763
FB
158 const sigset_t *sigset)
159{
c227f099 160 target_sigset_t d;
9e5f5284
FB
161 host_to_target_sigset(&d, sigset);
162 *old_sigset = d.sig[0];
66fb9763
FB
163}
164
5fafdf24 165void target_to_host_old_sigset(sigset_t *sigset,
992f48a0 166 const abi_ulong *old_sigset)
66fb9763 167{
c227f099 168 target_sigset_t d;
9e5f5284
FB
169 int i;
170
171 d.sig[0] = *old_sigset;
172 for(i = 1;i < TARGET_NSIG_WORDS; i++)
173 d.sig[i] = 0;
174 target_to_host_sigset(sigset, &d);
66fb9763
FB
175}
176
3d3efba0
PM
/* Block all host signals for this thread and mark guest signal
 * processing as pending.
 *
 * Returns the previous value of ts->signal_pending: non-zero means a
 * signal was already pending, in which case callers report
 * -TARGET_ERESTARTSYS so the guest syscall is restarted.
 */
int block_signals(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    sigset_t set;

    /* It's OK to block everything including SIGSEGV, because we won't
     * run any further guest code before unblocking signals in
     * process_pending_signals().
     */
    sigfillset(&set);
    sigprocmask(SIG_SETMASK, &set, 0);

    return atomic_xchg(&ts->signal_pending, 1);
}
191
1c275925
AB
/* Wrapper for sigprocmask function
 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
 * are host signal set, not guest ones. Returns -TARGET_ERESTARTSYS if
 * a signal was already pending and the syscall must be restarted, or
 * 0 on success.
 * If set is NULL, this is guaranteed not to fail.
 */
int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    if (oldset) {
        *oldset = ts->signal_mask;
    }

    if (set) {
        int i;

        /* Block host signals so the update of ts->signal_mask cannot
         * race with host_signal_handler(). */
        if (block_signals()) {
            return -TARGET_ERESTARTSYS;
        }

        switch (how) {
        case SIG_BLOCK:
            sigorset(&ts->signal_mask, &ts->signal_mask, set);
            break;
        case SIG_UNBLOCK:
            for (i = 1; i <= NSIG; ++i) {
                if (sigismember(set, i)) {
                    sigdelset(&ts->signal_mask, i);
                }
            }
            break;
        case SIG_SETMASK:
            ts->signal_mask = *set;
            break;
        default:
            g_assert_not_reached();
        }

        /* Silently ignore attempts to change blocking status of KILL or STOP */
        sigdelset(&ts->signal_mask, SIGKILL);
        sigdelset(&ts->signal_mask, SIGSTOP);
    }
    return 0;
}
238
#if !defined(TARGET_NIOS2)
/* Just set the guest's signal mask to the specified value; the
 * caller is assumed to have called block_signals() already.
 */
void set_sigmask(const sigset_t *set)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;

    ts->signal_mask = *set;
}
#endif
250
465e237b
LV
251/* sigaltstack management */
252
253int on_sig_stack(unsigned long sp)
254{
255 return (sp - target_sigaltstack_used.ss_sp
256 < target_sigaltstack_used.ss_size);
257}
258
259int sas_ss_flags(unsigned long sp)
260{
261 return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE
262 : on_sig_stack(sp) ? SS_ONSTACK : 0);
263}
264
265abi_ulong target_sigsp(abi_ulong sp, struct target_sigaction *ka)
266{
267 /*
268 * This is the X/Open sanctioned signal stack switching.
269 */
270 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
271 return target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
272 }
273 return sp;
274}
275
/* Store the current sigaltstack settings into the guest stack_t *uss,
 * deriving ss_flags from the CPU's current stack pointer. */
void target_save_altstack(target_stack_t *uss, CPUArchState *env)
{
    __put_user(target_sigaltstack_used.ss_sp, &uss->ss_sp);
    __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &uss->ss_flags);
    __put_user(target_sigaltstack_used.ss_size, &uss->ss_size);
}
282
9de5e440
FB
/* siginfo conversion */

/* Convert a host siginfo_t into guest layout WITHOUT byteswapping.
 * The guessed QEMU_SI_* union discriminator is recorded in the top
 * 16 bits of si_code for tswap_siginfo() to consume later. */
static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
                                                 const siginfo_t *info)
{
    int sig = host_to_target_signal(info->si_signo);
    int si_code = info->si_code;
    int si_type;
    tinfo->si_signo = sig;
    tinfo->si_errno = 0;
    tinfo->si_code = info->si_code;

    /* This memset serves two purposes:
     * (1) ensure we don't leak random junk to the guest later
     * (2) placate false positives from gcc about fields
     *     being used uninitialized if it chooses to inline both this
     *     function and tswap_siginfo() into host_to_target_siginfo().
     */
    memset(tinfo->_sifields._pad, 0, sizeof(tinfo->_sifields._pad));

    /* This is awkward, because we have to use a combination of
     * the si_code and si_signo to figure out which of the union's
     * members are valid. (Within the host kernel it is always possible
     * to tell, but the kernel carefully avoids giving userspace the
     * high 16 bits of si_code, so we don't have the information to
     * do this the easy way...) We therefore make our best guess,
     * bearing in mind that a guest can spoof most of the si_codes
     * via rt_sigqueueinfo() if it likes.
     *
     * Once we have made our guess, we record it in the top 16 bits of
     * the si_code, so that tswap_siginfo() later can use it.
     * tswap_siginfo() will strip these top bits out before writing
     * si_code to the guest (sign-extending the lower bits).
     */

    switch (si_code) {
    case SI_USER:
    case SI_TKILL:
    case SI_KERNEL:
        /* Sent via kill(), tkill() or tgkill(), or direct from the kernel.
         * These are the only unspoofable si_code values.
         */
        tinfo->_sifields._kill._pid = info->si_pid;
        tinfo->_sifields._kill._uid = info->si_uid;
        si_type = QEMU_SI_KILL;
        break;
    default:
        /* Everything else is spoofable. Make best guess based on signal */
        switch (sig) {
        case TARGET_SIGCHLD:
            tinfo->_sifields._sigchld._pid = info->si_pid;
            tinfo->_sifields._sigchld._uid = info->si_uid;
            tinfo->_sifields._sigchld._status
                = host_to_target_waitstatus(info->si_status);
            tinfo->_sifields._sigchld._utime = info->si_utime;
            tinfo->_sifields._sigchld._stime = info->si_stime;
            si_type = QEMU_SI_CHLD;
            break;
        case TARGET_SIGIO:
            tinfo->_sifields._sigpoll._band = info->si_band;
            tinfo->_sifields._sigpoll._fd = info->si_fd;
            si_type = QEMU_SI_POLL;
            break;
        default:
            /* Assume a sigqueue()/mq_notify()/rt_sigqueueinfo() source. */
            tinfo->_sifields._rt._pid = info->si_pid;
            tinfo->_sifields._rt._uid = info->si_uid;
            /* XXX: potential problem if 64 bit */
            tinfo->_sifields._rt._sigval.sival_ptr
                = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
            si_type = QEMU_SI_RT;
            break;
        }
        break;
    }

    tinfo->si_code = deposit32(si_code, 16, 16, si_type);
}
361
befb7447
LV
/* Byteswap a target_siginfo_t for the guest, using the QEMU_SI_* type
 * stashed in the top 16 bits of si_code by host_to_target_siginfo_noswap()
 * (or queue_signal) to pick the valid union members.  The marker bits
 * are stripped, sign-extending the low 16 bits, before si_code is
 * written out. */
void tswap_siginfo(target_siginfo_t *tinfo,
                   const target_siginfo_t *info)
{
    int si_type = extract32(info->si_code, 16, 16);
    int si_code = sextract32(info->si_code, 0, 16);

    __put_user(info->si_signo, &tinfo->si_signo);
    __put_user(info->si_errno, &tinfo->si_errno);
    __put_user(si_code, &tinfo->si_code);

    /* We can use our internal marker of which fields in the structure
     * are valid, rather than duplicating the guesswork of
     * host_to_target_siginfo_noswap() here.
     */
    switch (si_type) {
    case QEMU_SI_KILL:
        __put_user(info->_sifields._kill._pid, &tinfo->_sifields._kill._pid);
        __put_user(info->_sifields._kill._uid, &tinfo->_sifields._kill._uid);
        break;
    case QEMU_SI_TIMER:
        __put_user(info->_sifields._timer._timer1,
                   &tinfo->_sifields._timer._timer1);
        __put_user(info->_sifields._timer._timer2,
                   &tinfo->_sifields._timer._timer2);
        break;
    case QEMU_SI_POLL:
        __put_user(info->_sifields._sigpoll._band,
                   &tinfo->_sifields._sigpoll._band);
        __put_user(info->_sifields._sigpoll._fd,
                   &tinfo->_sifields._sigpoll._fd);
        break;
    case QEMU_SI_FAULT:
        __put_user(info->_sifields._sigfault._addr,
                   &tinfo->_sifields._sigfault._addr);
        break;
    case QEMU_SI_CHLD:
        __put_user(info->_sifields._sigchld._pid,
                   &tinfo->_sifields._sigchld._pid);
        __put_user(info->_sifields._sigchld._uid,
                   &tinfo->_sifields._sigchld._uid);
        __put_user(info->_sifields._sigchld._status,
                   &tinfo->_sifields._sigchld._status);
        __put_user(info->_sifields._sigchld._utime,
                   &tinfo->_sifields._sigchld._utime);
        __put_user(info->_sifields._sigchld._stime,
                   &tinfo->_sifields._sigchld._stime);
        break;
    case QEMU_SI_RT:
        __put_user(info->_sifields._rt._pid, &tinfo->_sifields._rt._pid);
        __put_user(info->_sifields._rt._uid, &tinfo->_sifields._rt._uid);
        __put_user(info->_sifields._rt._sigval.sival_ptr,
                   &tinfo->_sifields._rt._sigval.sival_ptr);
        break;
    default:
        g_assert_not_reached();
    }
}
419
c227f099 420void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
9de5e440 421{
55d72a7e
PM
422 target_siginfo_t tgt_tmp;
423 host_to_target_siginfo_noswap(&tgt_tmp, info);
424 tswap_siginfo(tinfo, &tgt_tmp);
66fb9763
FB
425}
426
/* XXX: we support only POSIX RT signals are used. */
/* XXX: find a solution for 64 bit (additional malloced data is needed) */
/* Convert a byteswapped guest siginfo into host form. */
void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
{
    /* This conversion is used only for the rt_sigqueueinfo syscall,
     * and so we know that the _rt fields are the valid ones.
     */
    abi_ulong sival_ptr;

    __get_user(info->si_signo, &tinfo->si_signo);
    __get_user(info->si_errno, &tinfo->si_errno);
    __get_user(info->si_code, &tinfo->si_code);
    __get_user(info->si_pid, &tinfo->_sifields._rt._pid);
    __get_user(info->si_uid, &tinfo->_sifields._rt._uid);
    __get_user(sival_ptr, &tinfo->_sifields._rt._sigval.sival_ptr);
    /* NOTE(review): truncates/extends via long — same caveat as the
     * "64 bit" XXX above. */
    info->si_value.sival_ptr = (void *)(long)sival_ptr;
}
444
ca587a8e
AJ
445static int fatal_signal (int sig)
446{
447 switch (sig) {
448 case TARGET_SIGCHLD:
449 case TARGET_SIGURG:
450 case TARGET_SIGWINCH:
451 /* Ignored by default. */
452 return 0;
453 case TARGET_SIGCONT:
454 case TARGET_SIGSTOP:
455 case TARGET_SIGTSTP:
456 case TARGET_SIGTTIN:
457 case TARGET_SIGTTOU:
458 /* Job control signals. */
459 return 0;
460 default:
461 return 1;
462 }
463}
464
edf8e2af
MW
465/* returns 1 if given signal should dump core if not handled */
466static int core_dump_signal(int sig)
467{
468 switch (sig) {
469 case TARGET_SIGABRT:
470 case TARGET_SIGFPE:
471 case TARGET_SIGILL:
472 case TARGET_SIGQUIT:
473 case TARGET_SIGSEGV:
474 case TARGET_SIGTRAP:
475 case TARGET_SIGBUS:
476 return (1);
477 default:
478 return (0);
479 }
480}
481
31e31b8a
FB
/* Build the host<->guest signal translation tables, seed the guest
 * signal mask from the host's, and install host handlers for every
 * default-fatal signal. */
void signal_init(void)
{
    TaskState *ts = (TaskState *)thread_cpu->opaque;
    struct sigaction act;
    struct sigaction oact;
    int i, j;
    int host_sig;

    /* generate signal conversion tables */
    for(i = 1; i < _NSIG; i++) {
        if (host_to_target_signal_table[i] == 0)
            host_to_target_signal_table[i] = i;
    }
    for(i = 1; i < _NSIG; i++) {
        j = host_to_target_signal_table[i];
        target_to_host_signal_table[j] = i;
    }

    /* Set the signal mask from the host mask. */
    sigprocmask(0, 0, &ts->signal_mask);

    /* set all host signal handlers. ALL signals are blocked during
       the handlers to serialize them. */
    memset(sigact_table, 0, sizeof(sigact_table));

    sigfillset(&act.sa_mask);
    act.sa_flags = SA_SIGINFO;
    act.sa_sigaction = host_signal_handler;
    for(i = 1; i <= TARGET_NSIG; i++) {
        host_sig = target_to_host_signal(i);
        /* Query the disposition inherited across exec so the guest
         * starts with matching SIG_IGN/SIG_DFL state. */
        sigaction(host_sig, NULL, &oact);
        if (oact.sa_sigaction == (void *)SIG_IGN) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
        } else if (oact.sa_sigaction == (void *)SIG_DFL) {
            sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
        }
        /* If there's already a handler installed then something has
           gone horribly wrong, so don't even try to handle that case. */
        /* Install some handlers for our own use. We need at least
           SIGSEGV and SIGBUS, to detect exceptions. We can not just
           trap all signals because it affects syscall interrupt
           behavior. But do trap all default-fatal signals. */
        if (fatal_signal (i))
            sigaction(host_sig, &act, NULL);
    }
}
528
c599d4d6
PM
/* Force a synchronously taken signal. The kernel force_sig() function
 * also forces the signal to "not blocked, not ignored", but for QEMU
 * that work is done in process_pending_signals().
 */
void force_sig(int sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    target_siginfo_t info;

    /* Fabricate a kernel-originated siginfo (pid/uid 0). */
    info.si_signo = sig;
    info.si_errno = 0;
    info.si_code = TARGET_SI_KERNEL;
    info._sifields._kill._pid = 0;
    info._sifields._kill._uid = 0;
    queue_signal(env, info.si_signo, QEMU_SI_KILL, &info);
}
09391669
PM
546
/* Force a SIGSEGV if we couldn't write to memory trying to set
 * up the signal frame. oldsig is the signal we were trying to handle
 * at the point of failure.
 */
#if !defined(TARGET_RISCV)
void force_sigsegv(int oldsig)
{
    if (oldsig == SIGSEGV) {
        /* Make sure we don't try to deliver the signal again; this will
         * end up with handle_pending_signal() calling dump_core_and_abort().
         */
        sigact_table[oldsig - 1]._sa_handler = TARGET_SIG_DFL;
    }
    force_sig(TARGET_SIGSEGV);
}
#endif
564
/* abort execution with signal */
/* Dump a guest core (if the binary format supports it) and then die
 * from the corresponding host signal with its default disposition, so
 * our parent sees the correct wait status.  Never returns. */
static void QEMU_NORETURN dump_core_and_abort(int target_sig)
{
    CPUState *cpu = thread_cpu;
    CPUArchState *env = cpu->env_ptr;
    TaskState *ts = (TaskState *)cpu->opaque;
    int host_sig, core_dumped = 0;
    struct sigaction act;

    host_sig = target_to_host_signal(target_sig);
    trace_user_force_sig(env, target_sig, host_sig);
    gdb_signalled(env, target_sig);

    /* dump core if supported by target binary format */
    if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
        stop_all_tasks();
        core_dumped =
            ((*ts->bprm->core_dump)(target_sig, env) == 0);
    }
    if (core_dumped) {
        /* we already dumped the core of target process, we don't want
         * a coredump of qemu itself */
        struct rlimit nodump;
        getrlimit(RLIMIT_CORE, &nodump);
        nodump.rlim_cur=0;
        setrlimit(RLIMIT_CORE, &nodump);
        (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
            target_sig, strsignal(host_sig), "core dumped" );
    }

    /* The proper exit code for dying from an uncaught signal is
     * -<signal>.  The kernel doesn't allow exit() or _exit() to pass
     * a negative value.  To get the proper exit code we need to
     * actually die from an uncaught signal.  Here the default signal
     * handler is installed, we send ourself a signal and we wait for
     * it to arrive. */
    sigfillset(&act.sa_mask);
    act.sa_handler = SIG_DFL;
    act.sa_flags = 0;
    sigaction(host_sig, &act, NULL);

    /* For some reason raise(host_sig) doesn't send the signal when
     * statically linked on x86-64. */
    kill(getpid(), host_sig);

    /* Make sure the signal isn't masked (just reuse the mask inside
       of act) */
    sigdelset(&act.sa_mask, host_sig);
    sigsuspend(&act.sa_mask);

    /* unreachable */
    abort();
}
618
9de5e440
FB
/* queue a signal so that it will be send to the virtual CPU as soon
   as possible */
/* si_type is a QEMU_SI_* value; it is folded into the top 16 bits of
 * si_code for tswap_siginfo() to consume later.  The signal is stored
 * in the task's sync_signal slot (synchronous/forced delivery). */
int queue_signal(CPUArchState *env, int sig, int si_type,
                 target_siginfo_t *info)
{
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;

    trace_user_queue_signal(env, sig);

    info->si_code = deposit32(info->si_code, 16, 16, si_type);

    ts->sync_signal.info = *info;
    ts->sync_signal.pending = sig;
    /* signal that a new signal is pending */
    atomic_set(&ts->signal_pending, 1);
    return 1; /* indicates that the signal was queued */
}
637
4d330cee
TB
#ifndef HAVE_SAFE_SYSCALL
/* Default version: never rewind.  Builds that define HAVE_SAFE_SYSCALL
 * supply a real implementation elsewhere. */
static inline void rewind_if_in_safe_syscall(void *puc)
{
    /* Default version: never rewind */
}
#endif
644
/* Host SA_SIGINFO handler installed for all trapped signals.  Forwards
 * genuine CPU exceptions (SIGSEGV/SIGBUS with positive si_code) to the
 * emulator; otherwise records the signal in the per-task pending table
 * for later delivery to the guest and kicks the CPU out of its loop. */
static void host_signal_handler(int host_signum, siginfo_t *info,
                                void *puc)
{
    CPUArchState *env = thread_cpu->env_ptr;
    CPUState *cpu = ENV_GET_CPU(env);
    TaskState *ts = cpu->opaque;

    int sig;
    target_siginfo_t tinfo;
    ucontext_t *uc = puc;
    struct emulated_sigtable *k;

    /* the CPU emulator uses some host signals to detect exceptions,
       we forward to it some signals */
    if ((host_signum == SIGSEGV || host_signum == SIGBUS)
        && info->si_code > 0) {
        if (cpu_signal_handler(host_signum, info, puc))
            return;
    }

    /* get target signal number */
    sig = host_to_target_signal(host_signum);
    if (sig < 1 || sig > TARGET_NSIG)
        return;
    trace_user_host_signal(env, host_signum, sig);

    rewind_if_in_safe_syscall(puc);

    host_to_target_siginfo_noswap(&tinfo, info);
    k = &ts->sigtab[sig - 1];
    k->info = tinfo;
    k->pending = sig;
    ts->signal_pending = 1;

    /* Block host signals until target signal handler entered. We
     * can't block SIGSEGV or SIGBUS while we're executing guest
     * code in case the guest code provokes one in the window between
     * now and it getting out to the main loop. Signals will be
     * unblocked again in process_pending_signals().
     *
     * WARNING: we cannot use sigfillset() here because the uc_sigmask
     * field is a kernel sigset_t, which is much smaller than the
     * libc sigset_t which sigfillset() operates on. Using sigfillset()
     * would write 0xff bytes off the end of the structure and trash
     * data on the struct.
     * We can't use sizeof(uc->uc_sigmask) either, because the libc
     * headers define the struct field with the wrong (too large) type.
     */
    memset(&uc->uc_sigmask, 0xff, SIGSET_T_SIZE);
    sigdelset(&uc->uc_sigmask, SIGSEGV);
    sigdelset(&uc->uc_sigmask, SIGBUS);

    /* interrupt the virtual CPU as soon as possible */
    cpu_exit(thread_cpu);
}
700
/* do_sigaltstack() returns target values and errnos. */
/* compare linux/kernel/signal.c:do_sigaltstack() */
abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
{
    int ret;
    struct target_sigaltstack oss;

    /* XXX: test errors */
    /* Snapshot the old settings first so a failure while validating
     * the new stack leaves them available for the final copy-out. */
    if(uoss_addr)
    {
        __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
        __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
        __put_user(sas_ss_flags(sp), &oss.ss_flags);
    }

    if(uss_addr)
    {
        struct target_sigaltstack *uss;
        struct target_sigaltstack ss;
        size_t minstacksize = TARGET_MINSIGSTKSZ;

#if defined(TARGET_PPC64)
        /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
        struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
        if (get_ppc64_abi(image) > 1) {
            minstacksize = 4096;
        }
#endif

        ret = -TARGET_EFAULT;
        if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
            goto out;
        }
        __get_user(ss.ss_sp, &uss->ss_sp);
        __get_user(ss.ss_size, &uss->ss_size);
        __get_user(ss.ss_flags, &uss->ss_flags);
        unlock_user_struct(uss, uss_addr, 0);

        /* Changing the alternate stack while executing on it is
         * forbidden, as in the kernel. */
        ret = -TARGET_EPERM;
        if (on_sig_stack(sp))
            goto out;

        ret = -TARGET_EINVAL;
        if (ss.ss_flags != TARGET_SS_DISABLE
            && ss.ss_flags != TARGET_SS_ONSTACK
            && ss.ss_flags != 0)
            goto out;

        if (ss.ss_flags == TARGET_SS_DISABLE) {
            ss.ss_size = 0;
            ss.ss_sp = 0;
        } else {
            ret = -TARGET_ENOMEM;
            if (ss.ss_size < minstacksize) {
                goto out;
            }
        }

        target_sigaltstack_used.ss_sp = ss.ss_sp;
        target_sigaltstack_used.ss_size = ss.ss_size;
    }

    if (uoss_addr) {
        ret = -TARGET_EFAULT;
        if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
            goto out;
    }

    ret = 0;
out:
    return ret;
}
773
/* do_sigaction() return target values and host errnos */
int do_sigaction(int sig, const struct target_sigaction *act,
                 struct target_sigaction *oact)
{
    struct target_sigaction *k;
    struct sigaction act1;
    int host_sig;
    int ret = 0;

    /* Like the kernel, refuse to change SIGKILL/SIGSTOP dispositions. */
    if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP) {
        return -TARGET_EINVAL;
    }

    if (block_signals()) {
        return -TARGET_ERESTARTSYS;
    }

    k = &sigact_table[sig - 1];
    if (oact) {
        __put_user(k->_sa_handler, &oact->_sa_handler);
        __put_user(k->sa_flags, &oact->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __put_user(k->sa_restorer, &oact->sa_restorer);
#endif
        /* Not swapped.  */
        oact->sa_mask = k->sa_mask;
    }
    if (act) {
        /* FIXME: This is not threadsafe.  */
        __get_user(k->_sa_handler, &act->_sa_handler);
        __get_user(k->sa_flags, &act->sa_flags);
#ifdef TARGET_ARCH_HAS_SA_RESTORER
        __get_user(k->sa_restorer, &act->sa_restorer);
#endif
        /* To be swapped in target_to_host_sigset.  */
        k->sa_mask = act->sa_mask;

        /* we update the host linux signal state */
        host_sig = target_to_host_signal(sig);
        /* SIGSEGV/SIGBUS must stay routed to QEMU's own handler for
         * exception detection, so never touch them on the host. */
        if (host_sig != SIGSEGV && host_sig != SIGBUS) {
            sigfillset(&act1.sa_mask);
            act1.sa_flags = SA_SIGINFO;
            if (k->sa_flags & TARGET_SA_RESTART)
                act1.sa_flags |= SA_RESTART;
            /* NOTE: it is important to update the host kernel signal
               ignore state to avoid getting unexpected interrupted
               syscalls */
            if (k->_sa_handler == TARGET_SIG_IGN) {
                act1.sa_sigaction = (void *)SIG_IGN;
            } else if (k->_sa_handler == TARGET_SIG_DFL) {
                if (fatal_signal (sig))
                    act1.sa_sigaction = host_signal_handler;
                else
                    act1.sa_sigaction = (void *)SIG_DFL;
            } else {
                act1.sa_sigaction = host_signal_handler;
            }
            ret = sigaction(host_sig, &act1, NULL);
        }
    }
    return ret;
}
836
31efaef1
PM
/* Deliver one pending guest signal: let gdb intercept it, honour the
 * TARGET_SIG_DFL/IGN dispositions, and otherwise set up the guest
 * signal frame with the handler's blocked-signal mask applied. */
static void handle_pending_signal(CPUArchState *cpu_env, int sig,
                                  struct emulated_sigtable *k)
{
    CPUState *cpu = ENV_GET_CPU(cpu_env);
    abi_ulong handler;
    sigset_t set;
    target_sigset_t target_old_set;
    struct target_sigaction *sa;
    TaskState *ts = cpu->opaque;

    trace_user_handle_signal(cpu_env, sig);
    /* dequeue signal */
    k->pending = 0;

    /* gdb may consume the signal (returns 0) or replace it. */
    sig = gdb_handlesig(cpu, sig);
    if (!sig) {
        sa = NULL;
        handler = TARGET_SIG_IGN;
    } else {
        sa = &sigact_table[sig - 1];
        handler = sa->_sa_handler;
    }

    if (do_strace) {
        print_taken_signal(sig, &k->info);
    }

    if (handler == TARGET_SIG_DFL) {
        /* default handler : ignore some signal. The other are job control or fatal */
        if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
            kill(getpid(),SIGSTOP);
        } else if (sig != TARGET_SIGCHLD &&
                   sig != TARGET_SIGURG &&
                   sig != TARGET_SIGWINCH &&
                   sig != TARGET_SIGCONT) {
            dump_core_and_abort(sig);
        }
    } else if (handler == TARGET_SIG_IGN) {
        /* ignore sig */
    } else if (handler == TARGET_SIG_ERR) {
        dump_core_and_abort(sig);
    } else {
        /* compute the blocked signals during the handler execution */
        sigset_t *blocked_set;

        target_to_host_sigset(&set, &sa->sa_mask);
        /* SA_NODEFER indicates that the current signal should not be
           blocked during the handler */
        if (!(sa->sa_flags & TARGET_SA_NODEFER))
            sigaddset(&set, target_to_host_signal(sig));

        /* save the previous blocked signal state to restore it at the
           end of the signal execution (see do_sigreturn) */
        host_to_target_sigset_internal(&target_old_set, &ts->signal_mask);

        /* block signals in the handler */
        blocked_set = ts->in_sigsuspend ?
            &ts->sigsuspend_mask : &ts->signal_mask;
        sigorset(&ts->signal_mask, blocked_set, &set);
        ts->in_sigsuspend = 0;

        /* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
        {
            CPUX86State *env = cpu_env;
            if (env->eflags & VM_MASK)
                save_v86_state(env);
        }
#endif
        /* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ARCH_HAS_SETUP_FRAME)
        if (sa->sa_flags & TARGET_SA_SIGINFO) {
            setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
        } else {
            setup_frame(sig, sa, &target_old_set, cpu_env);
        }
#else
        /* These targets do not have traditional signals.  */
        setup_rt_frame(sig, sa, &k->info, &target_old_set, cpu_env);
#endif
        if (sa->sa_flags & TARGET_SA_RESETHAND) {
            sa->_sa_handler = TARGET_SIG_DFL;
        }
    }
}
e902d588
PM
922
/* Deliver all pending guest signals for this thread.  The forced
 * synchronous signal is handled first, then the ordinary pending
 * table; the scan restarts after each delivery because frame set-up
 * can itself raise a new synchronous signal (e.g. SIGSEGV). */
void process_pending_signals(CPUArchState *cpu_env)
{
    CPUState *cpu = ENV_GET_CPU(cpu_env);
    int sig;
    TaskState *ts = cpu->opaque;
    sigset_t set;
    sigset_t *blocked_set;

    while (atomic_read(&ts->signal_pending)) {
        /* FIXME: This is not threadsafe.  */
        /* Block all host signals while we manipulate the pending
         * state; unblocked again at the bottom of the loop. */
        sigfillset(&set);
        sigprocmask(SIG_SETMASK, &set, 0);

    restart_scan:
        sig = ts->sync_signal.pending;
        if (sig) {
            /* Synchronous signals are forced,
             * see force_sig_info() and callers in Linux
             * Note that not all of our queue_signal() calls in QEMU correspond
             * to force_sig_info() calls in Linux (some are send_sig_info()).
             * However it seems like a kernel bug to me to allow the process
             * to block a synchronous signal since it could then just end up
             * looping round and round indefinitely.
             */
            if (sigismember(&ts->signal_mask, target_to_host_signal_table[sig])
                || sigact_table[sig - 1]._sa_handler == TARGET_SIG_IGN) {
                sigdelset(&ts->signal_mask, target_to_host_signal_table[sig]);
                sigact_table[sig - 1]._sa_handler = TARGET_SIG_DFL;
            }

            handle_pending_signal(cpu_env, sig, &ts->sync_signal);
        }

        for (sig = 1; sig <= TARGET_NSIG; sig++) {
            blocked_set = ts->in_sigsuspend ?
                &ts->sigsuspend_mask : &ts->signal_mask;

            if (ts->sigtab[sig - 1].pending &&
                (!sigismember(blocked_set,
                              target_to_host_signal_table[sig]))) {
                handle_pending_signal(cpu_env, sig, &ts->sigtab[sig - 1]);
                /* Restart scan from the beginning, as handle_pending_signal
                 * might have resulted in a new synchronous signal (eg SIGSEGV).
                 */
                goto restart_scan;
            }
        }

        /* if no signal is pending, unblock signals and recheck (the act
         * of unblocking might cause us to take another host signal which
         * will set signal_pending again).
         */
        atomic_set(&ts->signal_pending, 0);
        ts->in_sigsuspend = 0;
        set = ts->signal_mask;
        sigdelset(&set, SIGSEGV);
        sigdelset(&set, SIGBUS);
        sigprocmask(SIG_SETMASK, &set, 0);
    }
    ts->in_sigsuspend = 0;
}