1 /*
2 * Emulation of Linux signals
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include <stdlib.h>
20 #include <stdio.h>
21 #include <string.h>
22 #include <stdarg.h>
23 #include <unistd.h>
24 #include <errno.h>
25 #include <assert.h>
26 #include <sys/ucontext.h>
27 #include <sys/resource.h>
28
29 #include "qemu.h"
30 #include "qemu-common.h"
31 #include "target_signal.h"
32
33 //#define DEBUG_SIGNAL
34
35 static struct target_sigaltstack target_sigaltstack_used = {
36 .ss_sp = 0,
37 .ss_size = 0,
38 .ss_flags = TARGET_SS_DISABLE,
39 };
40
41 static struct target_sigaction sigact_table[TARGET_NSIG];
42
43 static void host_signal_handler(int host_signum, siginfo_t *info,
44 void *puc);
45
46 static uint8_t host_to_target_signal_table[_NSIG] = {
47 [SIGHUP] = TARGET_SIGHUP,
48 [SIGINT] = TARGET_SIGINT,
49 [SIGQUIT] = TARGET_SIGQUIT,
50 [SIGILL] = TARGET_SIGILL,
51 [SIGTRAP] = TARGET_SIGTRAP,
52 [SIGABRT] = TARGET_SIGABRT,
53 /* [SIGIOT] = TARGET_SIGIOT,*/
54 [SIGBUS] = TARGET_SIGBUS,
55 [SIGFPE] = TARGET_SIGFPE,
56 [SIGKILL] = TARGET_SIGKILL,
57 [SIGUSR1] = TARGET_SIGUSR1,
58 [SIGSEGV] = TARGET_SIGSEGV,
59 [SIGUSR2] = TARGET_SIGUSR2,
60 [SIGPIPE] = TARGET_SIGPIPE,
61 [SIGALRM] = TARGET_SIGALRM,
62 [SIGTERM] = TARGET_SIGTERM,
63 #ifdef SIGSTKFLT
64 [SIGSTKFLT] = TARGET_SIGSTKFLT,
65 #endif
66 [SIGCHLD] = TARGET_SIGCHLD,
67 [SIGCONT] = TARGET_SIGCONT,
68 [SIGSTOP] = TARGET_SIGSTOP,
69 [SIGTSTP] = TARGET_SIGTSTP,
70 [SIGTTIN] = TARGET_SIGTTIN,
71 [SIGTTOU] = TARGET_SIGTTOU,
72 [SIGURG] = TARGET_SIGURG,
73 [SIGXCPU] = TARGET_SIGXCPU,
74 [SIGXFSZ] = TARGET_SIGXFSZ,
75 [SIGVTALRM] = TARGET_SIGVTALRM,
76 [SIGPROF] = TARGET_SIGPROF,
77 [SIGWINCH] = TARGET_SIGWINCH,
78 [SIGIO] = TARGET_SIGIO,
79 [SIGPWR] = TARGET_SIGPWR,
80 [SIGSYS] = TARGET_SIGSYS,
81 /* next signals stay the same */
82 /* Nasty hack: Reverse SIGRTMIN and SIGRTMAX to avoid overlap with
83 host libpthread signals. This assumes no one actually uses SIGRTMAX :-/
84 To fix this properly we need to do manual signal delivery multiplexed
85 over a single host signal. */
86 [__SIGRTMIN] = __SIGRTMAX,
87 [__SIGRTMAX] = __SIGRTMIN,
88 };
89 static uint8_t target_to_host_signal_table[_NSIG];
90
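/* Helpers for sigaltstack emulation. Note that on_sig_stack() relies on
 * unsigned wrap-around: a single compare checks both that sp is at or
 * above ss_sp and that it lies within ss_size bytes of it. */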
91 static inline int on_sig_stack(unsigned long sp)
92 {
93 return (sp - target_sigaltstack_used.ss_sp
94 < target_sigaltstack_used.ss_size);
95 }
96
97 static inline int sas_ss_flags(unsigned long sp)
98 {
99 return (target_sigaltstack_used.ss_size == 0 ? SS_DISABLE
100 : on_sig_stack(sp) ? SS_ONSTACK : 0);
101 }
102
103 int host_to_target_signal(int sig)
104 {
105 if (sig < 0 || sig >= _NSIG)
106 return sig;
107 return host_to_target_signal_table[sig];
108 }
109
110 int target_to_host_signal(int sig)
111 {
112 if (sig < 0 || sig >= _NSIG)
113 return sig;
114 return target_to_host_signal_table[sig];
115 }
116
117 static inline void target_sigemptyset(target_sigset_t *set)
118 {
119 memset(set, 0, sizeof(*set));
120 }
121
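/* A target_sigset_t is an array of TARGET_NSIG_WORDS words of
 * TARGET_NSIG_BPW bits each: signal n lives in bit (n-1) % TARGET_NSIG_BPW
 * of word (n-1) / TARGET_NSIG_BPW, which is what the helpers below
 * compute. */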
122 static inline void target_sigaddset(target_sigset_t *set, int signum)
123 {
124 signum--;
125 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
126 set->sig[signum / TARGET_NSIG_BPW] |= mask;
127 }
128
129 static inline int target_sigismember(const target_sigset_t *set, int signum)
130 {
131 signum--;
132 abi_ulong mask = (abi_ulong)1 << (signum % TARGET_NSIG_BPW);
133 return ((set->sig[signum / TARGET_NSIG_BPW] & mask) != 0);
134 }
135
136 static void host_to_target_sigset_internal(target_sigset_t *d,
137 const sigset_t *s)
138 {
139 int i;
140 target_sigemptyset(d);
141 for (i = 1; i <= TARGET_NSIG; i++) {
142 if (sigismember(s, i)) {
143 target_sigaddset(d, host_to_target_signal(i));
144 }
145 }
146 }
147
148 void host_to_target_sigset(target_sigset_t *d, const sigset_t *s)
149 {
150 target_sigset_t d1;
151 int i;
152
153 host_to_target_sigset_internal(&d1, s);
154 for(i = 0;i < TARGET_NSIG_WORDS; i++)
155 d->sig[i] = tswapal(d1.sig[i]);
156 }
157
158 static void target_to_host_sigset_internal(sigset_t *d,
159 const target_sigset_t *s)
160 {
161 int i;
162 sigemptyset(d);
163 for (i = 1; i <= TARGET_NSIG; i++) {
164 if (target_sigismember(s, i)) {
165 sigaddset(d, target_to_host_signal(i));
166 }
167 }
168 }
169
170 void target_to_host_sigset(sigset_t *d, const target_sigset_t *s)
171 {
172 target_sigset_t s1;
173 int i;
174
175 for(i = 0;i < TARGET_NSIG_WORDS; i++)
176 s1.sig[i] = tswapal(s->sig[i]);
177 target_to_host_sigset_internal(d, &s1);
178 }
179
180 void host_to_target_old_sigset(abi_ulong *old_sigset,
181 const sigset_t *sigset)
182 {
183 target_sigset_t d;
184 host_to_target_sigset(&d, sigset);
185 *old_sigset = d.sig[0];
186 }
187
188 void target_to_host_old_sigset(sigset_t *sigset,
189 const abi_ulong *old_sigset)
190 {
191 target_sigset_t d;
192 int i;
193
194 d.sig[0] = *old_sigset;
195 for(i = 1;i < TARGET_NSIG_WORDS; i++)
196 d.sig[i] = 0;
197 target_to_host_sigset(sigset, &d);
198 }
199
200 /* Wrapper for sigprocmask function
201 * Emulates a sigprocmask in a safe way for the guest. Note that set and oldset
202 * are host signal set, not guest ones. This wraps the sigprocmask host calls
203 * that should be protected (calls originated from guest)
204 */
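/* Illustrative sketch (not in the original sources): syscall emulation is
 * expected to convert the guest mask and then go through this wrapper
 * rather than calling sigprocmask() directly, roughly:
 *
 *     sigset_t set, oldset;
 *     target_to_host_sigset(&set, &guest_set);    // remap + byte-swap
 *     ret = do_sigprocmask(SIG_BLOCK, &set, &oldset);
 *
 * so that SIGSEGV is never really blocked on the host (QEMU needs it to
 * detect guest memory faults) while the guest still observes it as blocked
 * via ts->sigsegv_blocked. "guest_set" here is a hypothetical
 * target_sigset_t taken from guest memory. */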
205 int do_sigprocmask(int how, const sigset_t *set, sigset_t *oldset)
206 {
207 int ret;
208 sigset_t val;
209 sigset_t *temp = NULL;
210 CPUState *cpu = thread_cpu;
211 TaskState *ts = (TaskState *)cpu->opaque;
212 bool segv_was_blocked = ts->sigsegv_blocked;
213
214 if (set) {
215 bool has_sigsegv = sigismember(set, SIGSEGV);
216 val = *set;
217 temp = &val;
218
219 sigdelset(temp, SIGSEGV);
220
221 switch (how) {
222 case SIG_BLOCK:
223 if (has_sigsegv) {
224 ts->sigsegv_blocked = true;
225 }
226 break;
227 case SIG_UNBLOCK:
228 if (has_sigsegv) {
229 ts->sigsegv_blocked = false;
230 }
231 break;
232 case SIG_SETMASK:
233 ts->sigsegv_blocked = has_sigsegv;
234 break;
235 default:
236 g_assert_not_reached();
237 }
238 }
239
240 ret = sigprocmask(how, temp, oldset);
241
242 if (oldset && segv_was_blocked) {
243 sigaddset(oldset, SIGSEGV);
244 }
245
246 return ret;
247 }
248
249 /* siginfo conversion */
250
251 static inline void host_to_target_siginfo_noswap(target_siginfo_t *tinfo,
252 const siginfo_t *info)
253 {
254 int sig = host_to_target_signal(info->si_signo);
255 tinfo->si_signo = sig;
256 tinfo->si_errno = 0;
257 tinfo->si_code = info->si_code;
258
259 if (sig == TARGET_SIGILL || sig == TARGET_SIGFPE || sig == TARGET_SIGSEGV
260 || sig == TARGET_SIGBUS || sig == TARGET_SIGTRAP) {
261 /* Should never come here, but who knows. The information for
262 the target is irrelevant. */
263 tinfo->_sifields._sigfault._addr = 0;
264 } else if (sig == TARGET_SIGIO) {
265 tinfo->_sifields._sigpoll._band = info->si_band;
266 tinfo->_sifields._sigpoll._fd = info->si_fd;
267 } else if (sig == TARGET_SIGCHLD) {
268 tinfo->_sifields._sigchld._pid = info->si_pid;
269 tinfo->_sifields._sigchld._uid = info->si_uid;
270 tinfo->_sifields._sigchld._status
271 = host_to_target_waitstatus(info->si_status);
272 tinfo->_sifields._sigchld._utime = info->si_utime;
273 tinfo->_sifields._sigchld._stime = info->si_stime;
274 } else if (sig >= TARGET_SIGRTMIN) {
275 tinfo->_sifields._rt._pid = info->si_pid;
276 tinfo->_sifields._rt._uid = info->si_uid;
277 /* XXX: potential problem if 64 bit */
278 tinfo->_sifields._rt._sigval.sival_ptr
279 = (abi_ulong)(unsigned long)info->si_value.sival_ptr;
280 }
281 }
282
283 static void tswap_siginfo(target_siginfo_t *tinfo,
284 const target_siginfo_t *info)
285 {
286 int sig = info->si_signo;
287 tinfo->si_signo = tswap32(sig);
288 tinfo->si_errno = tswap32(info->si_errno);
289 tinfo->si_code = tswap32(info->si_code);
290
291 if (sig == TARGET_SIGILL || sig == TARGET_SIGFPE || sig == TARGET_SIGSEGV
292 || sig == TARGET_SIGBUS || sig == TARGET_SIGTRAP) {
293 tinfo->_sifields._sigfault._addr
294 = tswapal(info->_sifields._sigfault._addr);
295 } else if (sig == TARGET_SIGIO) {
296 tinfo->_sifields._sigpoll._band
297 = tswap32(info->_sifields._sigpoll._band);
298 tinfo->_sifields._sigpoll._fd = tswap32(info->_sifields._sigpoll._fd);
299 } else if (sig == TARGET_SIGCHLD) {
300 tinfo->_sifields._sigchld._pid
301 = tswap32(info->_sifields._sigchld._pid);
302 tinfo->_sifields._sigchld._uid
303 = tswap32(info->_sifields._sigchld._uid);
304 tinfo->_sifields._sigchld._status
305 = tswap32(info->_sifields._sigchld._status);
306 tinfo->_sifields._sigchld._utime
307 = tswapal(info->_sifields._sigchld._utime);
308 tinfo->_sifields._sigchld._stime
309 = tswapal(info->_sifields._sigchld._stime);
310 } else if (sig >= TARGET_SIGRTMIN) {
311 tinfo->_sifields._rt._pid = tswap32(info->_sifields._rt._pid);
312 tinfo->_sifields._rt._uid = tswap32(info->_sifields._rt._uid);
313 tinfo->_sifields._rt._sigval.sival_ptr
314 = tswapal(info->_sifields._rt._sigval.sival_ptr);
315 }
316 }
317
318
319 void host_to_target_siginfo(target_siginfo_t *tinfo, const siginfo_t *info)
320 {
321 host_to_target_siginfo_noswap(tinfo, info);
322 tswap_siginfo(tinfo, tinfo);
323 }
324
325 /* XXX: we assume that only POSIX RT signals are used here. */
326 /* XXX: find a solution for 64 bit (additional malloced data is needed) */
327 void target_to_host_siginfo(siginfo_t *info, const target_siginfo_t *tinfo)
328 {
329 info->si_signo = tswap32(tinfo->si_signo);
330 info->si_errno = tswap32(tinfo->si_errno);
331 info->si_code = tswap32(tinfo->si_code);
332 info->si_pid = tswap32(tinfo->_sifields._rt._pid);
333 info->si_uid = tswap32(tinfo->_sifields._rt._uid);
334 info->si_value.sival_ptr =
335 (void *)(long)tswapal(tinfo->_sifields._rt._sigval.sival_ptr);
336 }
337
338 static int fatal_signal (int sig)
339 {
340 switch (sig) {
341 case TARGET_SIGCHLD:
342 case TARGET_SIGURG:
343 case TARGET_SIGWINCH:
344 /* Ignored by default. */
345 return 0;
346 case TARGET_SIGCONT:
347 case TARGET_SIGSTOP:
348 case TARGET_SIGTSTP:
349 case TARGET_SIGTTIN:
350 case TARGET_SIGTTOU:
351 /* Job control signals. */
352 return 0;
353 default:
354 return 1;
355 }
356 }
357
358 /* returns 1 if given signal should dump core if not handled */
359 static int core_dump_signal(int sig)
360 {
361 switch (sig) {
362 case TARGET_SIGABRT:
363 case TARGET_SIGFPE:
364 case TARGET_SIGILL:
365 case TARGET_SIGQUIT:
366 case TARGET_SIGSEGV:
367 case TARGET_SIGTRAP:
368 case TARGET_SIGBUS:
369 return (1);
370 default:
371 return (0);
372 }
373 }
374
375 void signal_init(void)
376 {
377 struct sigaction act;
378 struct sigaction oact;
379 int i, j;
380 int host_sig;
381
382 /* generate signal conversion tables */
383 for(i = 1; i < _NSIG; i++) {
384 if (host_to_target_signal_table[i] == 0)
385 host_to_target_signal_table[i] = i;
386 }
387 for(i = 1; i < _NSIG; i++) {
388 j = host_to_target_signal_table[i];
389 target_to_host_signal_table[j] = i;
390 }
391
392 /* set all host signal handlers. ALL signals are blocked during
393 the handlers to serialize them. */
394 memset(sigact_table, 0, sizeof(sigact_table));
395
396 sigfillset(&act.sa_mask);
397 act.sa_flags = SA_SIGINFO;
398 act.sa_sigaction = host_signal_handler;
399 for(i = 1; i <= TARGET_NSIG; i++) {
400 host_sig = target_to_host_signal(i);
401 sigaction(host_sig, NULL, &oact);
402 if (oact.sa_sigaction == (void *)SIG_IGN) {
403 sigact_table[i - 1]._sa_handler = TARGET_SIG_IGN;
404 } else if (oact.sa_sigaction == (void *)SIG_DFL) {
405 sigact_table[i - 1]._sa_handler = TARGET_SIG_DFL;
406 }
407 /* If there's already a handler installed then something has
408 gone horribly wrong, so don't even try to handle that case. */
409 /* Install some handlers for our own use. We need at least
410 SIGSEGV and SIGBUS, to detect exceptions. We cannot just
411 trap all signals because that would affect syscall interrupt
412 behavior. But do trap all default-fatal signals. */
413 if (fatal_signal (i))
414 sigaction(host_sig, &act, NULL);
415 }
416 }
417
418 /* signal queue handling */
419
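/* Sigqueue entries come from a small per-task free list (ts->first_free):
 * alloc_sigqueue() pops an entry and free_sigqueue() pushes it back, so no
 * memory is allocated while handling a signal. */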
420 static inline struct sigqueue *alloc_sigqueue(CPUArchState *env)
421 {
422 CPUState *cpu = ENV_GET_CPU(env);
423 TaskState *ts = cpu->opaque;
424 struct sigqueue *q = ts->first_free;
425 if (!q)
426 return NULL;
427 ts->first_free = q->next;
428 return q;
429 }
430
431 static inline void free_sigqueue(CPUArchState *env, struct sigqueue *q)
432 {
433 CPUState *cpu = ENV_GET_CPU(env);
434 TaskState *ts = cpu->opaque;
435
436 q->next = ts->first_free;
437 ts->first_free = q;
438 }
439
440 /* abort execution with signal */
441 static void QEMU_NORETURN force_sig(int target_sig)
442 {
443 CPUState *cpu = thread_cpu;
444 CPUArchState *env = cpu->env_ptr;
445 TaskState *ts = (TaskState *)cpu->opaque;
446 int host_sig, core_dumped = 0;
447 struct sigaction act;
448 host_sig = target_to_host_signal(target_sig);
449 gdb_signalled(env, target_sig);
450
451 /* dump core if supported by target binary format */
452 if (core_dump_signal(target_sig) && (ts->bprm->core_dump != NULL)) {
453 stop_all_tasks();
454 core_dumped =
455 ((*ts->bprm->core_dump)(target_sig, env) == 0);
456 }
457 if (core_dumped) {
458 /* we already dumped the core of the target process, we don't want
459 * a core dump of qemu itself */
460 struct rlimit nodump;
461 getrlimit(RLIMIT_CORE, &nodump);
462 nodump.rlim_cur=0;
463 setrlimit(RLIMIT_CORE, &nodump);
464 (void) fprintf(stderr, "qemu: uncaught target signal %d (%s) - %s\n",
465 target_sig, strsignal(host_sig), "core dumped" );
466 }
467
468 /* The proper exit code for dying from an uncaught signal is
469 * -<signal>. The kernel doesn't allow exit() or _exit() to pass
470 * a negative value. To get the proper exit code we need to
471 * actually die from an uncaught signal. Here we install the default
472 * signal handler, send ourselves the signal and wait for it to
473 * arrive. */
474 sigfillset(&act.sa_mask);
475 act.sa_handler = SIG_DFL;
476 act.sa_flags = 0;
477 sigaction(host_sig, &act, NULL);
478
479 /* For some reason raise(host_sig) doesn't send the signal when
480 * statically linked on x86-64. */
481 kill(getpid(), host_sig);
482
483 /* Make sure the signal isn't masked (just reuse the mask inside
484 of act) */
485 sigdelset(&act.sa_mask, host_sig);
486 sigsuspend(&act.sa_mask);
487
488 /* unreachable */
489 abort();
490 }
491
492 /* queue a signal so that it will be sent to the virtual CPU as soon
493 as possible */
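/* Returns 0 if the signal was ignored or otherwise disposed of here,
   1 if it was queued for the virtual CPU, and -EAGAIN if no sigqueue
   entry was free. */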
494 int queue_signal(CPUArchState *env, int sig, target_siginfo_t *info)
495 {
496 CPUState *cpu = ENV_GET_CPU(env);
497 TaskState *ts = cpu->opaque;
498 struct emulated_sigtable *k;
499 struct sigqueue *q, **pq;
500 abi_ulong handler;
501 int queue;
502
503 #if defined(DEBUG_SIGNAL)
504 fprintf(stderr, "queue_signal: sig=%d\n",
505 sig);
506 #endif
507 k = &ts->sigtab[sig - 1];
508 queue = gdb_queuesig ();
509 handler = sigact_table[sig - 1]._sa_handler;
510
511 if (ts->sigsegv_blocked && sig == TARGET_SIGSEGV) {
512 /* Guest has blocked SIGSEGV but we got one anyway. Assume this
513 * is a forced SIGSEGV (ie one the kernel handles via force_sig_info
514 * because it got a real MMU fault). A blocked SIGSEGV in that
515 * situation is treated as if using the default handler. This is
516 * not correct if some other process has randomly sent us a SIGSEGV
517 * via kill(), but that is not easy to distinguish at this point,
518 * so we assume it doesn't happen.
519 */
520 handler = TARGET_SIG_DFL;
521 }
522
523 if (!queue && handler == TARGET_SIG_DFL) {
524 if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
525 kill(getpid(),SIGSTOP);
526 return 0;
527 } else
528 /* default handler: ignore some signals. The others are fatal */
529 if (sig != TARGET_SIGCHLD &&
530 sig != TARGET_SIGURG &&
531 sig != TARGET_SIGWINCH &&
532 sig != TARGET_SIGCONT) {
533 force_sig(sig);
534 } else {
535 return 0; /* indicate ignored */
536 }
537 } else if (!queue && handler == TARGET_SIG_IGN) {
538 /* ignore signal */
539 return 0;
540 } else if (!queue && handler == TARGET_SIG_ERR) {
541 force_sig(sig);
542 } else {
543 pq = &k->first;
544 if (sig < TARGET_SIGRTMIN) {
545 /* for a non-real-time signal, we queue exactly one signal */
546 if (!k->pending)
547 q = &k->info;
548 else
549 return 0;
550 } else {
551 if (!k->pending) {
552 /* first signal */
553 q = &k->info;
554 } else {
555 q = alloc_sigqueue(env);
556 if (!q)
557 return -EAGAIN;
558 while (*pq != NULL)
559 pq = &(*pq)->next;
560 }
561 }
562 *pq = q;
563 q->info = *info;
564 q->next = NULL;
565 k->pending = 1;
566 /* signal that a new signal is pending */
567 ts->signal_pending = 1;
568 return 1; /* indicates that the signal was queued */
569 }
570 }
571
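/* Every host signal we trap funnels through here. SIGSEGV/SIGBUS are first
 * offered to cpu_signal_handler() so that guest memory faults can be turned
 * into guest exceptions; everything else is converted to a target siginfo,
 * queued, and cpu_exit() is used to interrupt the virtual CPU as soon as
 * possible. */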
572 static void host_signal_handler(int host_signum, siginfo_t *info,
573 void *puc)
574 {
575 CPUArchState *env = thread_cpu->env_ptr;
576 int sig;
577 target_siginfo_t tinfo;
578
579 /* the CPU emulator uses some host signals to detect exceptions,
580 so we forward those signals to it */
581 if ((host_signum == SIGSEGV || host_signum == SIGBUS)
582 && info->si_code > 0) {
583 if (cpu_signal_handler(host_signum, info, puc))
584 return;
585 }
586
587 /* get target signal number */
588 sig = host_to_target_signal(host_signum);
589 if (sig < 1 || sig > TARGET_NSIG)
590 return;
591 #if defined(DEBUG_SIGNAL)
592 fprintf(stderr, "qemu: got signal %d\n", sig);
593 #endif
594 host_to_target_siginfo_noswap(&tinfo, info);
595 if (queue_signal(env, sig, &tinfo) == 1) {
596 /* interrupt the virtual CPU as soon as possible */
597 cpu_exit(thread_cpu);
598 }
599 }
600
601 /* do_sigaltstack() returns target values and errnos. */
602 /* compare linux/kernel/signal.c:do_sigaltstack() */
603 abi_long do_sigaltstack(abi_ulong uss_addr, abi_ulong uoss_addr, abi_ulong sp)
604 {
605 int ret;
606 struct target_sigaltstack oss;
607
608 /* XXX: test errors */
609 if(uoss_addr)
610 {
611 __put_user(target_sigaltstack_used.ss_sp, &oss.ss_sp);
612 __put_user(target_sigaltstack_used.ss_size, &oss.ss_size);
613 __put_user(sas_ss_flags(sp), &oss.ss_flags);
614 }
615
616 if(uss_addr)
617 {
618 struct target_sigaltstack *uss;
619 struct target_sigaltstack ss;
620 size_t minstacksize = TARGET_MINSIGSTKSZ;
621
622 #if defined(TARGET_PPC64)
623 /* ELF V2 for PPC64 has a 4K minimum stack size for signal handlers */
624 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
625 if (get_ppc64_abi(image) > 1) {
626 minstacksize = 4096;
627 }
628 #endif
629
630 ret = -TARGET_EFAULT;
631 if (!lock_user_struct(VERIFY_READ, uss, uss_addr, 1)) {
632 goto out;
633 }
634 __get_user(ss.ss_sp, &uss->ss_sp);
635 __get_user(ss.ss_size, &uss->ss_size);
636 __get_user(ss.ss_flags, &uss->ss_flags);
637 unlock_user_struct(uss, uss_addr, 0);
638
639 ret = -TARGET_EPERM;
640 if (on_sig_stack(sp))
641 goto out;
642
643 ret = -TARGET_EINVAL;
644 if (ss.ss_flags != TARGET_SS_DISABLE
645 && ss.ss_flags != TARGET_SS_ONSTACK
646 && ss.ss_flags != 0)
647 goto out;
648
649 if (ss.ss_flags == TARGET_SS_DISABLE) {
650 ss.ss_size = 0;
651 ss.ss_sp = 0;
652 } else {
653 ret = -TARGET_ENOMEM;
654 if (ss.ss_size < minstacksize) {
655 goto out;
656 }
657 }
658
659 target_sigaltstack_used.ss_sp = ss.ss_sp;
660 target_sigaltstack_used.ss_size = ss.ss_size;
661 }
662
663 if (uoss_addr) {
664 ret = -TARGET_EFAULT;
665 if (copy_to_user(uoss_addr, &oss, sizeof(oss)))
666 goto out;
667 }
668
669 ret = 0;
670 out:
671 return ret;
672 }
673
674 /* do_sigaction() returns host values and errnos */
675 int do_sigaction(int sig, const struct target_sigaction *act,
676 struct target_sigaction *oact)
677 {
678 struct target_sigaction *k;
679 struct sigaction act1;
680 int host_sig;
681 int ret = 0;
682
683 if (sig < 1 || sig > TARGET_NSIG || sig == TARGET_SIGKILL || sig == TARGET_SIGSTOP)
684 return -EINVAL;
685 k = &sigact_table[sig - 1];
686 #if defined(DEBUG_SIGNAL)
687 fprintf(stderr, "sigaction sig=%d act=0x%p, oact=0x%p\n",
688 sig, act, oact);
689 #endif
690 if (oact) {
691 __put_user(k->_sa_handler, &oact->_sa_handler);
692 __put_user(k->sa_flags, &oact->sa_flags);
693 #if !defined(TARGET_MIPS)
694 __put_user(k->sa_restorer, &oact->sa_restorer);
695 #endif
696 /* Not swapped. */
697 oact->sa_mask = k->sa_mask;
698 }
699 if (act) {
700 /* FIXME: This is not threadsafe. */
701 __get_user(k->_sa_handler, &act->_sa_handler);
702 __get_user(k->sa_flags, &act->sa_flags);
703 #if !defined(TARGET_MIPS)
704 __get_user(k->sa_restorer, &act->sa_restorer);
705 #endif
706 /* To be swapped in target_to_host_sigset. */
707 k->sa_mask = act->sa_mask;
708
709 /* we update the host linux signal state */
710 host_sig = target_to_host_signal(sig);
711 if (host_sig != SIGSEGV && host_sig != SIGBUS) {
712 sigfillset(&act1.sa_mask);
713 act1.sa_flags = SA_SIGINFO;
714 if (k->sa_flags & TARGET_SA_RESTART)
715 act1.sa_flags |= SA_RESTART;
716 /* NOTE: it is important to update the host kernel signal
717 ignore state to avoid getting unexpected interrupted
718 syscalls */
719 if (k->_sa_handler == TARGET_SIG_IGN) {
720 act1.sa_sigaction = (void *)SIG_IGN;
721 } else if (k->_sa_handler == TARGET_SIG_DFL) {
722 if (fatal_signal (sig))
723 act1.sa_sigaction = host_signal_handler;
724 else
725 act1.sa_sigaction = (void *)SIG_DFL;
726 } else {
727 act1.sa_sigaction = host_signal_handler;
728 }
729 ret = sigaction(host_sig, &act1, NULL);
730 }
731 }
732 return ret;
733 }
734
735 #if defined(TARGET_I386) && TARGET_ABI_BITS == 32
736
737 /* from the Linux kernel */
738
739 struct target_fpreg {
740 uint16_t significand[4];
741 uint16_t exponent;
742 };
743
744 struct target_fpxreg {
745 uint16_t significand[4];
746 uint16_t exponent;
747 uint16_t padding[3];
748 };
749
750 struct target_xmmreg {
751 abi_ulong element[4];
752 };
753
754 struct target_fpstate {
755 /* Regular FPU environment */
756 abi_ulong cw;
757 abi_ulong sw;
758 abi_ulong tag;
759 abi_ulong ipoff;
760 abi_ulong cssel;
761 abi_ulong dataoff;
762 abi_ulong datasel;
763 struct target_fpreg _st[8];
764 uint16_t status;
765 uint16_t magic; /* 0xffff = regular FPU data only */
766
767 /* FXSR FPU environment */
768 abi_ulong _fxsr_env[6]; /* FXSR FPU env is ignored */
769 abi_ulong mxcsr;
770 abi_ulong reserved;
771 struct target_fpxreg _fxsr_st[8]; /* FXSR FPU reg data is ignored */
772 struct target_xmmreg _xmm[8];
773 abi_ulong padding[56];
774 };
775
776 #define X86_FXSR_MAGIC 0x0000
777
778 struct target_sigcontext {
779 uint16_t gs, __gsh;
780 uint16_t fs, __fsh;
781 uint16_t es, __esh;
782 uint16_t ds, __dsh;
783 abi_ulong edi;
784 abi_ulong esi;
785 abi_ulong ebp;
786 abi_ulong esp;
787 abi_ulong ebx;
788 abi_ulong edx;
789 abi_ulong ecx;
790 abi_ulong eax;
791 abi_ulong trapno;
792 abi_ulong err;
793 abi_ulong eip;
794 uint16_t cs, __csh;
795 abi_ulong eflags;
796 abi_ulong esp_at_signal;
797 uint16_t ss, __ssh;
798 abi_ulong fpstate; /* pointer */
799 abi_ulong oldmask;
800 abi_ulong cr2;
801 };
802
803 struct target_ucontext {
804 abi_ulong tuc_flags;
805 abi_ulong tuc_link;
806 target_stack_t tuc_stack;
807 struct target_sigcontext tuc_mcontext;
808 target_sigset_t tuc_sigmask; /* mask last for extensibility */
809 };
810
811 struct sigframe
812 {
813 abi_ulong pretcode;
814 int sig;
815 struct target_sigcontext sc;
816 struct target_fpstate fpstate;
817 abi_ulong extramask[TARGET_NSIG_WORDS-1];
818 char retcode[8];
819 };
820
821 struct rt_sigframe
822 {
823 abi_ulong pretcode;
824 int sig;
825 abi_ulong pinfo;
826 abi_ulong puc;
827 struct target_siginfo info;
828 struct target_ucontext uc;
829 struct target_fpstate fpstate;
830 char retcode[8];
831 };
832
833 /*
834 * Set up a signal frame.
835 */
836
837 /* XXX: save x87 state */
838 static void setup_sigcontext(struct target_sigcontext *sc,
839 struct target_fpstate *fpstate, CPUX86State *env, abi_ulong mask,
840 abi_ulong fpstate_addr)
841 {
842 CPUState *cs = CPU(x86_env_get_cpu(env));
843 uint16_t magic;
844
845 /* already locked in setup_frame() */
846 __put_user(env->segs[R_GS].selector, (unsigned int *)&sc->gs);
847 __put_user(env->segs[R_FS].selector, (unsigned int *)&sc->fs);
848 __put_user(env->segs[R_ES].selector, (unsigned int *)&sc->es);
849 __put_user(env->segs[R_DS].selector, (unsigned int *)&sc->ds);
850 __put_user(env->regs[R_EDI], &sc->edi);
851 __put_user(env->regs[R_ESI], &sc->esi);
852 __put_user(env->regs[R_EBP], &sc->ebp);
853 __put_user(env->regs[R_ESP], &sc->esp);
854 __put_user(env->regs[R_EBX], &sc->ebx);
855 __put_user(env->regs[R_EDX], &sc->edx);
856 __put_user(env->regs[R_ECX], &sc->ecx);
857 __put_user(env->regs[R_EAX], &sc->eax);
858 __put_user(cs->exception_index, &sc->trapno);
859 __put_user(env->error_code, &sc->err);
860 __put_user(env->eip, &sc->eip);
861 __put_user(env->segs[R_CS].selector, (unsigned int *)&sc->cs);
862 __put_user(env->eflags, &sc->eflags);
863 __put_user(env->regs[R_ESP], &sc->esp_at_signal);
864 __put_user(env->segs[R_SS].selector, (unsigned int *)&sc->ss);
865
866 cpu_x86_fsave(env, fpstate_addr, 1);
867 fpstate->status = fpstate->sw;
868 magic = 0xffff;
869 __put_user(magic, &fpstate->magic);
870 __put_user(fpstate_addr, &sc->fpstate);
871
872 /* non-iBCS2 extensions.. */
873 __put_user(mask, &sc->oldmask);
874 __put_user(env->cr[2], &sc->cr2);
875 }
876
877 /*
878 * Determine which stack to use..
879 */
880
881 static inline abi_ulong
882 get_sigframe(struct target_sigaction *ka, CPUX86State *env, size_t frame_size)
883 {
884 unsigned long esp;
885
886 /* Default to using normal stack */
887 esp = env->regs[R_ESP];
888 /* This is the X/Open sanctioned signal stack switching. */
889 if (ka->sa_flags & TARGET_SA_ONSTACK) {
890 if (sas_ss_flags(esp) == 0)
891 esp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
892 }
893
894 /* This is the legacy signal stack switching. */
895 else
896 if ((env->segs[R_SS].selector & 0xffff) != __USER_DS &&
897 !(ka->sa_flags & TARGET_SA_RESTORER) &&
898 ka->sa_restorer) {
899 esp = (unsigned long) ka->sa_restorer;
900 }
901 return (esp - frame_size) & -8ul;
902 }
903
904 /* compare linux/arch/i386/kernel/signal.c:setup_frame() */
905 static void setup_frame(int sig, struct target_sigaction *ka,
906 target_sigset_t *set, CPUX86State *env)
907 {
908 abi_ulong frame_addr;
909 struct sigframe *frame;
910 int i;
911
912 frame_addr = get_sigframe(ka, env, sizeof(*frame));
913
914 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
915 goto give_sigsegv;
916
917 __put_user(sig, &frame->sig);
918
919 setup_sigcontext(&frame->sc, &frame->fpstate, env, set->sig[0],
920 frame_addr + offsetof(struct sigframe, fpstate));
921
922 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
923 __put_user(set->sig[i], &frame->extramask[i - 1]);
924 }
925
926 /* Set up to return from userspace. If provided, use a stub
927 already in userspace. */
928 if (ka->sa_flags & TARGET_SA_RESTORER) {
929 __put_user(ka->sa_restorer, &frame->pretcode);
930 } else {
931 uint16_t val16;
932 abi_ulong retcode_addr;
933 retcode_addr = frame_addr + offsetof(struct sigframe, retcode);
934 __put_user(retcode_addr, &frame->pretcode);
935 /* This is popl %eax ; movl $,%eax ; int $0x80 */
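/* Byte-level note (little-endian): 0x58 is popl %eax, 0xb8 imm32 is
   movl $imm32,%eax and 0xcd 0x80 is int $0x80; the stub is written as
   two 16-bit halves around the 32-bit syscall number. */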
936 val16 = 0xb858;
937 __put_user(val16, (uint16_t *)(frame->retcode+0));
938 __put_user(TARGET_NR_sigreturn, (int *)(frame->retcode+2));
939 val16 = 0x80cd;
940 __put_user(val16, (uint16_t *)(frame->retcode+6));
941 }
942
943
944 /* Set up registers for signal handler */
945 env->regs[R_ESP] = frame_addr;
946 env->eip = ka->_sa_handler;
947
948 cpu_x86_load_seg(env, R_DS, __USER_DS);
949 cpu_x86_load_seg(env, R_ES, __USER_DS);
950 cpu_x86_load_seg(env, R_SS, __USER_DS);
951 cpu_x86_load_seg(env, R_CS, __USER_CS);
952 env->eflags &= ~TF_MASK;
953
954 unlock_user_struct(frame, frame_addr, 1);
955
956 return;
957
958 give_sigsegv:
959 if (sig == TARGET_SIGSEGV)
960 ka->_sa_handler = TARGET_SIG_DFL;
961 force_sig(TARGET_SIGSEGV /* , current */);
962 }
963
964 /* compare linux/arch/i386/kernel/signal.c:setup_rt_frame() */
965 static void setup_rt_frame(int sig, struct target_sigaction *ka,
966 target_siginfo_t *info,
967 target_sigset_t *set, CPUX86State *env)
968 {
969 abi_ulong frame_addr, addr;
970 struct rt_sigframe *frame;
971 int i;
972
973 frame_addr = get_sigframe(ka, env, sizeof(*frame));
974
975 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
976 goto give_sigsegv;
977
978 __put_user(sig, &frame->sig);
979 addr = frame_addr + offsetof(struct rt_sigframe, info);
980 __put_user(addr, &frame->pinfo);
981 addr = frame_addr + offsetof(struct rt_sigframe, uc);
982 __put_user(addr, &frame->puc);
983 tswap_siginfo(&frame->info, info);
984
985 /* Create the ucontext. */
986 __put_user(0, &frame->uc.tuc_flags);
987 __put_user(0, &frame->uc.tuc_link);
988 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
989 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
990 &frame->uc.tuc_stack.ss_flags);
991 __put_user(target_sigaltstack_used.ss_size,
992 &frame->uc.tuc_stack.ss_size);
993 setup_sigcontext(&frame->uc.tuc_mcontext, &frame->fpstate, env,
994 set->sig[0], frame_addr + offsetof(struct rt_sigframe, fpstate));
995
996 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
997 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
998 }
999
1000 /* Set up to return from userspace. If provided, use a stub
1001 already in userspace. */
1002 if (ka->sa_flags & TARGET_SA_RESTORER) {
1003 __put_user(ka->sa_restorer, &frame->pretcode);
1004 } else {
1005 uint16_t val16;
1006 addr = frame_addr + offsetof(struct rt_sigframe, retcode);
1007 __put_user(addr, &frame->pretcode);
1008 /* This is movl $,%eax ; int $0x80 */
1009 __put_user(0xb8, (char *)(frame->retcode+0));
1010 __put_user(TARGET_NR_rt_sigreturn, (int *)(frame->retcode+1));
1011 val16 = 0x80cd;
1012 __put_user(val16, (uint16_t *)(frame->retcode+5));
1013 }
1014
1015 /* Set up registers for signal handler */
1016 env->regs[R_ESP] = frame_addr;
1017 env->eip = ka->_sa_handler;
1018
1019 cpu_x86_load_seg(env, R_DS, __USER_DS);
1020 cpu_x86_load_seg(env, R_ES, __USER_DS);
1021 cpu_x86_load_seg(env, R_SS, __USER_DS);
1022 cpu_x86_load_seg(env, R_CS, __USER_CS);
1023 env->eflags &= ~TF_MASK;
1024
1025 unlock_user_struct(frame, frame_addr, 1);
1026
1027 return;
1028
1029 give_sigsegv:
1030 if (sig == TARGET_SIGSEGV)
1031 ka->_sa_handler = TARGET_SIG_DFL;
1032 force_sig(TARGET_SIGSEGV /* , current */);
1033 }
1034
1035 static int
1036 restore_sigcontext(CPUX86State *env, struct target_sigcontext *sc, int *peax)
1037 {
1038 unsigned int err = 0;
1039 abi_ulong fpstate_addr;
1040 unsigned int tmpflags;
1041
1042 cpu_x86_load_seg(env, R_GS, tswap16(sc->gs));
1043 cpu_x86_load_seg(env, R_FS, tswap16(sc->fs));
1044 cpu_x86_load_seg(env, R_ES, tswap16(sc->es));
1045 cpu_x86_load_seg(env, R_DS, tswap16(sc->ds));
1046
1047 env->regs[R_EDI] = tswapl(sc->edi);
1048 env->regs[R_ESI] = tswapl(sc->esi);
1049 env->regs[R_EBP] = tswapl(sc->ebp);
1050 env->regs[R_ESP] = tswapl(sc->esp);
1051 env->regs[R_EBX] = tswapl(sc->ebx);
1052 env->regs[R_EDX] = tswapl(sc->edx);
1053 env->regs[R_ECX] = tswapl(sc->ecx);
1054 env->eip = tswapl(sc->eip);
1055
1056 cpu_x86_load_seg(env, R_CS, lduw_p(&sc->cs) | 3);
1057 cpu_x86_load_seg(env, R_SS, lduw_p(&sc->ss) | 3);
1058
1059 tmpflags = tswapl(sc->eflags);
1060 env->eflags = (env->eflags & ~0x40DD5) | (tmpflags & 0x40DD5);
1061 // regs->orig_eax = -1; /* disable syscall checks */
1062
1063 fpstate_addr = tswapl(sc->fpstate);
1064 if (fpstate_addr != 0) {
1065 if (!access_ok(VERIFY_READ, fpstate_addr,
1066 sizeof(struct target_fpstate)))
1067 goto badframe;
1068 cpu_x86_frstor(env, fpstate_addr, 1);
1069 }
1070
1071 *peax = tswapl(sc->eax);
1072 return err;
1073 badframe:
1074 return 1;
1075 }
1076
1077 long do_sigreturn(CPUX86State *env)
1078 {
1079 struct sigframe *frame;
1080 abi_ulong frame_addr = env->regs[R_ESP] - 8;
1081 target_sigset_t target_set;
1082 sigset_t set;
1083 int eax, i;
1084
1085 #if defined(DEBUG_SIGNAL)
1086 fprintf(stderr, "do_sigreturn\n");
1087 #endif
1088 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1089 goto badframe;
1090 /* set blocked signals */
1091 __get_user(target_set.sig[0], &frame->sc.oldmask);
1092 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1093 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
1094 }
1095
1096 target_to_host_sigset_internal(&set, &target_set);
1097 do_sigprocmask(SIG_SETMASK, &set, NULL);
1098
1099 /* restore registers */
1100 if (restore_sigcontext(env, &frame->sc, &eax))
1101 goto badframe;
1102 unlock_user_struct(frame, frame_addr, 0);
1103 return eax;
1104
1105 badframe:
1106 unlock_user_struct(frame, frame_addr, 0);
1107 force_sig(TARGET_SIGSEGV);
1108 return 0;
1109 }
1110
1111 long do_rt_sigreturn(CPUX86State *env)
1112 {
1113 abi_ulong frame_addr;
1114 struct rt_sigframe *frame;
1115 sigset_t set;
1116 int eax;
1117
1118 frame_addr = env->regs[R_ESP] - 4;
1119 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1120 goto badframe;
1121 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
1122 do_sigprocmask(SIG_SETMASK, &set, NULL);
1123
1124 if (restore_sigcontext(env, &frame->uc.tuc_mcontext, &eax))
1125 goto badframe;
1126
1127 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe, uc.tuc_stack), 0,
1128 get_sp_from_cpustate(env)) == -EFAULT)
1129 goto badframe;
1130
1131 unlock_user_struct(frame, frame_addr, 0);
1132 return eax;
1133
1134 badframe:
1135 unlock_user_struct(frame, frame_addr, 0);
1136 force_sig(TARGET_SIGSEGV);
1137 return 0;
1138 }
1139
1140 #elif defined(TARGET_AARCH64)
1141
1142 struct target_sigcontext {
1143 uint64_t fault_address;
1144 /* AArch64 registers */
1145 uint64_t regs[31];
1146 uint64_t sp;
1147 uint64_t pc;
1148 uint64_t pstate;
1149 /* 4K reserved for FP/SIMD state and future expansion */
1150 char __reserved[4096] __attribute__((__aligned__(16)));
1151 };
1152
1153 struct target_ucontext {
1154 abi_ulong tuc_flags;
1155 abi_ulong tuc_link;
1156 target_stack_t tuc_stack;
1157 target_sigset_t tuc_sigmask;
1158 /* glibc uses a 1024-bit sigset_t */
1159 char __unused[1024 / 8 - sizeof(target_sigset_t)];
1160 /* last for future expansion */
1161 struct target_sigcontext tuc_mcontext;
1162 };
1163
1164 /*
1165 * Header to be used at the beginning of structures extending the user
1166 * context. Such structures must be placed after the rt_sigframe on the stack
1167 * and be 16-byte aligned. The last structure must be a dummy one with the
1168 * magic and size set to 0.
1169 */
1170 struct target_aarch64_ctx {
1171 uint32_t magic;
1172 uint32_t size;
1173 };
1174
1175 #define TARGET_FPSIMD_MAGIC 0x46508001
1176
1177 struct target_fpsimd_context {
1178 struct target_aarch64_ctx head;
1179 uint32_t fpsr;
1180 uint32_t fpcr;
1181 uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */
1182 };
1183
1184 /*
1185 * Auxiliary context saved in the sigcontext.__reserved array. Not exported to
1186 * user space as it will change with the addition of new context. User space
1187 * should check the magic/size information.
1188 */
1189 struct target_aux_context {
1190 struct target_fpsimd_context fpsimd;
1191 /* additional context to be added before "end" */
1192 struct target_aarch64_ctx end;
1193 };
1194
1195 struct target_rt_sigframe {
1196 struct target_siginfo info;
1197 struct target_ucontext uc;
1198 uint64_t fp;
1199 uint64_t lr;
1200 uint32_t tramp[2];
1201 };
1202
1203 static int target_setup_sigframe(struct target_rt_sigframe *sf,
1204 CPUARMState *env, target_sigset_t *set)
1205 {
1206 int i;
1207 struct target_aux_context *aux =
1208 (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
1209
1210 /* set up the stack frame for unwinding */
1211 __put_user(env->xregs[29], &sf->fp);
1212 __put_user(env->xregs[30], &sf->lr);
1213
1214 for (i = 0; i < 31; i++) {
1215 __put_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
1216 }
1217 __put_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
1218 __put_user(env->pc, &sf->uc.tuc_mcontext.pc);
1219 __put_user(pstate_read(env), &sf->uc.tuc_mcontext.pstate);
1220
1221 __put_user(env->exception.vaddress, &sf->uc.tuc_mcontext.fault_address);
1222
1223 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
1224 __put_user(set->sig[i], &sf->uc.tuc_sigmask.sig[i]);
1225 }
1226
1227 for (i = 0; i < 32; i++) {
1228 #ifdef TARGET_WORDS_BIGENDIAN
1229 __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
1230 __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
1231 #else
1232 __put_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
1233 __put_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
1234 #endif
1235 }
1236 __put_user(vfp_get_fpsr(env), &aux->fpsimd.fpsr);
1237 __put_user(vfp_get_fpcr(env), &aux->fpsimd.fpcr);
1238 __put_user(TARGET_FPSIMD_MAGIC, &aux->fpsimd.head.magic);
1239 __put_user(sizeof(struct target_fpsimd_context),
1240 &aux->fpsimd.head.size);
1241
1242 /* set the "end" magic */
1243 __put_user(0, &aux->end.magic);
1244 __put_user(0, &aux->end.size);
1245
1246 return 0;
1247 }
1248
1249 static int target_restore_sigframe(CPUARMState *env,
1250 struct target_rt_sigframe *sf)
1251 {
1252 sigset_t set;
1253 int i;
1254 struct target_aux_context *aux =
1255 (struct target_aux_context *)sf->uc.tuc_mcontext.__reserved;
1256 uint32_t magic, size, fpsr, fpcr;
1257 uint64_t pstate;
1258
1259 target_to_host_sigset(&set, &sf->uc.tuc_sigmask);
1260 do_sigprocmask(SIG_SETMASK, &set, NULL);
1261
1262 for (i = 0; i < 31; i++) {
1263 __get_user(env->xregs[i], &sf->uc.tuc_mcontext.regs[i]);
1264 }
1265
1266 __get_user(env->xregs[31], &sf->uc.tuc_mcontext.sp);
1267 __get_user(env->pc, &sf->uc.tuc_mcontext.pc);
1268 __get_user(pstate, &sf->uc.tuc_mcontext.pstate);
1269 pstate_write(env, pstate);
1270
1271 __get_user(magic, &aux->fpsimd.head.magic);
1272 __get_user(size, &aux->fpsimd.head.size);
1273
1274 if (magic != TARGET_FPSIMD_MAGIC
1275 || size != sizeof(struct target_fpsimd_context)) {
1276 return 1;
1277 }
1278
1279 for (i = 0; i < 32; i++) {
1280 #ifdef TARGET_WORDS_BIGENDIAN
1281 __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2 + 1]);
1282 __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2]);
1283 #else
1284 __get_user(env->vfp.regs[i * 2], &aux->fpsimd.vregs[i * 2]);
1285 __get_user(env->vfp.regs[i * 2 + 1], &aux->fpsimd.vregs[i * 2 + 1]);
1286 #endif
1287 }
1288 __get_user(fpsr, &aux->fpsimd.fpsr);
1289 vfp_set_fpsr(env, fpsr);
1290 __get_user(fpcr, &aux->fpsimd.fpcr);
1291 vfp_set_fpcr(env, fpcr);
1292
1293 return 0;
1294 }
1295
1296 static abi_ulong get_sigframe(struct target_sigaction *ka, CPUARMState *env)
1297 {
1298 abi_ulong sp;
1299
1300 sp = env->xregs[31];
1301
1302 /*
1303 * This is the X/Open sanctioned signal stack switching.
1304 */
1305 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp)) {
1306 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1307 }
1308
1309 sp = (sp - sizeof(struct target_rt_sigframe)) & ~15;
1310
1311 return sp;
1312 }
1313
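/* The AArch64 signal frame laid out below holds the siginfo, the ucontext
 * (with the FP/SIMD record in its __reserved area), an fp/lr pair and a
 * two-instruction sigreturn trampoline. x29 is pointed at the fp field so
 * the stack can be unwound through the signal handler. */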
1314 static void target_setup_frame(int usig, struct target_sigaction *ka,
1315 target_siginfo_t *info, target_sigset_t *set,
1316 CPUARMState *env)
1317 {
1318 struct target_rt_sigframe *frame;
1319 abi_ulong frame_addr, return_addr;
1320
1321 frame_addr = get_sigframe(ka, env);
1322 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
1323 goto give_sigsegv;
1324 }
1325
1326 __put_user(0, &frame->uc.tuc_flags);
1327 __put_user(0, &frame->uc.tuc_link);
1328
1329 __put_user(target_sigaltstack_used.ss_sp,
1330 &frame->uc.tuc_stack.ss_sp);
1331 __put_user(sas_ss_flags(env->xregs[31]),
1332 &frame->uc.tuc_stack.ss_flags);
1333 __put_user(target_sigaltstack_used.ss_size,
1334 &frame->uc.tuc_stack.ss_size);
1335 target_setup_sigframe(frame, env, set);
1336 if (ka->sa_flags & TARGET_SA_RESTORER) {
1337 return_addr = ka->sa_restorer;
1338 } else {
1339 /* mov x8,#__NR_rt_sigreturn; svc #0 */
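/* 0xd2801168 encodes "movz x8, #139" (139 being __NR_rt_sigreturn on
   AArch64) and 0xd4000001 encodes "svc #0". */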
1340 __put_user(0xd2801168, &frame->tramp[0]);
1341 __put_user(0xd4000001, &frame->tramp[1]);
1342 return_addr = frame_addr + offsetof(struct target_rt_sigframe, tramp);
1343 }
1344 env->xregs[0] = usig;
1345 env->xregs[31] = frame_addr;
1346 env->xregs[29] = env->xregs[31] + offsetof(struct target_rt_sigframe, fp);
1347 env->pc = ka->_sa_handler;
1348 env->xregs[30] = return_addr;
1349 if (info) {
1350 tswap_siginfo(&frame->info, info);
1351 env->xregs[1] = frame_addr + offsetof(struct target_rt_sigframe, info);
1352 env->xregs[2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
1353 }
1354
1355 unlock_user_struct(frame, frame_addr, 1);
1356 return;
1357
1358 give_sigsegv:
1359 unlock_user_struct(frame, frame_addr, 1);
1360 force_sig(TARGET_SIGSEGV);
1361 }
1362
1363 static void setup_rt_frame(int sig, struct target_sigaction *ka,
1364 target_siginfo_t *info, target_sigset_t *set,
1365 CPUARMState *env)
1366 {
1367 target_setup_frame(sig, ka, info, set, env);
1368 }
1369
1370 static void setup_frame(int sig, struct target_sigaction *ka,
1371 target_sigset_t *set, CPUARMState *env)
1372 {
1373 target_setup_frame(sig, ka, 0, set, env);
1374 }
1375
1376 long do_rt_sigreturn(CPUARMState *env)
1377 {
1378 struct target_rt_sigframe *frame = NULL;
1379 abi_ulong frame_addr = env->xregs[31];
1380
1381 if (frame_addr & 15) {
1382 goto badframe;
1383 }
1384
1385 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
1386 goto badframe;
1387 }
1388
1389 if (target_restore_sigframe(env, frame)) {
1390 goto badframe;
1391 }
1392
1393 if (do_sigaltstack(frame_addr +
1394 offsetof(struct target_rt_sigframe, uc.tuc_stack),
1395 0, get_sp_from_cpustate(env)) == -EFAULT) {
1396 goto badframe;
1397 }
1398
1399 unlock_user_struct(frame, frame_addr, 0);
1400 return env->xregs[0];
1401
1402 badframe:
1403 unlock_user_struct(frame, frame_addr, 0);
1404 force_sig(TARGET_SIGSEGV);
1405 return 0;
1406 }
1407
1408 long do_sigreturn(CPUARMState *env)
1409 {
1410 return do_rt_sigreturn(env);
1411 }
1412
1413 #elif defined(TARGET_ARM)
1414
1415 struct target_sigcontext {
1416 abi_ulong trap_no;
1417 abi_ulong error_code;
1418 abi_ulong oldmask;
1419 abi_ulong arm_r0;
1420 abi_ulong arm_r1;
1421 abi_ulong arm_r2;
1422 abi_ulong arm_r3;
1423 abi_ulong arm_r4;
1424 abi_ulong arm_r5;
1425 abi_ulong arm_r6;
1426 abi_ulong arm_r7;
1427 abi_ulong arm_r8;
1428 abi_ulong arm_r9;
1429 abi_ulong arm_r10;
1430 abi_ulong arm_fp;
1431 abi_ulong arm_ip;
1432 abi_ulong arm_sp;
1433 abi_ulong arm_lr;
1434 abi_ulong arm_pc;
1435 abi_ulong arm_cpsr;
1436 abi_ulong fault_address;
1437 };
1438
1439 struct target_ucontext_v1 {
1440 abi_ulong tuc_flags;
1441 abi_ulong tuc_link;
1442 target_stack_t tuc_stack;
1443 struct target_sigcontext tuc_mcontext;
1444 target_sigset_t tuc_sigmask; /* mask last for extensibility */
1445 };
1446
1447 struct target_ucontext_v2 {
1448 abi_ulong tuc_flags;
1449 abi_ulong tuc_link;
1450 target_stack_t tuc_stack;
1451 struct target_sigcontext tuc_mcontext;
1452 target_sigset_t tuc_sigmask; /* mask last for extensibility */
1453 char __unused[128 - sizeof(target_sigset_t)];
1454 abi_ulong tuc_regspace[128] __attribute__((__aligned__(8)));
1455 };
1456
1457 struct target_user_vfp {
1458 uint64_t fpregs[32];
1459 abi_ulong fpscr;
1460 };
1461
1462 struct target_user_vfp_exc {
1463 abi_ulong fpexc;
1464 abi_ulong fpinst;
1465 abi_ulong fpinst2;
1466 };
1467
1468 struct target_vfp_sigframe {
1469 abi_ulong magic;
1470 abi_ulong size;
1471 struct target_user_vfp ufp;
1472 struct target_user_vfp_exc ufp_exc;
1473 } __attribute__((__aligned__(8)));
1474
1475 struct target_iwmmxt_sigframe {
1476 abi_ulong magic;
1477 abi_ulong size;
1478 uint64_t regs[16];
1479 /* Note that not all the coprocessor control registers are stored here */
1480 uint32_t wcssf;
1481 uint32_t wcasf;
1482 uint32_t wcgr0;
1483 uint32_t wcgr1;
1484 uint32_t wcgr2;
1485 uint32_t wcgr3;
1486 } __attribute__((__aligned__(8)));
1487
1488 #define TARGET_VFP_MAGIC 0x56465001
1489 #define TARGET_IWMMXT_MAGIC 0x12ef842a
1490
1491 struct sigframe_v1
1492 {
1493 struct target_sigcontext sc;
1494 abi_ulong extramask[TARGET_NSIG_WORDS-1];
1495 abi_ulong retcode;
1496 };
1497
1498 struct sigframe_v2
1499 {
1500 struct target_ucontext_v2 uc;
1501 abi_ulong retcode;
1502 };
1503
1504 struct rt_sigframe_v1
1505 {
1506 abi_ulong pinfo;
1507 abi_ulong puc;
1508 struct target_siginfo info;
1509 struct target_ucontext_v1 uc;
1510 abi_ulong retcode;
1511 };
1512
1513 struct rt_sigframe_v2
1514 {
1515 struct target_siginfo info;
1516 struct target_ucontext_v2 uc;
1517 abi_ulong retcode;
1518 };
1519
1520 #define TARGET_CONFIG_CPU_32 1
1521
1522 /*
1523 * For ARM syscalls, we encode the syscall number into the instruction.
1524 */
1525 #define SWI_SYS_SIGRETURN (0xef000000|(TARGET_NR_sigreturn + ARM_SYSCALL_BASE))
1526 #define SWI_SYS_RT_SIGRETURN (0xef000000|(TARGET_NR_rt_sigreturn + ARM_SYSCALL_BASE))
1527
1528 /*
1529 * For Thumb syscalls, we pass the syscall number via r7. We therefore
1530 * need two 16-bit instructions.
1531 */
1532 #define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_sigreturn))
1533 #define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (TARGET_NR_rt_sigreturn))
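/* In the Thumb encodings above, 0x2700|nr is "movs r7, #nr" and 0xdf00 is
 * "svc #0"; the two halfwords are stored as a single 32-bit retcode word. */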
1534
1535 static const abi_ulong retcodes[4] = {
1536 SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN,
1537 SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN
1538 };
1539
1540
1541 static inline int valid_user_regs(CPUARMState *regs)
1542 {
1543 return 1;
1544 }
1545
1546 static void
1547 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
1548 CPUARMState *env, abi_ulong mask)
1549 {
1550 __put_user(env->regs[0], &sc->arm_r0);
1551 __put_user(env->regs[1], &sc->arm_r1);
1552 __put_user(env->regs[2], &sc->arm_r2);
1553 __put_user(env->regs[3], &sc->arm_r3);
1554 __put_user(env->regs[4], &sc->arm_r4);
1555 __put_user(env->regs[5], &sc->arm_r5);
1556 __put_user(env->regs[6], &sc->arm_r6);
1557 __put_user(env->regs[7], &sc->arm_r7);
1558 __put_user(env->regs[8], &sc->arm_r8);
1559 __put_user(env->regs[9], &sc->arm_r9);
1560 __put_user(env->regs[10], &sc->arm_r10);
1561 __put_user(env->regs[11], &sc->arm_fp);
1562 __put_user(env->regs[12], &sc->arm_ip);
1563 __put_user(env->regs[13], &sc->arm_sp);
1564 __put_user(env->regs[14], &sc->arm_lr);
1565 __put_user(env->regs[15], &sc->arm_pc);
1566 #ifdef TARGET_CONFIG_CPU_32
1567 __put_user(cpsr_read(env), &sc->arm_cpsr);
1568 #endif
1569
1570 __put_user(/* current->thread.trap_no */ 0, &sc->trap_no);
1571 __put_user(/* current->thread.error_code */ 0, &sc->error_code);
1572 __put_user(/* current->thread.address */ 0, &sc->fault_address);
1573 __put_user(mask, &sc->oldmask);
1574 }
1575
1576 static inline abi_ulong
1577 get_sigframe(struct target_sigaction *ka, CPUARMState *regs, int framesize)
1578 {
1579 unsigned long sp = regs->regs[13];
1580
1581 /*
1582 * This is the X/Open sanctioned signal stack switching.
1583 */
1584 if ((ka->sa_flags & TARGET_SA_ONSTACK) && !sas_ss_flags(sp))
1585 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
1586 /*
1587 * ATPCS B01 mandates 8-byte alignment
1588 */
1589 return (sp - framesize) & ~7;
1590 }
1591
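/* setup_return() chooses the handler's return address: either the
 * guest-supplied sa_restorer or one of the retcodes[] stubs written into
 * the frame. It then sets r0/sp/lr/pc and toggles the Thumb bit in the
 * CPSR according to bit 0 of the handler address. */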
1592 static void
1593 setup_return(CPUARMState *env, struct target_sigaction *ka,
1594 abi_ulong *rc, abi_ulong frame_addr, int usig, abi_ulong rc_addr)
1595 {
1596 abi_ulong handler = ka->_sa_handler;
1597 abi_ulong retcode;
1598 int thumb = handler & 1;
1599 uint32_t cpsr = cpsr_read(env);
1600
1601 cpsr &= ~CPSR_IT;
1602 if (thumb) {
1603 cpsr |= CPSR_T;
1604 } else {
1605 cpsr &= ~CPSR_T;
1606 }
1607
1608 if (ka->sa_flags & TARGET_SA_RESTORER) {
1609 retcode = ka->sa_restorer;
1610 } else {
1611 unsigned int idx = thumb;
1612
1613 if (ka->sa_flags & TARGET_SA_SIGINFO)
1614 idx += 2;
1615
1616 __put_user(retcodes[idx], rc);
1617
1618 retcode = rc_addr + thumb;
1619 }
1620
1621 env->regs[0] = usig;
1622 env->regs[13] = frame_addr;
1623 env->regs[14] = retcode;
1624 env->regs[15] = handler & (thumb ? ~1 : ~3);
1625 cpsr_write(env, cpsr, 0xffffffff);
1626 }
1627
1628 static abi_ulong *setup_sigframe_v2_vfp(abi_ulong *regspace, CPUARMState *env)
1629 {
1630 int i;
1631 struct target_vfp_sigframe *vfpframe;
1632 vfpframe = (struct target_vfp_sigframe *)regspace;
1633 __put_user(TARGET_VFP_MAGIC, &vfpframe->magic);
1634 __put_user(sizeof(*vfpframe), &vfpframe->size);
1635 for (i = 0; i < 32; i++) {
1636 __put_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
1637 }
1638 __put_user(vfp_get_fpscr(env), &vfpframe->ufp.fpscr);
1639 __put_user(env->vfp.xregs[ARM_VFP_FPEXC], &vfpframe->ufp_exc.fpexc);
1640 __put_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
1641 __put_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
1642 return (abi_ulong*)(vfpframe+1);
1643 }
1644
1645 static abi_ulong *setup_sigframe_v2_iwmmxt(abi_ulong *regspace,
1646 CPUARMState *env)
1647 {
1648 int i;
1649 struct target_iwmmxt_sigframe *iwmmxtframe;
1650 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
1651 __put_user(TARGET_IWMMXT_MAGIC, &iwmmxtframe->magic);
1652 __put_user(sizeof(*iwmmxtframe), &iwmmxtframe->size);
1653 for (i = 0; i < 16; i++) {
1654 __put_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
1655 }
1656 __put_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
1657 __put_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
1658 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
1659 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
1660 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
1661 __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
1662 return (abi_ulong*)(iwmmxtframe+1);
1663 }
1664
1665 static void setup_sigframe_v2(struct target_ucontext_v2 *uc,
1666 target_sigset_t *set, CPUARMState *env)
1667 {
1668 struct target_sigaltstack stack;
1669 int i;
1670 abi_ulong *regspace;
1671
1672 /* Clear all the bits of the ucontext we don't use. */
1673 memset(uc, 0, offsetof(struct target_ucontext_v2, tuc_mcontext));
1674
1675 memset(&stack, 0, sizeof(stack));
1676 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
1677 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
1678 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
1679 memcpy(&uc->tuc_stack, &stack, sizeof(stack));
1680
1681 setup_sigcontext(&uc->tuc_mcontext, env, set->sig[0]);
1682 /* Save coprocessor signal frame. */
1683 regspace = uc->tuc_regspace;
1684 if (arm_feature(env, ARM_FEATURE_VFP)) {
1685 regspace = setup_sigframe_v2_vfp(regspace, env);
1686 }
1687 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
1688 regspace = setup_sigframe_v2_iwmmxt(regspace, env);
1689 }
1690
1691 /* Write terminating magic word */
1692 __put_user(0, regspace);
1693
1694 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1695 __put_user(set->sig[i], &uc->tuc_sigmask.sig[i]);
1696 }
1697 }
1698
1699 /* compare linux/arch/arm/kernel/signal.c:setup_frame() */
1700 static void setup_frame_v1(int usig, struct target_sigaction *ka,
1701 target_sigset_t *set, CPUARMState *regs)
1702 {
1703 struct sigframe_v1 *frame;
1704 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
1705 int i;
1706
1707 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
1708 return;
1709
1710 setup_sigcontext(&frame->sc, regs, set->sig[0]);
1711
1712 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1713 __put_user(set->sig[i], &frame->extramask[i - 1]);
1714 }
1715
1716 setup_return(regs, ka, &frame->retcode, frame_addr, usig,
1717 frame_addr + offsetof(struct sigframe_v1, retcode));
1718
1719 unlock_user_struct(frame, frame_addr, 1);
1720 }
1721
1722 static void setup_frame_v2(int usig, struct target_sigaction *ka,
1723 target_sigset_t *set, CPUARMState *regs)
1724 {
1725 struct sigframe_v2 *frame;
1726 abi_ulong frame_addr = get_sigframe(ka, regs, sizeof(*frame));
1727
1728 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
1729 return;
1730
1731 setup_sigframe_v2(&frame->uc, set, regs);
1732
1733 setup_return(regs, ka, &frame->retcode, frame_addr, usig,
1734 frame_addr + offsetof(struct sigframe_v2, retcode));
1735
1736 unlock_user_struct(frame, frame_addr, 1);
1737 }
1738
1739 static void setup_frame(int usig, struct target_sigaction *ka,
1740 target_sigset_t *set, CPUARMState *regs)
1741 {
1742 if (get_osversion() >= 0x020612) {
1743 setup_frame_v2(usig, ka, set, regs);
1744 } else {
1745 setup_frame_v1(usig, ka, set, regs);
1746 }
1747 }
1748
1749 /* compare linux/arch/arm/kernel/signal.c:setup_rt_frame() */
1750 static void setup_rt_frame_v1(int usig, struct target_sigaction *ka,
1751 target_siginfo_t *info,
1752 target_sigset_t *set, CPUARMState *env)
1753 {
1754 struct rt_sigframe_v1 *frame;
1755 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
1756 struct target_sigaltstack stack;
1757 int i;
1758 abi_ulong info_addr, uc_addr;
1759
1760 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
1761 return /* 1 */;
1762
1763 info_addr = frame_addr + offsetof(struct rt_sigframe_v1, info);
1764 __put_user(info_addr, &frame->pinfo);
1765 uc_addr = frame_addr + offsetof(struct rt_sigframe_v1, uc);
1766 __put_user(uc_addr, &frame->puc);
1767 tswap_siginfo(&frame->info, info);
1768
1769 /* Clear all the bits of the ucontext we don't use. */
1770 memset(&frame->uc, 0, offsetof(struct target_ucontext_v1, tuc_mcontext));
1771
1772 memset(&stack, 0, sizeof(stack));
1773 __put_user(target_sigaltstack_used.ss_sp, &stack.ss_sp);
1774 __put_user(target_sigaltstack_used.ss_size, &stack.ss_size);
1775 __put_user(sas_ss_flags(get_sp_from_cpustate(env)), &stack.ss_flags);
1776 memcpy(&frame->uc.tuc_stack, &stack, sizeof(stack));
1777
1778 setup_sigcontext(&frame->uc.tuc_mcontext, env, set->sig[0]);
1779 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
1780 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
1781 }
1782
1783 setup_return(env, ka, &frame->retcode, frame_addr, usig,
1784 frame_addr + offsetof(struct rt_sigframe_v1, retcode));
1785
1786 env->regs[1] = info_addr;
1787 env->regs[2] = uc_addr;
1788
1789 unlock_user_struct(frame, frame_addr, 1);
1790 }
1791
1792 static void setup_rt_frame_v2(int usig, struct target_sigaction *ka,
1793 target_siginfo_t *info,
1794 target_sigset_t *set, CPUARMState *env)
1795 {
1796 struct rt_sigframe_v2 *frame;
1797 abi_ulong frame_addr = get_sigframe(ka, env, sizeof(*frame));
1798 abi_ulong info_addr, uc_addr;
1799
1800 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
1801 return /* 1 */;
1802
1803 info_addr = frame_addr + offsetof(struct rt_sigframe_v2, info);
1804 uc_addr = frame_addr + offsetof(struct rt_sigframe_v2, uc);
1805 tswap_siginfo(&frame->info, info);
1806
1807 setup_sigframe_v2(&frame->uc, set, env);
1808
1809 setup_return(env, ka, &frame->retcode, frame_addr, usig,
1810 frame_addr + offsetof(struct rt_sigframe_v2, retcode));
1811
1812 env->regs[1] = info_addr;
1813 env->regs[2] = uc_addr;
1814
1815 unlock_user_struct(frame, frame_addr, 1);
1816 }
1817
1818 static void setup_rt_frame(int usig, struct target_sigaction *ka,
1819 target_siginfo_t *info,
1820 target_sigset_t *set, CPUARMState *env)
1821 {
1822 if (get_osversion() >= 0x020612) {
1823 setup_rt_frame_v2(usig, ka, info, set, env);
1824 } else {
1825 setup_rt_frame_v1(usig, ka, info, set, env);
1826 }
1827 }
1828
1829 static int
1830 restore_sigcontext(CPUARMState *env, struct target_sigcontext *sc)
1831 {
1832 int err = 0;
1833 uint32_t cpsr;
1834
1835 __get_user(env->regs[0], &sc->arm_r0);
1836 __get_user(env->regs[1], &sc->arm_r1);
1837 __get_user(env->regs[2], &sc->arm_r2);
1838 __get_user(env->regs[3], &sc->arm_r3);
1839 __get_user(env->regs[4], &sc->arm_r4);
1840 __get_user(env->regs[5], &sc->arm_r5);
1841 __get_user(env->regs[6], &sc->arm_r6);
1842 __get_user(env->regs[7], &sc->arm_r7);
1843 __get_user(env->regs[8], &sc->arm_r8);
1844 __get_user(env->regs[9], &sc->arm_r9);
1845 __get_user(env->regs[10], &sc->arm_r10);
1846 __get_user(env->regs[11], &sc->arm_fp);
1847 __get_user(env->regs[12], &sc->arm_ip);
1848 __get_user(env->regs[13], &sc->arm_sp);
1849 __get_user(env->regs[14], &sc->arm_lr);
1850 __get_user(env->regs[15], &sc->arm_pc);
1851 #ifdef TARGET_CONFIG_CPU_32
1852 __get_user(cpsr, &sc->arm_cpsr);
1853 cpsr_write(env, cpsr, CPSR_USER | CPSR_EXEC);
1854 #endif
1855
1856 err |= !valid_user_regs(env);
1857
1858 return err;
1859 }
1860
1861 static long do_sigreturn_v1(CPUARMState *env)
1862 {
1863 abi_ulong frame_addr;
1864 struct sigframe_v1 *frame = NULL;
1865 target_sigset_t set;
1866 sigset_t host_set;
1867 int i;
1868
1869 /*
1870 * Since we stacked the signal on a 64-bit boundary,
1871 * 'sp' should be 64-bit aligned here. If it's
1872 * not, then the user is trying to mess with us.
1873 */
1874 frame_addr = env->regs[13];
1875 if (frame_addr & 7) {
1876 goto badframe;
1877 }
1878
1879 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
1880 goto badframe;
1881
1882 __get_user(set.sig[0], &frame->sc.oldmask);
1883 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
1884 __get_user(set.sig[i], &frame->extramask[i - 1]);
1885 }
1886
1887 target_to_host_sigset_internal(&host_set, &set);
1888 do_sigprocmask(SIG_SETMASK, &host_set, NULL);
1889
1890 if (restore_sigcontext(env, &frame->sc))
1891 goto badframe;
1892
1893 #if 0
1894 /* Send SIGTRAP if we're single-stepping */
1895 if (ptrace_cancel_bpt(current))
1896 send_sig(SIGTRAP, current, 1);
1897 #endif
1898 unlock_user_struct(frame, frame_addr, 0);
1899 return env->regs[0];
1900
1901 badframe:
1902 force_sig(TARGET_SIGSEGV /* , current */);
1903 return 0;
1904 }
1905
1906 static abi_ulong *restore_sigframe_v2_vfp(CPUARMState *env, abi_ulong *regspace)
1907 {
1908 int i;
1909 abi_ulong magic, sz;
1910 uint32_t fpscr, fpexc;
1911 struct target_vfp_sigframe *vfpframe;
1912 vfpframe = (struct target_vfp_sigframe *)regspace;
1913
1914 __get_user(magic, &vfpframe->magic);
1915 __get_user(sz, &vfpframe->size);
1916 if (magic != TARGET_VFP_MAGIC || sz != sizeof(*vfpframe)) {
1917 return 0;
1918 }
1919 for (i = 0; i < 32; i++) {
1920 __get_user(float64_val(env->vfp.regs[i]), &vfpframe->ufp.fpregs[i]);
1921 }
1922 __get_user(fpscr, &vfpframe->ufp.fpscr);
1923 vfp_set_fpscr(env, fpscr);
1924 __get_user(fpexc, &vfpframe->ufp_exc.fpexc);
1925 /* Sanitise FPEXC: ensure VFP is enabled, FPINST2 is invalid
1926 * and the exception flag is cleared
1927 */
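/* FPEXC bit layout: bit 31 = EX, bit 30 = EN, bit 28 = FP2V */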
1928 fpexc |= (1 << 30);
1929 fpexc &= ~((1 << 31) | (1 << 28));
1930 env->vfp.xregs[ARM_VFP_FPEXC] = fpexc;
1931 __get_user(env->vfp.xregs[ARM_VFP_FPINST], &vfpframe->ufp_exc.fpinst);
1932 __get_user(env->vfp.xregs[ARM_VFP_FPINST2], &vfpframe->ufp_exc.fpinst2);
1933 return (abi_ulong*)(vfpframe + 1);
1934 }
1935
1936 static abi_ulong *restore_sigframe_v2_iwmmxt(CPUARMState *env,
1937 abi_ulong *regspace)
1938 {
1939 int i;
1940 abi_ulong magic, sz;
1941 struct target_iwmmxt_sigframe *iwmmxtframe;
1942 iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace;
1943
1944 __get_user(magic, &iwmmxtframe->magic);
1945 __get_user(sz, &iwmmxtframe->size);
1946 if (magic != TARGET_IWMMXT_MAGIC || sz != sizeof(*iwmmxtframe)) {
1947 return 0;
1948 }
1949 for (i = 0; i < 16; i++) {
1950 __get_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]);
1951 }
1952 __get_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf);
1953 __get_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcasf);
1954 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0);
1955 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1);
1956 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2);
1957 __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3);
1958 return (abi_ulong*)(iwmmxtframe + 1);
1959 }
1960
1961 static int do_sigframe_return_v2(CPUARMState *env, target_ulong frame_addr,
1962 struct target_ucontext_v2 *uc)
1963 {
1964 sigset_t host_set;
1965 abi_ulong *regspace;
1966
1967 target_to_host_sigset(&host_set, &uc->tuc_sigmask);
1968 do_sigprocmask(SIG_SETMASK, &host_set, NULL);
1969
1970 if (restore_sigcontext(env, &uc->tuc_mcontext))
1971 return 1;
1972
1973 /* Restore coprocessor signal frame */
1974 regspace = uc->tuc_regspace;
1975 if (arm_feature(env, ARM_FEATURE_VFP)) {
1976 regspace = restore_sigframe_v2_vfp(env, regspace);
1977 if (!regspace) {
1978 return 1;
1979 }
1980 }
1981 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
1982 regspace = restore_sigframe_v2_iwmmxt(env, regspace);
1983 if (!regspace) {
1984 return 1;
1985 }
1986 }
1987
1988 if (do_sigaltstack(frame_addr + offsetof(struct target_ucontext_v2, tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT)
1989 return 1;
1990
1991 #if 0
1992 /* Send SIGTRAP if we're single-stepping */
1993 if (ptrace_cancel_bpt(current))
1994 send_sig(SIGTRAP, current, 1);
1995 #endif
1996
1997 return 0;
1998 }
1999
2000 static long do_sigreturn_v2(CPUARMState *env)
2001 {
2002 abi_ulong frame_addr;
2003 struct sigframe_v2 *frame = NULL;
2004
2005 /*
2006 * Since we stacked the signal on a 64-bit boundary,
2007 * 'sp' should be 64-bit aligned here. If it's
2008 * not, then the user is trying to mess with us.
2009 */
2010 frame_addr = env->regs[13];
2011 if (frame_addr & 7) {
2012 goto badframe;
2013 }
2014
2015 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
2016 goto badframe;
2017
2018 if (do_sigframe_return_v2(env, frame_addr, &frame->uc))
2019 goto badframe;
2020
2021 unlock_user_struct(frame, frame_addr, 0);
2022 return env->regs[0];
2023
2024 badframe:
2025 unlock_user_struct(frame, frame_addr, 0);
2026 force_sig(TARGET_SIGSEGV /* , current */);
2027 return 0;
2028 }
2029
2030 long do_sigreturn(CPUARMState *env)
2031 {
2032 if (get_osversion() >= 0x020612) {
2033 return do_sigreturn_v2(env);
2034 } else {
2035 return do_sigreturn_v1(env);
2036 }
2037 }
2038
2039 static long do_rt_sigreturn_v1(CPUARMState *env)
2040 {
2041 abi_ulong frame_addr;
2042 struct rt_sigframe_v1 *frame = NULL;
2043 sigset_t host_set;
2044
2045 /*
2046 * Since we stacked the signal on a 64-bit boundary,
2047 * 'sp' should be 64-bit aligned here. If it's
2048 * not, then the user is trying to mess with us.
2049 */
2050 frame_addr = env->regs[13];
2051 if (frame_addr & 7) {
2052 goto badframe;
2053 }
2054
2055 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
2056 goto badframe;
2057
2058 target_to_host_sigset(&host_set, &frame->uc.tuc_sigmask);
2059 do_sigprocmask(SIG_SETMASK, &host_set, NULL);
2060
2061 if (restore_sigcontext(env, &frame->uc.tuc_mcontext))
2062 goto badframe;
2063
2064 if (do_sigaltstack(frame_addr + offsetof(struct rt_sigframe_v1, uc.tuc_stack), 0, get_sp_from_cpustate(env)) == -EFAULT)
2065 goto badframe;
2066
2067 #if 0
2068 /* Send SIGTRAP if we're single-stepping */
2069 if (ptrace_cancel_bpt(current))
2070 send_sig(SIGTRAP, current, 1);
2071 #endif
2072 unlock_user_struct(frame, frame_addr, 0);
2073 return env->regs[0];
2074
2075 badframe:
2076 unlock_user_struct(frame, frame_addr, 0);
2077 force_sig(TARGET_SIGSEGV /* , current */);
2078 return 0;
2079 }
2080
2081 static long do_rt_sigreturn_v2(CPUARMState *env)
2082 {
2083 abi_ulong frame_addr;
2084 struct rt_sigframe_v2 *frame = NULL;
2085
2086 /*
2087 * Since we stacked the signal on a 64-bit boundary,
2088 * 'sp' should be 64-bit aligned here. If it's
2089 * not, then the user is trying to mess with us.
2090 */
2091 frame_addr = env->regs[13];
2092 if (frame_addr & 7) {
2093 goto badframe;
2094 }
2095
2096 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
2097 goto badframe;
2098
2099 if (do_sigframe_return_v2(env, frame_addr, &frame->uc))
2100 goto badframe;
2101
2102 unlock_user_struct(frame, frame_addr, 0);
2103 return env->regs[0];
2104
2105 badframe:
2106 unlock_user_struct(frame, frame_addr, 0);
2107 force_sig(TARGET_SIGSEGV /* , current */);
2108 return 0;
2109 }
2110
2111 long do_rt_sigreturn(CPUARMState *env)
2112 {
2113 if (get_osversion() >= 0x020612) {
2114 return do_rt_sigreturn_v2(env);
2115 } else {
2116 return do_rt_sigreturn_v1(env);
2117 }
2118 }
2119
2120 #elif defined(TARGET_SPARC)
2121
2122 #define __SUNOS_MAXWIN 31
2123
2124 /* This is what SunOS does, so shall I. */
2125 struct target_sigcontext {
2126 abi_ulong sigc_onstack; /* state to restore */
2127
2128 abi_ulong sigc_mask; /* sigmask to restore */
2129 abi_ulong sigc_sp; /* stack pointer */
2130 abi_ulong sigc_pc; /* program counter */
2131 abi_ulong sigc_npc; /* next program counter */
2132 abi_ulong sigc_psr; /* for condition codes etc */
2133 abi_ulong sigc_g1; /* User uses these two registers */
2134 abi_ulong sigc_o0; /* within the trampoline code. */
2135
2136 /* Now comes information regarding the user's window set
2137 * at the time of the signal.
2138 */
2139 abi_ulong sigc_oswins; /* outstanding windows */
2140
2141 /* stack ptrs for each regwin buf */
2142 char *sigc_spbuf[__SUNOS_MAXWIN];
2143
2144 /* Windows to restore after signal */
2145 struct {
2146 abi_ulong locals[8];
2147 abi_ulong ins[8];
2148 } sigc_wbuf[__SUNOS_MAXWIN];
2149 };
2150 /* A Sparc stack frame */
2151 struct sparc_stackf {
2152 abi_ulong locals[8];
2153 abi_ulong ins[8];
2154 /* It's simpler to treat fp and callers_pc as elements of ins[]
2155 * since we never need to access them ourselves.
2156 */
2157 char *structptr;
2158 abi_ulong xargs[6];
2159 abi_ulong xxargs[1];
2160 };
2161
2162 typedef struct {
2163 struct {
2164 abi_ulong psr;
2165 abi_ulong pc;
2166 abi_ulong npc;
2167 abi_ulong y;
2168 abi_ulong u_regs[16]; /* globals and ins */
2169 } si_regs;
2170 int si_mask;
2171 } __siginfo_t;
2172
2173 typedef struct {
2174 abi_ulong si_float_regs[32];
2175 unsigned long si_fsr;
2176 unsigned long si_fpqdepth;
2177 struct {
2178 unsigned long *insn_addr;
2179 unsigned long insn;
2180 } si_fpqueue [16];
2181 } qemu_siginfo_fpu_t;
2182
2183
2184 struct target_signal_frame {
2185 struct sparc_stackf ss;
2186 __siginfo_t info;
2187 abi_ulong fpu_save;
2188 abi_ulong insns[2] __attribute__ ((aligned (8)));
2189 abi_ulong extramask[TARGET_NSIG_WORDS - 1];
2190 abi_ulong extra_size; /* Should be 0 */
2191 qemu_siginfo_fpu_t fpu_state;
2192 };
2193 struct target_rt_signal_frame {
2194 struct sparc_stackf ss;
2195 siginfo_t info;
2196 abi_ulong regs[20];
2197 sigset_t mask;
2198 abi_ulong fpu_save;
2199 unsigned int insns[2];
2200 stack_t stack;
2201 unsigned int extra_size; /* Should be 0 */
2202 qemu_siginfo_fpu_t fpu_state;
2203 };
2204
2205 #define UREG_O0 16
2206 #define UREG_O6 22
2207 #define UREG_I0 0
2208 #define UREG_I1 1
2209 #define UREG_I2 2
2210 #define UREG_I3 3
2211 #define UREG_I4 4
2212 #define UREG_I5 5
2213 #define UREG_I6 6
2214 #define UREG_I7 7
2215 #define UREG_L0 8
2216 #define UREG_FP UREG_I6
2217 #define UREG_SP UREG_O6
2218
2219 static inline abi_ulong get_sigframe(struct target_sigaction *sa,
2220 CPUSPARCState *env,
2221 unsigned long framesize)
2222 {
2223 abi_ulong sp;
2224
2225 sp = env->regwptr[UREG_FP];
2226
2227 /* This is the X/Open sanctioned signal stack switching. */
2228 if (sa->sa_flags & TARGET_SA_ONSTACK) {
2229 if (!on_sig_stack(sp)
2230 && !((target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size) & 7))
2231 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
2232 }
2233 return sp - framesize;
2234 }
2235
2236 static int
2237 setup___siginfo(__siginfo_t *si, CPUSPARCState *env, abi_ulong mask)
2238 {
2239 int err = 0, i;
2240
2241 __put_user(env->psr, &si->si_regs.psr);
2242 __put_user(env->pc, &si->si_regs.pc);
2243 __put_user(env->npc, &si->si_regs.npc);
2244 __put_user(env->y, &si->si_regs.y);
2245 for (i=0; i < 8; i++) {
2246 __put_user(env->gregs[i], &si->si_regs.u_regs[i]);
2247 }
2248 for (i=0; i < 8; i++) {
2249 __put_user(env->regwptr[UREG_I0 + i], &si->si_regs.u_regs[i+8]);
2250 }
2251 __put_user(mask, &si->si_mask);
2252 return err;
2253 }
2254
2255 #if 0
2256 static int
2257 setup_sigcontext(struct target_sigcontext *sc, /*struct _fpstate *fpstate,*/
2258 CPUSPARCState *env, unsigned long mask)
2259 {
2260 int err = 0;
2261
2262 __put_user(mask, &sc->sigc_mask);
2263 __put_user(env->regwptr[UREG_SP], &sc->sigc_sp);
2264 __put_user(env->pc, &sc->sigc_pc);
2265 __put_user(env->npc, &sc->sigc_npc);
2266 __put_user(env->psr, &sc->sigc_psr);
2267 __put_user(env->gregs[1], &sc->sigc_g1);
2268 __put_user(env->regwptr[UREG_O0], &sc->sigc_o0);
2269
2270 return err;
2271 }
2272 #endif
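/* Size of the signal frame, rounded up to an 8-byte multiple. */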
2273 #define NF_ALIGNEDSZ (((sizeof(struct target_signal_frame) + 7) & (~7)))
2274
2275 static void setup_frame(int sig, struct target_sigaction *ka,
2276 target_sigset_t *set, CPUSPARCState *env)
2277 {
2278 abi_ulong sf_addr;
2279 struct target_signal_frame *sf;
2280 int sigframe_size, err, i;
2281
2282 /* 1. Make sure everything is clean */
2283 //synchronize_user_stack();
2284
2285 sigframe_size = NF_ALIGNEDSZ;
2286 sf_addr = get_sigframe(ka, env, sigframe_size);
2287
2288 sf = lock_user(VERIFY_WRITE, sf_addr,
2289 sizeof(struct target_signal_frame), 0);
2290 if (!sf)
2291 goto sigsegv;
2292
2293 //fprintf(stderr, "sf: %x pc %x fp %x sp %x\n", sf, env->pc, env->regwptr[UREG_FP], env->regwptr[UREG_SP]);
2294 #if 0
2295 if (invalid_frame_pointer(sf, sigframe_size))
2296 goto sigill_and_return;
2297 #endif
2298 /* 2. Save the current process state */
2299 err = setup___siginfo(&sf->info, env, set->sig[0]);
2300 __put_user(0, &sf->extra_size);
2301
2302 //save_fpu_state(regs, &sf->fpu_state);
2303 //__put_user(&sf->fpu_state, &sf->fpu_save);
2304
2305 __put_user(set->sig[0], &sf->info.si_mask);
2306 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
2307 __put_user(set->sig[i + 1], &sf->extramask[i]);
2308 }
2309
2310 for (i = 0; i < 8; i++) {
2311 __put_user(env->regwptr[i + UREG_L0], &sf->ss.locals[i]);
2312 }
2313 for (i = 0; i < 8; i++) {
2314 __put_user(env->regwptr[i + UREG_I0], &sf->ss.ins[i]);
2315 }
2316 if (err)
2317 goto sigsegv;
2318
2319 /* 3. signal handler back-trampoline and parameters */
2320 env->regwptr[UREG_FP] = sf_addr;
2321 env->regwptr[UREG_I0] = sig;
2322 env->regwptr[UREG_I1] = sf_addr +
2323 offsetof(struct target_signal_frame, info);
2324 env->regwptr[UREG_I2] = sf_addr +
2325 offsetof(struct target_signal_frame, info);
2326
2327 /* 4. signal handler */
2328 env->pc = ka->_sa_handler;
2329 env->npc = (env->pc + 4);
2330 /* 5. return to kernel instructions */
2331 if (ka->sa_restorer)
2332 env->regwptr[UREG_I7] = ka->sa_restorer;
2333 else {
2334 uint32_t val32;
2335
2336 env->regwptr[UREG_I7] = sf_addr +
2337 offsetof(struct target_signal_frame, insns) - 2 * 4;
2338
2339 /* mov __NR_sigreturn, %g1 */
2340 val32 = 0x821020d8;
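/* (the immediate 0xd8 == 216 hard-codes the sparc32 sigreturn syscall number) */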
2341 __put_user(val32, &sf->insns[0]);
2342
2343 /* t 0x10 */
2344 val32 = 0x91d02010;
2345 __put_user(val32, &sf->insns[1]);
2346 if (err)
2347 goto sigsegv;
2348
2349 /* Flush instruction space. */
2350 //flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0]));
2351 // tb_flush(CPU(sparc_env_get_cpu(env)));
2352 }
2353 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
2354 return;
2355 #if 0
2356 sigill_and_return:
2357 force_sig(TARGET_SIGILL);
2358 #endif
2359 sigsegv:
2360 //fprintf(stderr, "force_sig\n");
2361 unlock_user(sf, sf_addr, sizeof(struct target_signal_frame));
2362 force_sig(TARGET_SIGSEGV);
2363 }
2364
2365 static void setup_rt_frame(int sig, struct target_sigaction *ka,
2366 target_siginfo_t *info,
2367 target_sigset_t *set, CPUSPARCState *env)
2368 {
2369 fprintf(stderr, "setup_rt_frame: not implemented\n");
2370 }
2371
2372 long do_sigreturn(CPUSPARCState *env)
2373 {
2374 abi_ulong sf_addr;
2375 struct target_signal_frame *sf;
2376 uint32_t up_psr, pc, npc;
2377 target_sigset_t set;
2378 sigset_t host_set;
2379 int err=0, i;
2380
2381 sf_addr = env->regwptr[UREG_FP];
2382 if (!lock_user_struct(VERIFY_READ, sf, sf_addr, 1))
2383 goto segv_and_exit;
2384 #if 0
2385 fprintf(stderr, "sigreturn\n");
2386 fprintf(stderr, "sf: %x pc %x fp %x sp %x\n", sf, env->pc, env->regwptr[UREG_FP], env->regwptr[UREG_SP]);
2387 #endif
2388 //cpu_dump_state(env, stderr, fprintf, 0);
2389
2390 /* 1. Make sure we are not getting garbage from the user */
2391
2392 if (sf_addr & 3)
2393 goto segv_and_exit;
2394
2395 __get_user(pc, &sf->info.si_regs.pc);
2396 __get_user(npc, &sf->info.si_regs.npc);
2397
2398 if ((pc | npc) & 3)
2399 goto segv_and_exit;
2400
2401 /* 2. Restore the state */
2402 __get_user(up_psr, &sf->info.si_regs.psr);
2403
2404 /* User can only change condition codes and FPU enabling in %psr. */
2405 env->psr = (up_psr & (PSR_ICC /* | PSR_EF */))
2406 | (env->psr & ~(PSR_ICC /* | PSR_EF */));
2407
2408 env->pc = pc;
2409 env->npc = npc;
2410 __get_user(env->y, &sf->info.si_regs.y);
2411 for (i=0; i < 8; i++) {
2412 __get_user(env->gregs[i], &sf->info.si_regs.u_regs[i]);
2413 }
2414 for (i=0; i < 8; i++) {
2415 __get_user(env->regwptr[i + UREG_I0], &sf->info.si_regs.u_regs[i+8]);
2416 }
2417
2418 /* FIXME: implement FPU save/restore:
2419 * __get_user(fpu_save, &sf->fpu_save);
2420 * if (fpu_save)
2421 * err |= restore_fpu_state(env, fpu_save);
2422 */
2423
2424 /* This is pretty much atomic; no amount of locking would prevent
2425 * the races which exist anyway.
2426 */
2427 __get_user(set.sig[0], &sf->info.si_mask);
2428 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
2429 __get_user(set.sig[i], &sf->extramask[i - 1]);
2430 }
2431
2432 target_to_host_sigset_internal(&host_set, &set);
2433 do_sigprocmask(SIG_SETMASK, &host_set, NULL);
2434
2435 if (err)
2436 goto segv_and_exit;
2437 unlock_user_struct(sf, sf_addr, 0);
2438 return env->regwptr[0];
2439
2440 segv_and_exit:
2441 unlock_user_struct(sf, sf_addr, 0);
2442 force_sig(TARGET_SIGSEGV);
2443 }
2444
2445 long do_rt_sigreturn(CPUSPARCState *env)
2446 {
2447 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
2448 return -TARGET_ENOSYS;
2449 }
2450
2451 #if defined(TARGET_SPARC64) && !defined(TARGET_ABI32)
2452 #define MC_TSTATE 0
2453 #define MC_PC 1
2454 #define MC_NPC 2
2455 #define MC_Y 3
2456 #define MC_G1 4
2457 #define MC_G2 5
2458 #define MC_G3 6
2459 #define MC_G4 7
2460 #define MC_G5 8
2461 #define MC_G6 9
2462 #define MC_G7 10
2463 #define MC_O0 11
2464 #define MC_O1 12
2465 #define MC_O2 13
2466 #define MC_O3 14
2467 #define MC_O4 15
2468 #define MC_O5 16
2469 #define MC_O6 17
2470 #define MC_O7 18
2471 #define MC_NGREG 19
2472
2473 typedef abi_ulong target_mc_greg_t;
2474 typedef target_mc_greg_t target_mc_gregset_t[MC_NGREG];
2475
2476 struct target_mc_fq {
2477 abi_ulong *mcfq_addr;
2478 uint32_t mcfq_insn;
2479 };
2480
2481 struct target_mc_fpu {
2482 union {
2483 uint32_t sregs[32];
2484 uint64_t dregs[32];
2485 //uint128_t qregs[16];
2486 } mcfpu_fregs;
2487 abi_ulong mcfpu_fsr;
2488 abi_ulong mcfpu_fprs;
2489 abi_ulong mcfpu_gsr;
2490 struct target_mc_fq *mcfpu_fq;
2491 unsigned char mcfpu_qcnt;
2492 unsigned char mcfpu_qentsz;
2493 unsigned char mcfpu_enab;
2494 };
2495 typedef struct target_mc_fpu target_mc_fpu_t;
2496
2497 typedef struct {
2498 target_mc_gregset_t mc_gregs;
2499 target_mc_greg_t mc_fp;
2500 target_mc_greg_t mc_i7;
2501 target_mc_fpu_t mc_fpregs;
2502 } target_mcontext_t;
2503
2504 struct target_ucontext {
2505 struct target_ucontext *tuc_link;
2506 abi_ulong tuc_flags;
2507 target_sigset_t tuc_sigmask;
2508 target_mcontext_t tuc_mcontext;
2509 };
2510
2511 /* A V9 register window */
2512 struct target_reg_window {
2513 abi_ulong locals[8];
2514 abi_ulong ins[8];
2515 };
2516
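/* SPARC V9 stack bias: %sp points 2047 bytes below the actual frame. */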
2517 #define TARGET_STACK_BIAS 2047
2518
2519 /* {set, get}context() needed for 64-bit SparcLinux userland. */
2520 void sparc64_set_context(CPUSPARCState *env)
2521 {
2522 abi_ulong ucp_addr;
2523 struct target_ucontext *ucp;
2524 target_mc_gregset_t *grp;
2525 abi_ulong pc, npc, tstate;
2526 abi_ulong fp, i7, w_addr;
2527 unsigned int i;
2528
2529 ucp_addr = env->regwptr[UREG_I0];
2530 if (!lock_user_struct(VERIFY_READ, ucp, ucp_addr, 1))
2531 goto do_sigsegv;
2532 grp = &ucp->tuc_mcontext.mc_gregs;
2533 __get_user(pc, &((*grp)[MC_PC]));
2534 __get_user(npc, &((*grp)[MC_NPC]));
2535 if ((pc | npc) & 3)
2536 goto do_sigsegv;
2537 if (env->regwptr[UREG_I1]) {
2538 target_sigset_t target_set;
2539 sigset_t set;
2540
2541 if (TARGET_NSIG_WORDS == 1) {
2542 __get_user(target_set.sig[0], &ucp->tuc_sigmask.sig[0]);
2543 } else {
2544 abi_ulong *src, *dst;
2545 src = ucp->tuc_sigmask.sig;
2546 dst = target_set.sig;
2547 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
2548 __get_user(*dst, src);
2549 }
2550 }
2551 target_to_host_sigset_internal(&set, &target_set);
2552 do_sigprocmask(SIG_SETMASK, &set, NULL);
2553 }
2554 env->pc = pc;
2555 env->npc = npc;
2556 __get_user(env->y, &((*grp)[MC_Y]));
2557 __get_user(tstate, &((*grp)[MC_TSTATE]));
2558 env->asi = (tstate >> 24) & 0xff;
2559 cpu_put_ccr(env, tstate >> 32);
2560 cpu_put_cwp64(env, tstate & 0x1f);
2561 __get_user(env->gregs[1], (&(*grp)[MC_G1]));
2562 __get_user(env->gregs[2], (&(*grp)[MC_G2]));
2563 __get_user(env->gregs[3], (&(*grp)[MC_G3]));
2564 __get_user(env->gregs[4], (&(*grp)[MC_G4]));
2565 __get_user(env->gregs[5], (&(*grp)[MC_G5]));
2566 __get_user(env->gregs[6], (&(*grp)[MC_G6]));
2567 __get_user(env->gregs[7], (&(*grp)[MC_G7]));
2568 __get_user(env->regwptr[UREG_I0], (&(*grp)[MC_O0]));
2569 __get_user(env->regwptr[UREG_I1], (&(*grp)[MC_O1]));
2570 __get_user(env->regwptr[UREG_I2], (&(*grp)[MC_O2]));
2571 __get_user(env->regwptr[UREG_I3], (&(*grp)[MC_O3]));
2572 __get_user(env->regwptr[UREG_I4], (&(*grp)[MC_O4]));
2573 __get_user(env->regwptr[UREG_I5], (&(*grp)[MC_O5]));
2574 __get_user(env->regwptr[UREG_I6], (&(*grp)[MC_O6]));
2575 __get_user(env->regwptr[UREG_I7], (&(*grp)[MC_O7]));
2576
2577 __get_user(fp, &(ucp->tuc_mcontext.mc_fp));
2578 __get_user(i7, &(ucp->tuc_mcontext.mc_i7));
2579
2580 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
2581 if (put_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
2582 abi_ulong) != 0)
2583 goto do_sigsegv;
2584 if (put_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
2585 abi_ulong) != 0)
2586 goto do_sigsegv;
2587 /* FIXME this does not match how the kernel handles the FPU in
2588 * its sparc64_set_context implementation. In particular the FPU
2589 * is only restored if fenab is non-zero in:
2590 * __get_user(fenab, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_enab));
2591 */
2592 __get_user(env->fprs, &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fprs));
2593 {
2594 uint32_t *src = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
2595 for (i = 0; i < 64; i++, src++) {
2596 if (i & 1) {
2597 __get_user(env->fpr[i/2].l.lower, src);
2598 } else {
2599 __get_user(env->fpr[i/2].l.upper, src);
2600 }
2601 }
2602 }
2603 __get_user(env->fsr,
2604 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_fsr));
2605 __get_user(env->gsr,
2606 &(ucp->tuc_mcontext.mc_fpregs.mcfpu_gsr));
2607 unlock_user_struct(ucp, ucp_addr, 0);
2608 return;
2609 do_sigsegv:
2610 unlock_user_struct(ucp, ucp_addr, 0);
2611 force_sig(TARGET_SIGSEGV);
2612 }
2613
2614 void sparc64_get_context(CPUSPARCState *env)
2615 {
2616 abi_ulong ucp_addr;
2617 struct target_ucontext *ucp;
2618 target_mc_gregset_t *grp;
2619 target_mcontext_t *mcp;
2620 abi_ulong fp, i7, w_addr;
2621 int err;
2622 unsigned int i;
2623 target_sigset_t target_set;
2624 sigset_t set;
2625
2626 ucp_addr = env->regwptr[UREG_I0];
2627 if (!lock_user_struct(VERIFY_WRITE, ucp, ucp_addr, 0))
2628 goto do_sigsegv;
2629
2630 mcp = &ucp->tuc_mcontext;
2631 grp = &mcp->mc_gregs;
2632
2633 /* Skip over the trap instruction, first. */
2634 env->pc = env->npc;
2635 env->npc += 4;
2636
2637 err = 0;
2638
2639 do_sigprocmask(0, NULL, &set);
2640 host_to_target_sigset_internal(&target_set, &set);
2641 if (TARGET_NSIG_WORDS == 1) {
2642 __put_user(target_set.sig[0],
2643 (abi_ulong *)&ucp->tuc_sigmask);
2644 } else {
2645 abi_ulong *src, *dst;
2646 src = target_set.sig;
2647 dst = ucp->tuc_sigmask.sig;
2648 for (i = 0; i < TARGET_NSIG_WORDS; i++, dst++, src++) {
2649 __put_user(*src, dst);
2650 }
2651 if (err)
2652 goto do_sigsegv;
2653 }
2654
2655 /* XXX: tstate must be saved properly */
2656 // __put_user(env->tstate, &((*grp)[MC_TSTATE]));
2657 __put_user(env->pc, &((*grp)[MC_PC]));
2658 __put_user(env->npc, &((*grp)[MC_NPC]));
2659 __put_user(env->y, &((*grp)[MC_Y]));
2660 __put_user(env->gregs[1], &((*grp)[MC_G1]));
2661 __put_user(env->gregs[2], &((*grp)[MC_G2]));
2662 __put_user(env->gregs[3], &((*grp)[MC_G3]));
2663 __put_user(env->gregs[4], &((*grp)[MC_G4]));
2664 __put_user(env->gregs[5], &((*grp)[MC_G5]));
2665 __put_user(env->gregs[6], &((*grp)[MC_G6]));
2666 __put_user(env->gregs[7], &((*grp)[MC_G7]));
2667 __put_user(env->regwptr[UREG_I0], &((*grp)[MC_O0]));
2668 __put_user(env->regwptr[UREG_I1], &((*grp)[MC_O1]));
2669 __put_user(env->regwptr[UREG_I2], &((*grp)[MC_O2]));
2670 __put_user(env->regwptr[UREG_I3], &((*grp)[MC_O3]));
2671 __put_user(env->regwptr[UREG_I4], &((*grp)[MC_O4]));
2672 __put_user(env->regwptr[UREG_I5], &((*grp)[MC_O5]));
2673 __put_user(env->regwptr[UREG_I6], &((*grp)[MC_O6]));
2674 __put_user(env->regwptr[UREG_I7], &((*grp)[MC_O7]));
2675
2676 w_addr = TARGET_STACK_BIAS+env->regwptr[UREG_I6];
2677 fp = i7 = 0;
2678 if (get_user(fp, w_addr + offsetof(struct target_reg_window, ins[6]),
2679 abi_ulong) != 0)
2680 goto do_sigsegv;
2681 if (get_user(i7, w_addr + offsetof(struct target_reg_window, ins[7]),
2682 abi_ulong) != 0)
2683 goto do_sigsegv;
2684 __put_user(fp, &(mcp->mc_fp));
2685 __put_user(i7, &(mcp->mc_i7));
2686
2687 {
2688 uint32_t *dst = ucp->tuc_mcontext.mc_fpregs.mcfpu_fregs.sregs;
2689 for (i = 0; i < 64; i++, dst++) {
2690 if (i & 1) {
2691 __put_user(env->fpr[i/2].l.lower, dst);
2692 } else {
2693 __put_user(env->fpr[i/2].l.upper, dst);
2694 }
2695 }
2696 }
2697 __put_user(env->fsr, &(mcp->mc_fpregs.mcfpu_fsr));
2698 __put_user(env->gsr, &(mcp->mc_fpregs.mcfpu_gsr));
2699 __put_user(env->fprs, &(mcp->mc_fpregs.mcfpu_fprs));
2700
2701 if (err)
2702 goto do_sigsegv;
2703 unlock_user_struct(ucp, ucp_addr, 1);
2704 return;
2705 do_sigsegv:
2706 unlock_user_struct(ucp, ucp_addr, 1);
2707 force_sig(TARGET_SIGSEGV);
2708 }
2709 #endif
2710 #elif defined(TARGET_MIPS) || defined(TARGET_MIPS64)
2711
2712 # if defined(TARGET_ABI_MIPSO32)
2713 struct target_sigcontext {
2714 uint32_t sc_regmask; /* Unused */
2715 uint32_t sc_status;
2716 uint64_t sc_pc;
2717 uint64_t sc_regs[32];
2718 uint64_t sc_fpregs[32];
2719 uint32_t sc_ownedfp; /* Unused */
2720 uint32_t sc_fpc_csr;
2721 uint32_t sc_fpc_eir; /* Unused */
2722 uint32_t sc_used_math;
2723 uint32_t sc_dsp; /* dsp status, was sc_ssflags */
2724 uint32_t pad0;
2725 uint64_t sc_mdhi;
2726 uint64_t sc_mdlo;
2727 target_ulong sc_hi1; /* Was sc_cause */
2728 target_ulong sc_lo1; /* Was sc_badvaddr */
2729 target_ulong sc_hi2; /* Was sc_sigset[4] */
2730 target_ulong sc_lo2;
2731 target_ulong sc_hi3;
2732 target_ulong sc_lo3;
2733 };
2734 # else /* N32 || N64 */
2735 struct target_sigcontext {
2736 uint64_t sc_regs[32];
2737 uint64_t sc_fpregs[32];
2738 uint64_t sc_mdhi;
2739 uint64_t sc_hi1;
2740 uint64_t sc_hi2;
2741 uint64_t sc_hi3;
2742 uint64_t sc_mdlo;
2743 uint64_t sc_lo1;
2744 uint64_t sc_lo2;
2745 uint64_t sc_lo3;
2746 uint64_t sc_pc;
2747 uint32_t sc_fpc_csr;
2748 uint32_t sc_used_math;
2749 uint32_t sc_dsp;
2750 uint32_t sc_reserved;
2751 };
2752 # endif /* O32 */
2753
2754 struct sigframe {
2755 uint32_t sf_ass[4]; /* argument save space for o32 */
2756 uint32_t sf_code[2]; /* signal trampoline */
2757 struct target_sigcontext sf_sc;
2758 target_sigset_t sf_mask;
2759 };
2760
2761 struct target_ucontext {
2762 target_ulong tuc_flags;
2763 target_ulong tuc_link;
2764 target_stack_t tuc_stack;
2765 target_ulong pad0;
2766 struct target_sigcontext tuc_mcontext;
2767 target_sigset_t tuc_sigmask;
2768 };
2769
2770 struct target_rt_sigframe {
2771 uint32_t rs_ass[4]; /* argument save space for o32 */
2772 uint32_t rs_code[2]; /* signal trampoline */
2773 struct target_siginfo rs_info;
2774 struct target_ucontext rs_uc;
2775 };
2776
2777 /* Install trampoline to jump back from signal handler */
2778 static inline int install_sigtramp(unsigned int *tramp, unsigned int syscall)
2779 {
2780 int err = 0;
2781
2782 /*
2783 * Set up the return code ...
2784 *
2785 * li v0, __NR__foo_sigreturn
2786 * syscall
2787 */
2788
2789 __put_user(0x24020000 + syscall, tramp + 0);
2790 __put_user(0x0000000c, tramp + 1);
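/* 0x24020000 | syscall encodes "addiu v0, zero, syscall"; 0x0000000c is "syscall" */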
2791 return err;
2792 }
2793
2794 static inline void setup_sigcontext(CPUMIPSState *regs,
2795 struct target_sigcontext *sc)
2796 {
2797 int i;
2798
2799 __put_user(exception_resume_pc(regs), &sc->sc_pc);
2800 regs->hflags &= ~MIPS_HFLAG_BMASK;
2801
2802 __put_user(0, &sc->sc_regs[0]);
2803 for (i = 1; i < 32; ++i) {
2804 __put_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
2805 }
2806
2807 __put_user(regs->active_tc.HI[0], &sc->sc_mdhi);
2808 __put_user(regs->active_tc.LO[0], &sc->sc_mdlo);
2809
2810 /* Rather than checking for dsp existence, always copy. The storage
2811 would just be garbage otherwise. */
2812 __put_user(regs->active_tc.HI[1], &sc->sc_hi1);
2813 __put_user(regs->active_tc.HI[2], &sc->sc_hi2);
2814 __put_user(regs->active_tc.HI[3], &sc->sc_hi3);
2815 __put_user(regs->active_tc.LO[1], &sc->sc_lo1);
2816 __put_user(regs->active_tc.LO[2], &sc->sc_lo2);
2817 __put_user(regs->active_tc.LO[3], &sc->sc_lo3);
2818 {
2819 uint32_t dsp = cpu_rddsp(0x3ff, regs);
2820 __put_user(dsp, &sc->sc_dsp);
2821 }
2822
2823 __put_user(1, &sc->sc_used_math);
2824
2825 for (i = 0; i < 32; ++i) {
2826 __put_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
2827 }
2828 }
2829
2830 static inline void
2831 restore_sigcontext(CPUMIPSState *regs, struct target_sigcontext *sc)
2832 {
2833 int i;
2834
2835 __get_user(regs->CP0_EPC, &sc->sc_pc);
2836
2837 __get_user(regs->active_tc.HI[0], &sc->sc_mdhi);
2838 __get_user(regs->active_tc.LO[0], &sc->sc_mdlo);
2839
2840 for (i = 1; i < 32; ++i) {
2841 __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]);
2842 }
2843
2844 __get_user(regs->active_tc.HI[1], &sc->sc_hi1);
2845 __get_user(regs->active_tc.HI[2], &sc->sc_hi2);
2846 __get_user(regs->active_tc.HI[3], &sc->sc_hi3);
2847 __get_user(regs->active_tc.LO[1], &sc->sc_lo1);
2848 __get_user(regs->active_tc.LO[2], &sc->sc_lo2);
2849 __get_user(regs->active_tc.LO[3], &sc->sc_lo3);
2850 {
2851 uint32_t dsp;
2852 __get_user(dsp, &sc->sc_dsp);
2853 cpu_wrdsp(dsp, 0x3ff, regs);
2854 }
2855
2856 for (i = 0; i < 32; ++i) {
2857 __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i]);
2858 }
2859 }
2860
2861 /*
2862 * Determine which stack to use..
2863 */
2864 static inline abi_ulong
2865 get_sigframe(struct target_sigaction *ka, CPUMIPSState *regs, size_t frame_size)
2866 {
2867 unsigned long sp;
2868
2869 /* Default to using normal stack */
2870 sp = regs->active_tc.gpr[29];
2871
2872 /*
2873 * FPU emulator may have its own trampoline active just
2874 * above the user stack, 16 bytes before the next lowest
2875 * 16-byte boundary. Try to avoid trashing it.
2876 */
2877 sp -= 32;
2878
2879 /* This is the X/Open sanctioned signal stack switching. */
2880 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
2881 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
2882 }
2883
2884 return (sp - frame_size) & ~7;
2885 }
2886
2887 static void mips_set_hflags_isa_mode_from_pc(CPUMIPSState *env)
2888 {
2889 if (env->insn_flags & (ASE_MIPS16 | ASE_MICROMIPS)) {
2890 env->hflags &= ~MIPS_HFLAG_M16;
2891 env->hflags |= (env->active_tc.PC & 1) << MIPS_HFLAG_M16_SHIFT;
2892 env->active_tc.PC &= ~(target_ulong) 1;
2893 }
2894 }
2895
2896 # if defined(TARGET_ABI_MIPSO32)
2897 /* compare linux/arch/mips/kernel/signal.c:setup_frame() */
2898 static void setup_frame(int sig, struct target_sigaction * ka,
2899 target_sigset_t *set, CPUMIPSState *regs)
2900 {
2901 struct sigframe *frame;
2902 abi_ulong frame_addr;
2903 int i;
2904
2905 frame_addr = get_sigframe(ka, regs, sizeof(*frame));
2906 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
2907 goto give_sigsegv;
2908
2909 install_sigtramp(frame->sf_code, TARGET_NR_sigreturn);
2910
2911 setup_sigcontext(regs, &frame->sf_sc);
2912
2913 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
2914 __put_user(set->sig[i], &frame->sf_mask.sig[i]);
2915 }
2916
2917 /*
2918 * Arguments to signal handler:
2919 *
2920 * a0 = signal number
2921 * a1 = 0 (should be cause)
2922 * a2 = pointer to struct sigcontext
2923 *
2924 * $25 and PC point to the signal handler, $29 points to the
2925 * struct sigframe.
2926 */
2927 regs->active_tc.gpr[ 4] = sig;
2928 regs->active_tc.gpr[ 5] = 0;
2929 regs->active_tc.gpr[ 6] = frame_addr + offsetof(struct sigframe, sf_sc);
2930 regs->active_tc.gpr[29] = frame_addr;
2931 regs->active_tc.gpr[31] = frame_addr + offsetof(struct sigframe, sf_code);
2932 /* The original kernel code sets CP0_EPC to the handler,
2933 * since it returns to userland using eret;
2934 * we cannot do that here, so we must set PC directly. */
2935 regs->active_tc.PC = regs->active_tc.gpr[25] = ka->_sa_handler;
2936 mips_set_hflags_isa_mode_from_pc(regs);
2937 unlock_user_struct(frame, frame_addr, 1);
2938 return;
2939
2940 give_sigsegv:
2941 force_sig(TARGET_SIGSEGV/*, current*/);
2942 }
2943
2944 long do_sigreturn(CPUMIPSState *regs)
2945 {
2946 struct sigframe *frame;
2947 abi_ulong frame_addr;
2948 sigset_t blocked;
2949 target_sigset_t target_set;
2950 int i;
2951
2952 #if defined(DEBUG_SIGNAL)
2953 fprintf(stderr, "do_sigreturn\n");
2954 #endif
2955 frame_addr = regs->active_tc.gpr[29];
2956 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
2957 goto badframe;
2958
2959 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
2960 __get_user(target_set.sig[i], &frame->sf_mask.sig[i]);
2961 }
2962
2963 target_to_host_sigset_internal(&blocked, &target_set);
2964 do_sigprocmask(SIG_SETMASK, &blocked, NULL);
2965
2966 restore_sigcontext(regs, &frame->sf_sc);
2967
2968 #if 0
2969 /*
2970 * Don't let your children do this ...
2971 */
2972 __asm__ __volatile__(
2973 "move\t$29, %0\n\t"
2974 "j\tsyscall_exit"
2975 :/* no outputs */
2976 :"r" (&regs));
2977 /* Unreached */
2978 #endif
2979
2980 regs->active_tc.PC = regs->CP0_EPC;
2981 mips_set_hflags_isa_mode_from_pc(regs);
2982 /* I am not sure this is right, but it seems to work;
2983 * maybe a problem with nested signals? */
2984 regs->CP0_EPC = 0;
2985 return -TARGET_QEMU_ESIGRETURN;
2986
2987 badframe:
2988 force_sig(TARGET_SIGSEGV/*, current*/);
2989 return 0;
2990 }
2991 # endif /* O32 */
2992
2993 static void setup_rt_frame(int sig, struct target_sigaction *ka,
2994 target_siginfo_t *info,
2995 target_sigset_t *set, CPUMIPSState *env)
2996 {
2997 struct target_rt_sigframe *frame;
2998 abi_ulong frame_addr;
2999 int i;
3000
3001 frame_addr = get_sigframe(ka, env, sizeof(*frame));
3002 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
3003 goto give_sigsegv;
3004
3005 install_sigtramp(frame->rs_code, TARGET_NR_rt_sigreturn);
3006
3007 tswap_siginfo(&frame->rs_info, info);
3008
3009 __put_user(0, &frame->rs_uc.tuc_flags);
3010 __put_user(0, &frame->rs_uc.tuc_link);
3011 __put_user(target_sigaltstack_used.ss_sp, &frame->rs_uc.tuc_stack.ss_sp);
3012 __put_user(target_sigaltstack_used.ss_size, &frame->rs_uc.tuc_stack.ss_size);
3013 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
3014 &frame->rs_uc.tuc_stack.ss_flags);
3015
3016 setup_sigcontext(env, &frame->rs_uc.tuc_mcontext);
3017
3018 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3019 __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]);
3020 }
3021
3022 /*
3023 * Arguments to signal handler:
3024 *
3025 * a0 = signal number
3026 * a1 = pointer to siginfo_t
3027 * a2 = pointer to struct ucontext
3028 *
3029 * $25 and PC point to the signal handler, $29 points to the
3030 * struct sigframe.
3031 */
3032 env->active_tc.gpr[ 4] = sig;
3033 env->active_tc.gpr[ 5] = frame_addr
3034 + offsetof(struct target_rt_sigframe, rs_info);
3035 env->active_tc.gpr[ 6] = frame_addr
3036 + offsetof(struct target_rt_sigframe, rs_uc);
3037 env->active_tc.gpr[29] = frame_addr;
3038 env->active_tc.gpr[31] = frame_addr
3039 + offsetof(struct target_rt_sigframe, rs_code);
3040 /* The original kernel code sets CP0_EPC to the handler,
3041 * since it returns to userland using eret;
3042 * we cannot do that here, so we must set PC directly. */
3043 env->active_tc.PC = env->active_tc.gpr[25] = ka->_sa_handler;
3044 mips_set_hflags_isa_mode_from_pc(env);
3045 unlock_user_struct(frame, frame_addr, 1);
3046 return;
3047
3048 give_sigsegv:
3049 unlock_user_struct(frame, frame_addr, 1);
3050 force_sig(TARGET_SIGSEGV/*, current*/);
3051 }
3052
3053 long do_rt_sigreturn(CPUMIPSState *env)
3054 {
3055 struct target_rt_sigframe *frame;
3056 abi_ulong frame_addr;
3057 sigset_t blocked;
3058
3059 #if defined(DEBUG_SIGNAL)
3060 fprintf(stderr, "do_rt_sigreturn\n");
3061 #endif
3062 frame_addr = env->active_tc.gpr[29];
3063 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
3064 goto badframe;
3065
3066 target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask);
3067 do_sigprocmask(SIG_SETMASK, &blocked, NULL);
3068
3069 restore_sigcontext(env, &frame->rs_uc.tuc_mcontext);
3070
3071 if (do_sigaltstack(frame_addr +
3072 offsetof(struct target_rt_sigframe, rs_uc.tuc_stack),
3073 0, get_sp_from_cpustate(env)) == -EFAULT)
3074 goto badframe;
3075
3076 env->active_tc.PC = env->CP0_EPC;
3077 mips_set_hflags_isa_mode_from_pc(env);
3078 /* I am not sure this is right, but it seems to work;
3079 * maybe a problem with nested signals? */
3080 env->CP0_EPC = 0;
3081 return -TARGET_QEMU_ESIGRETURN;
3082
3083 badframe:
3084 force_sig(TARGET_SIGSEGV/*, current*/);
3085 return 0;
3086 }
3087
3088 #elif defined(TARGET_SH4)
3089
3090 /*
3091 * code and data structures from linux kernel:
3092 * include/asm-sh/sigcontext.h
3093 * arch/sh/kernel/signal.c
3094 */
3095
3096 struct target_sigcontext {
3097 target_ulong oldmask;
3098
3099 /* CPU registers */
3100 target_ulong sc_gregs[16];
3101 target_ulong sc_pc;
3102 target_ulong sc_pr;
3103 target_ulong sc_sr;
3104 target_ulong sc_gbr;
3105 target_ulong sc_mach;
3106 target_ulong sc_macl;
3107
3108 /* FPU registers */
3109 target_ulong sc_fpregs[16];
3110 target_ulong sc_xfpregs[16];
3111 unsigned int sc_fpscr;
3112 unsigned int sc_fpul;
3113 unsigned int sc_ownedfp;
3114 };
3115
3116 struct target_sigframe
3117 {
3118 struct target_sigcontext sc;
3119 target_ulong extramask[TARGET_NSIG_WORDS-1];
3120 uint16_t retcode[3];
3121 };
3122
3123
3124 struct target_ucontext {
3125 target_ulong tuc_flags;
3126 struct target_ucontext *tuc_link;
3127 target_stack_t tuc_stack;
3128 struct target_sigcontext tuc_mcontext;
3129 target_sigset_t tuc_sigmask; /* mask last for extensibility */
3130 };
3131
3132 struct target_rt_sigframe
3133 {
3134 struct target_siginfo info;
3135 struct target_ucontext uc;
3136 uint16_t retcode[3];
3137 };
3138
3139
3140 #define MOVW(n) (0x9300|((n)-2)) /* Move mem word at PC+n to R3 */
3141 #define TRAP_NOARG 0xc310 /* Syscall w/no args (NR in R3) SH3/4 */
3142
3143 static abi_ulong get_sigframe(struct target_sigaction *ka,
3144 unsigned long sp, size_t frame_size)
3145 {
3146 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags(sp) == 0)) {
3147 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3148 }
3149
3150 return (sp - frame_size) & -8ul;
3151 }
3152
3153 static void setup_sigcontext(struct target_sigcontext *sc,
3154 CPUSH4State *regs, unsigned long mask)
3155 {
3156 int i;
3157
3158 #define COPY(x) __put_user(regs->x, &sc->sc_##x)
3159 COPY(gregs[0]); COPY(gregs[1]);
3160 COPY(gregs[2]); COPY(gregs[3]);
3161 COPY(gregs[4]); COPY(gregs[5]);
3162 COPY(gregs[6]); COPY(gregs[7]);
3163 COPY(gregs[8]); COPY(gregs[9]);
3164 COPY(gregs[10]); COPY(gregs[11]);
3165 COPY(gregs[12]); COPY(gregs[13]);
3166 COPY(gregs[14]); COPY(gregs[15]);
3167 COPY(gbr); COPY(mach);
3168 COPY(macl); COPY(pr);
3169 COPY(sr); COPY(pc);
3170 #undef COPY
3171
3172 for (i=0; i<16; i++) {
3173 __put_user(regs->fregs[i], &sc->sc_fpregs[i]);
3174 }
3175 __put_user(regs->fpscr, &sc->sc_fpscr);
3176 __put_user(regs->fpul, &sc->sc_fpul);
3177
3178 /* non-iBCS2 extensions.. */
3179 __put_user(mask, &sc->oldmask);
3180 }
3181
3182 static void restore_sigcontext(CPUSH4State *regs, struct target_sigcontext *sc,
3183 target_ulong *r0_p)
3184 {
3185 int i;
3186
3187 #define COPY(x) __get_user(regs->x, &sc->sc_##x)
3188 COPY(gregs[1]);
3189 COPY(gregs[2]); COPY(gregs[3]);
3190 COPY(gregs[4]); COPY(gregs[5]);
3191 COPY(gregs[6]); COPY(gregs[7]);
3192 COPY(gregs[8]); COPY(gregs[9]);
3193 COPY(gregs[10]); COPY(gregs[11]);
3194 COPY(gregs[12]); COPY(gregs[13]);
3195 COPY(gregs[14]); COPY(gregs[15]);
3196 COPY(gbr); COPY(mach);
3197 COPY(macl); COPY(pr);
3198 COPY(sr); COPY(pc);
3199 #undef COPY
3200
3201 for (i=0; i<16; i++) {
3202 __get_user(regs->fregs[i], &sc->sc_fpregs[i]);
3203 }
3204 __get_user(regs->fpscr, &sc->sc_fpscr);
3205 __get_user(regs->fpul, &sc->sc_fpul);
3206
3207 regs->tra = -1; /* disable syscall checks */
3208 __get_user(*r0_p, &sc->sc_gregs[0]);
3209 }
3210
3211 static void setup_frame(int sig, struct target_sigaction *ka,
3212 target_sigset_t *set, CPUSH4State *regs)
3213 {
3214 struct target_sigframe *frame;
3215 abi_ulong frame_addr;
3216 int i;
3217 int err = 0;
3218
3219 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame));
3220 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
3221 goto give_sigsegv;
3222
3223 setup_sigcontext(&frame->sc, regs, set->sig[0]);
3224
3225 for (i = 0; i < TARGET_NSIG_WORDS - 1; i++) {
3226 __put_user(set->sig[i + 1], &frame->extramask[i]);
3227 }
3228
3229 /* Set up to return from userspace. If provided, use a stub
3230 already in userspace. */
3231 if (ka->sa_flags & TARGET_SA_RESTORER) {
3232 regs->pr = (unsigned long) ka->sa_restorer;
3233 } else {
3234 /* Generate return code (system call to sigreturn) */
3235 __put_user(MOVW(2), &frame->retcode[0]);
3236 __put_user(TRAP_NOARG, &frame->retcode[1]);
3237 __put_user((TARGET_NR_sigreturn), &frame->retcode[2]);
3238 regs->pr = (unsigned long) frame->retcode;
3239 }
3240
3241 if (err)
3242 goto give_sigsegv;
3243
3244 /* Set up registers for signal handler */
3245 regs->gregs[15] = frame_addr;
3246 regs->gregs[4] = sig; /* Arg for signal handler */
3247 regs->gregs[5] = 0;
3248 regs->gregs[6] = frame_addr + offsetof(typeof(*frame), sc);
3249 regs->pc = (unsigned long) ka->_sa_handler;
3250
3251 unlock_user_struct(frame, frame_addr, 1);
3252 return;
3253
3254 give_sigsegv:
3255 unlock_user_struct(frame, frame_addr, 1);
3256 force_sig(TARGET_SIGSEGV);
3257 }
3258
3259 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3260 target_siginfo_t *info,
3261 target_sigset_t *set, CPUSH4State *regs)
3262 {
3263 struct target_rt_sigframe *frame;
3264 abi_ulong frame_addr;
3265 int i;
3266 int err = 0;
3267
3268 frame_addr = get_sigframe(ka, regs->gregs[15], sizeof(*frame));
3269 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
3270 goto give_sigsegv;
3271
3272 tswap_siginfo(&frame->info, info);
3273
3274 /* Create the ucontext. */
3275 __put_user(0, &frame->uc.tuc_flags);
3276 __put_user(0, (unsigned long *)&frame->uc.tuc_link);
3277 __put_user((unsigned long)target_sigaltstack_used.ss_sp,
3278 &frame->uc.tuc_stack.ss_sp);
3279 __put_user(sas_ss_flags(regs->gregs[15]),
3280 &frame->uc.tuc_stack.ss_flags);
3281 __put_user(target_sigaltstack_used.ss_size,
3282 &frame->uc.tuc_stack.ss_size);
3283 setup_sigcontext(&frame->uc.tuc_mcontext,
3284 regs, set->sig[0]);
3285 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
3286 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
3287 }
3288
3289 /* Set up to return from userspace. If provided, use a stub
3290 already in userspace. */
3291 if (ka->sa_flags & TARGET_SA_RESTORER) {
3292 regs->pr = (unsigned long) ka->sa_restorer;
3293 } else {
3294 /* Generate return code (system call to sigreturn) */
3295 __put_user(MOVW(2), &frame->retcode[0]);
3296 __put_user(TRAP_NOARG, &frame->retcode[1]);
3297 __put_user((TARGET_NR_rt_sigreturn), &frame->retcode[2]);
3298 regs->pr = (unsigned long) frame->retcode;
3299 }
3300
3301 if (err)
3302 goto give_sigsegv;
3303
3304 /* Set up registers for signal handler */
3305 regs->gregs[15] = frame_addr;
3306 regs->gregs[4] = sig; /* Arg for signal handler */
3307 regs->gregs[5] = frame_addr + offsetof(typeof(*frame), info);
3308 regs->gregs[6] = frame_addr + offsetof(typeof(*frame), uc);
3309 regs->pc = (unsigned long) ka->_sa_handler;
3310
3311 unlock_user_struct(frame, frame_addr, 1);
3312 return;
3313
3314 give_sigsegv:
3315 unlock_user_struct(frame, frame_addr, 1);
3316 force_sig(TARGET_SIGSEGV);
3317 }
3318
3319 long do_sigreturn(CPUSH4State *regs)
3320 {
3321 struct target_sigframe *frame;
3322 abi_ulong frame_addr;
3323 sigset_t blocked;
3324 target_sigset_t target_set;
3325 target_ulong r0;
3326 int i;
3327 int err = 0;
3328
3329 #if defined(DEBUG_SIGNAL)
3330 fprintf(stderr, "do_sigreturn\n");
3331 #endif
3332 frame_addr = regs->gregs[15];
3333 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
3334 goto badframe;
3335
3336 __get_user(target_set.sig[0], &frame->sc.oldmask);
3337 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3338 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3339 }
3340
3341 if (err)
3342 goto badframe;
3343
3344 target_to_host_sigset_internal(&blocked, &target_set);
3345 do_sigprocmask(SIG_SETMASK, &blocked, NULL);
3346
3347 restore_sigcontext(regs, &frame->sc, &r0);
3348
3349 unlock_user_struct(frame, frame_addr, 0);
3350 return r0;
3351
3352 badframe:
3353 unlock_user_struct(frame, frame_addr, 0);
3354 force_sig(TARGET_SIGSEGV);
3355 return 0;
3356 }
3357
3358 long do_rt_sigreturn(CPUSH4State *regs)
3359 {
3360 struct target_rt_sigframe *frame;
3361 abi_ulong frame_addr;
3362 sigset_t blocked;
3363 target_ulong r0;
3364
3365 #if defined(DEBUG_SIGNAL)
3366 fprintf(stderr, "do_rt_sigreturn\n");
3367 #endif
3368 frame_addr = regs->gregs[15];
3369 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
3370 goto badframe;
3371
3372 target_to_host_sigset(&blocked, &frame->uc.tuc_sigmask);
3373 do_sigprocmask(SIG_SETMASK, &blocked, NULL);
3374
3375 restore_sigcontext(regs, &frame->uc.tuc_mcontext, &r0);
3376
3377 if (do_sigaltstack(frame_addr +
3378 offsetof(struct target_rt_sigframe, uc.tuc_stack),
3379 0, get_sp_from_cpustate(regs)) == -EFAULT)
3380 goto badframe;
3381
3382 unlock_user_struct(frame, frame_addr, 0);
3383 return r0;
3384
3385 badframe:
3386 unlock_user_struct(frame, frame_addr, 0);
3387 force_sig(TARGET_SIGSEGV);
3388 return 0;
3389 }
3390 #elif defined(TARGET_MICROBLAZE)
3391
3392 struct target_sigcontext {
3393 struct target_pt_regs regs; /* needs to be first */
3394 uint32_t oldmask;
3395 };
3396
3397 struct target_stack_t {
3398 abi_ulong ss_sp;
3399 int ss_flags;
3400 unsigned int ss_size;
3401 };
3402
3403 struct target_ucontext {
3404 abi_ulong tuc_flags;
3405 abi_ulong tuc_link;
3406 struct target_stack_t tuc_stack;
3407 struct target_sigcontext tuc_mcontext;
3408 uint32_t tuc_extramask[TARGET_NSIG_WORDS - 1];
3409 };
3410
3411 /* Signal frames. */
3412 struct target_signal_frame {
3413 struct target_ucontext uc;
3414 uint32_t extramask[TARGET_NSIG_WORDS - 1];
3415 uint32_t tramp[2];
3416 };
3417
3418 struct rt_signal_frame {
3419 siginfo_t info;
3420 struct ucontext uc;
3421 uint32_t tramp[2];
3422 };
3423
3424 static void setup_sigcontext(struct target_sigcontext *sc, CPUMBState *env)
3425 {
3426 __put_user(env->regs[0], &sc->regs.r0);
3427 __put_user(env->regs[1], &sc->regs.r1);
3428 __put_user(env->regs[2], &sc->regs.r2);
3429 __put_user(env->regs[3], &sc->regs.r3);
3430 __put_user(env->regs[4], &sc->regs.r4);
3431 __put_user(env->regs[5], &sc->regs.r5);
3432 __put_user(env->regs[6], &sc->regs.r6);
3433 __put_user(env->regs[7], &sc->regs.r7);
3434 __put_user(env->regs[8], &sc->regs.r8);
3435 __put_user(env->regs[9], &sc->regs.r9);
3436 __put_user(env->regs[10], &sc->regs.r10);
3437 __put_user(env->regs[11], &sc->regs.r11);
3438 __put_user(env->regs[12], &sc->regs.r12);
3439 __put_user(env->regs[13], &sc->regs.r13);
3440 __put_user(env->regs[14], &sc->regs.r14);
3441 __put_user(env->regs[15], &sc->regs.r15);
3442 __put_user(env->regs[16], &sc->regs.r16);
3443 __put_user(env->regs[17], &sc->regs.r17);
3444 __put_user(env->regs[18], &sc->regs.r18);
3445 __put_user(env->regs[19], &sc->regs.r19);
3446 __put_user(env->regs[20], &sc->regs.r20);
3447 __put_user(env->regs[21], &sc->regs.r21);
3448 __put_user(env->regs[22], &sc->regs.r22);
3449 __put_user(env->regs[23], &sc->regs.r23);
3450 __put_user(env->regs[24], &sc->regs.r24);
3451 __put_user(env->regs[25], &sc->regs.r25);
3452 __put_user(env->regs[26], &sc->regs.r26);
3453 __put_user(env->regs[27], &sc->regs.r27);
3454 __put_user(env->regs[28], &sc->regs.r28);
3455 __put_user(env->regs[29], &sc->regs.r29);
3456 __put_user(env->regs[30], &sc->regs.r30);
3457 __put_user(env->regs[31], &sc->regs.r31);
3458 __put_user(env->sregs[SR_PC], &sc->regs.pc);
3459 }
3460
3461 static void restore_sigcontext(struct target_sigcontext *sc, CPUMBState *env)
3462 {
3463 __get_user(env->regs[0], &sc->regs.r0);
3464 __get_user(env->regs[1], &sc->regs.r1);
3465 __get_user(env->regs[2], &sc->regs.r2);
3466 __get_user(env->regs[3], &sc->regs.r3);
3467 __get_user(env->regs[4], &sc->regs.r4);
3468 __get_user(env->regs[5], &sc->regs.r5);
3469 __get_user(env->regs[6], &sc->regs.r6);
3470 __get_user(env->regs[7], &sc->regs.r7);
3471 __get_user(env->regs[8], &sc->regs.r8);
3472 __get_user(env->regs[9], &sc->regs.r9);
3473 __get_user(env->regs[10], &sc->regs.r10);
3474 __get_user(env->regs[11], &sc->regs.r11);
3475 __get_user(env->regs[12], &sc->regs.r12);
3476 __get_user(env->regs[13], &sc->regs.r13);
3477 __get_user(env->regs[14], &sc->regs.r14);
3478 __get_user(env->regs[15], &sc->regs.r15);
3479 __get_user(env->regs[16], &sc->regs.r16);
3480 __get_user(env->regs[17], &sc->regs.r17);
3481 __get_user(env->regs[18], &sc->regs.r18);
3482 __get_user(env->regs[19], &sc->regs.r19);
3483 __get_user(env->regs[20], &sc->regs.r20);
3484 __get_user(env->regs[21], &sc->regs.r21);
3485 __get_user(env->regs[22], &sc->regs.r22);
3486 __get_user(env->regs[23], &sc->regs.r23);
3487 __get_user(env->regs[24], &sc->regs.r24);
3488 __get_user(env->regs[25], &sc->regs.r25);
3489 __get_user(env->regs[26], &sc->regs.r26);
3490 __get_user(env->regs[27], &sc->regs.r27);
3491 __get_user(env->regs[28], &sc->regs.r28);
3492 __get_user(env->regs[29], &sc->regs.r29);
3493 __get_user(env->regs[30], &sc->regs.r30);
3494 __get_user(env->regs[31], &sc->regs.r31);
3495 __get_user(env->sregs[SR_PC], &sc->regs.pc);
3496 }
3497
3498 static abi_ulong get_sigframe(struct target_sigaction *ka,
3499 CPUMBState *env, int frame_size)
3500 {
3501 abi_ulong sp = env->regs[1];
3502
3503 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !on_sig_stack(sp)) {
3504 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3505 }
3506
3507 return ((sp - frame_size) & -8UL);
3508 }
3509
3510 static void setup_frame(int sig, struct target_sigaction *ka,
3511 target_sigset_t *set, CPUMBState *env)
3512 {
3513 struct target_signal_frame *frame;
3514 abi_ulong frame_addr;
3515 int i;
3516
3517 frame_addr = get_sigframe(ka, env, sizeof *frame);
3518 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
3519 goto badframe;
3520
3521 /* Save the mask. */
3522 __put_user(set->sig[0], &frame->uc.tuc_mcontext.oldmask);
3523
3524 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3525 __put_user(set->sig[i], &frame->extramask[i - 1]);
3526 }
3527
3528 setup_sigcontext(&frame->uc.tuc_mcontext, env);
3529
3530 /* Set up to return from userspace. If provided, use a stub
3531 already in userspace. */
3532 /* the -8 offsets the return address to cater for the "rtsd r15, 8" return */
3533 if (ka->sa_flags & TARGET_SA_RESTORER) {
3534 env->regs[15] = ((unsigned long)ka->sa_restorer)-8;
3535 } else {
3536 uint32_t t;
3537 /* Note, these encodings are _big endian_! */
3538 /* addi r12, r0, __NR_sigreturn */
3539 t = 0x31800000UL | TARGET_NR_sigreturn;
3540 __put_user(t, frame->tramp + 0);
3541 /* brki r14, 0x8 */
3542 t = 0xb9cc0008UL;
3543 __put_user(t, frame->tramp + 1);
3544
3545 /* Return from sighandler will jump to the tramp.
3546 Negative 8 offset because return is rtsd r15, 8 */
3547 env->regs[15] = ((unsigned long)frame->tramp) - 8;
3548 }
3549
3550 /* Set up registers for signal handler */
3551 env->regs[1] = frame_addr;
3552 /* Signal handler args: */
3553 env->regs[5] = sig; /* Arg 0: signum */
3554 env->regs[6] = 0;
3555 /* arg 1: sigcontext */
3556 env->regs[7] = frame_addr + offsetof(typeof(*frame), uc);
3557
3558 /* Offset of 4 to handle microblaze rtid r14, 0 */
3559 env->sregs[SR_PC] = (unsigned long)ka->_sa_handler;
3560
3561 unlock_user_struct(frame, frame_addr, 1);
3562 return;
3563 badframe:
3564 force_sig(TARGET_SIGSEGV);
3565 }
3566
3567 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3568 target_siginfo_t *info,
3569 target_sigset_t *set, CPUMBState *env)
3570 {
3571 fprintf(stderr, "Microblaze setup_rt_frame: not implemented\n");
3572 }
3573
3574 long do_sigreturn(CPUMBState *env)
3575 {
3576 struct target_signal_frame *frame;
3577 abi_ulong frame_addr;
3578 target_sigset_t target_set;
3579 sigset_t set;
3580 int i;
3581
3582 frame_addr = env->regs[R_SP];
3583 /* Make sure the guest isn't playing games. */
3584 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
3585 goto badframe;
3586
3587 /* Restore blocked signals */
3588 __get_user(target_set.sig[0], &frame->uc.tuc_mcontext.oldmask);
3589 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3590 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3591 }
3592 target_to_host_sigset_internal(&set, &target_set);
3593 do_sigprocmask(SIG_SETMASK, &set, NULL);
3594
3595 restore_sigcontext(&frame->uc.tuc_mcontext, env);
3596 /* We got here through a sigreturn syscall; our path back is via an
3597 rtb insn, so set up r14 for that. */
3598 env->regs[14] = env->sregs[SR_PC];
3599
3600 unlock_user_struct(frame, frame_addr, 0);
3601 return env->regs[10];
3602 badframe:
3603 force_sig(TARGET_SIGSEGV);
3604 }
3605
3606 long do_rt_sigreturn(CPUMBState *env)
3607 {
3608 fprintf(stderr, "Microblaze do_rt_sigreturn: not implemented\n");
3609 return -TARGET_ENOSYS;
3610 }
3611
3612 #elif defined(TARGET_CRIS)
3613
3614 struct target_sigcontext {
3615 struct target_pt_regs regs; /* needs to be first */
3616 uint32_t oldmask;
3617 uint32_t usp; /* usp before stacking this gunk on it */
3618 };
3619
3620 /* Signal frames. */
3621 struct target_signal_frame {
3622 struct target_sigcontext sc;
3623 uint32_t extramask[TARGET_NSIG_WORDS - 1];
3624 uint16_t retcode[4]; /* Trampoline code. */
3625 };
3626
3627 struct rt_signal_frame {
3628 siginfo_t *pinfo;
3629 void *puc;
3630 siginfo_t info;
3631 struct ucontext uc;
3632 uint16_t retcode[4]; /* Trampoline code. */
3633 };
3634
3635 static void setup_sigcontext(struct target_sigcontext *sc, CPUCRISState *env)
3636 {
3637 __put_user(env->regs[0], &sc->regs.r0);
3638 __put_user(env->regs[1], &sc->regs.r1);
3639 __put_user(env->regs[2], &sc->regs.r2);
3640 __put_user(env->regs[3], &sc->regs.r3);
3641 __put_user(env->regs[4], &sc->regs.r4);
3642 __put_user(env->regs[5], &sc->regs.r5);
3643 __put_user(env->regs[6], &sc->regs.r6);
3644 __put_user(env->regs[7], &sc->regs.r7);
3645 __put_user(env->regs[8], &sc->regs.r8);
3646 __put_user(env->regs[9], &sc->regs.r9);
3647 __put_user(env->regs[10], &sc->regs.r10);
3648 __put_user(env->regs[11], &sc->regs.r11);
3649 __put_user(env->regs[12], &sc->regs.r12);
3650 __put_user(env->regs[13], &sc->regs.r13);
3651 __put_user(env->regs[14], &sc->usp);
3652 __put_user(env->regs[15], &sc->regs.acr);
3653 __put_user(env->pregs[PR_MOF], &sc->regs.mof);
3654 __put_user(env->pregs[PR_SRP], &sc->regs.srp);
3655 __put_user(env->pc, &sc->regs.erp);
3656 }
3657
3658 static void restore_sigcontext(struct target_sigcontext *sc, CPUCRISState *env)
3659 {
3660 __get_user(env->regs[0], &sc->regs.r0);
3661 __get_user(env->regs[1], &sc->regs.r1);
3662 __get_user(env->regs[2], &sc->regs.r2);
3663 __get_user(env->regs[3], &sc->regs.r3);
3664 __get_user(env->regs[4], &sc->regs.r4);
3665 __get_user(env->regs[5], &sc->regs.r5);
3666 __get_user(env->regs[6], &sc->regs.r6);
3667 __get_user(env->regs[7], &sc->regs.r7);
3668 __get_user(env->regs[8], &sc->regs.r8);
3669 __get_user(env->regs[9], &sc->regs.r9);
3670 __get_user(env->regs[10], &sc->regs.r10);
3671 __get_user(env->regs[11], &sc->regs.r11);
3672 __get_user(env->regs[12], &sc->regs.r12);
3673 __get_user(env->regs[13], &sc->regs.r13);
3674 __get_user(env->regs[14], &sc->usp);
3675 __get_user(env->regs[15], &sc->regs.acr);
3676 __get_user(env->pregs[PR_MOF], &sc->regs.mof);
3677 __get_user(env->pregs[PR_SRP], &sc->regs.srp);
3678 __get_user(env->pc, &sc->regs.erp);
3679 }
3680
3681 static abi_ulong get_sigframe(CPUCRISState *env, int framesize)
3682 {
3683 abi_ulong sp;
3684 /* Align the stack downwards to 4. */
3685 sp = (env->regs[R_SP] & ~3);
3686 return sp - framesize;
3687 }
3688
3689 static void setup_frame(int sig, struct target_sigaction *ka,
3690 target_sigset_t *set, CPUCRISState *env)
3691 {
3692 struct target_signal_frame *frame;
3693 abi_ulong frame_addr;
3694 int i;
3695
3696 frame_addr = get_sigframe(env, sizeof *frame);
3697 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
3698 goto badframe;
3699
3700 /*
3701 * The CRIS signal return trampoline. A real linux/CRIS kernel doesn't
3702 * use this trampoline anymore but it sets it up for GDB.
3703 * In QEMU, using the trampoline simplifies things a bit so we use it.
3704 *
3705 * This is movu.w __NR_sigreturn, r9; break 13;
3706 */
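     /* Layout of the three halfwords stored below: retcode[0] holds the
        movu.w opcode word targeting r9, retcode[1] carries the 16-bit
        sigreturn syscall number as its immediate, and retcode[2] is the
        break 13 opcode. */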
3707 __put_user(0x9c5f, frame->retcode+0);
3708 __put_user(TARGET_NR_sigreturn,
3709 frame->retcode + 1);
3710 __put_user(0xe93d, frame->retcode + 2);
3711
3712 /* Save the mask. */
3713 __put_user(set->sig[0], &frame->sc.oldmask);
3714
3715 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3716 __put_user(set->sig[i], &frame->extramask[i - 1]);
3717 }
3718
3719 setup_sigcontext(&frame->sc, env);
3720
3721 /* Move the stack and setup the arguments for the handler. */
3722 env->regs[R_SP] = frame_addr;
3723 env->regs[10] = sig;
3724 env->pc = (unsigned long) ka->_sa_handler;
3725 /* Link SRP so the guest returns through the trampoline. */
3726 env->pregs[PR_SRP] = frame_addr + offsetof(typeof(*frame), retcode);
3727
3728 unlock_user_struct(frame, frame_addr, 1);
3729 return;
3730 badframe:
3731 force_sig(TARGET_SIGSEGV);
3732 }
3733
3734 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3735 target_siginfo_t *info,
3736 target_sigset_t *set, CPUCRISState *env)
3737 {
3738 fprintf(stderr, "CRIS setup_rt_frame: not implemented\n");
3739 }
3740
3741 long do_sigreturn(CPUCRISState *env)
3742 {
3743 struct target_signal_frame *frame;
3744 abi_ulong frame_addr;
3745 target_sigset_t target_set;
3746 sigset_t set;
3747 int i;
3748
3749 frame_addr = env->regs[R_SP];
3750 /* Make sure the guest isn't playing games. */
3751 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
3752 goto badframe;
3753
3754 /* Restore blocked signals */
3755 __get_user(target_set.sig[0], &frame->sc.oldmask);
3756 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
3757 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
3758 }
3759 target_to_host_sigset_internal(&set, &target_set);
3760 do_sigprocmask(SIG_SETMASK, &set, NULL);
3761
3762 restore_sigcontext(&frame->sc, env);
3763 unlock_user_struct(frame, frame_addr, 0);
3764 return env->regs[10];
3765 badframe:
3766 force_sig(TARGET_SIGSEGV);
     return 0;
3767 }
3768
3769 long do_rt_sigreturn(CPUCRISState *env)
3770 {
3771 fprintf(stderr, "CRIS do_rt_sigreturn: not implemented\n");
3772 return -TARGET_ENOSYS;
3773 }
3774
3775 #elif defined(TARGET_OPENRISC)
3776
3777 struct target_sigcontext {
3778 struct target_pt_regs regs;
3779 abi_ulong oldmask;
3780 abi_ulong usp;
3781 };
3782
3783 struct target_ucontext {
3784 abi_ulong tuc_flags;
3785 abi_ulong tuc_link;
3786 target_stack_t tuc_stack;
3787 struct target_sigcontext tuc_mcontext;
3788 target_sigset_t tuc_sigmask; /* mask last for extensibility */
3789 };
3790
3791 struct target_rt_sigframe {
3792 abi_ulong pinfo;
3793 uint64_t puc;
3794 struct target_siginfo info;
3795 struct target_sigcontext sc;
3796 struct target_ucontext uc;
3797 unsigned char retcode[16]; /* trampoline code */
3798 };
3799
3800 /* This is the asm-generic/ucontext.h version */
3801 #if 0
3802 static int restore_sigcontext(CPUOpenRISCState *regs,
3803 struct target_sigcontext *sc)
3804 {
3805 unsigned int err = 0;
3806 unsigned long old_usp;
3807
3808 /* Always make any pending restarted system call return -EINTR */
3809 current_thread_info()->restart_block.fn = do_no_restart_syscall;
3810
3811 /* restore the regs from &sc->regs (same as sc, since regs is first)
3812 * (sc is already checked for VERIFY_READ since the sigframe was
3813 * checked in sys_sigreturn previously)
3814 */
3815
3816 if (copy_from_user(regs, &sc, sizeof(struct target_pt_regs))) {
3817 goto badframe;
3818 }
3819
3820 /* make sure the U-flag is set so user-mode cannot fool us */
3821
3822 regs->sr &= ~SR_SM;
3823
3824 /* restore the old USP as it was before we stacked the sc etc.
3825 * (we cannot just pop the sigcontext since we aligned the sp and
3826 * stuff after pushing it)
3827 */
3828
3829 __get_user(old_usp, &sc->usp);
3830 phx_signal("old_usp 0x%lx", old_usp);
3831
3832 __PHX__ REALLY /* ??? */
3833 wrusp(old_usp);
3834 regs->gpr[1] = old_usp;
3835
3836 /* TODO: the other ports use regs->orig_XX to disable syscall checks
3837 * after this completes, but we don't use that mechanism. maybe we can
3838 * use it now ?
3839 */
3840
3841 return err;
3842
3843 badframe:
3844 return 1;
3845 }
3846 #endif
3847
3848 /* Set up a signal frame. */
3849
3850 static void setup_sigcontext(struct target_sigcontext *sc,
3851 CPUOpenRISCState *regs,
3852 unsigned long mask)
3853 {
3854 unsigned long usp = regs->gpr[1];
3855
3856 /* copy the regs. they are first in sc so we can use sc directly */
3857
3858 /*copy_to_user(&sc, regs, sizeof(struct target_pt_regs));*/
3859
3860 /* Set the frametype to CRIS_FRAME_NORMAL for the execution of
3861 the signal handler. The frametype will be restored to its previous
3862 value in restore_sigcontext. */
3863 /*regs->frametype = CRIS_FRAME_NORMAL;*/
3864
3865 /* then some other stuff */
3866 __put_user(mask, &sc->oldmask);
3867 __put_user(usp, &sc->usp);
3868 }
3869
3870 static inline unsigned long align_sigframe(unsigned long sp)
3871 {
3872 unsigned long i;
3873 i = sp & ~3UL;
3874 return i;
3875 }
3876
3877 static inline abi_ulong get_sigframe(struct target_sigaction *ka,
3878 CPUOpenRISCState *regs,
3879 size_t frame_size)
3880 {
3881 unsigned long sp = regs->gpr[1];
3882 int onsigstack = on_sig_stack(sp);
3883
3884 /* redzone */
3885 /* This is the X/Open sanctioned signal stack switching. */
3886 if ((ka->sa_flags & TARGET_SA_ONSTACK) != 0 && !onsigstack) {
3887 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
3888 }
3889
3890 sp = align_sigframe(sp - frame_size);
3891
3892 /*
3893 * If we are on the alternate signal stack and would overflow it, don't.
3894 * Return an always-bogus address instead so we will die with SIGSEGV.
3895 */
3896
3897 if (onsigstack && !likely(on_sig_stack(sp))) {
3898 return -1L;
3899 }
3900
3901 return sp;
3902 }
3903
3904 static void setup_frame(int sig, struct target_sigaction *ka,
3905 target_sigset_t *set, CPUOpenRISCState *env)
3906 {
3907 qemu_log("Not implement.\n");
3908 }
3909
3910 static void setup_rt_frame(int sig, struct target_sigaction *ka,
3911 target_siginfo_t *info,
3912 target_sigset_t *set, CPUOpenRISCState *env)
3913 {
3914 int err = 0;
3915 abi_ulong frame_addr;
3916 unsigned long return_ip;
3917 struct target_rt_sigframe *frame;
3918 abi_ulong info_addr, uc_addr;
3919
3920 frame_addr = get_sigframe(ka, env, sizeof(*frame));
3921 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
3922 goto give_sigsegv;
3923 }
3924
3925 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
3926 __put_user(info_addr, &frame->pinfo);
3927 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
3928 __put_user(uc_addr, &frame->puc);
3929
3930 if (ka->sa_flags & SA_SIGINFO) {
3931 tswap_siginfo(&frame->info, info);
3932 }
3933
3934 /*err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));*/
3935 __put_user(0, &frame->uc.tuc_flags);
3936 __put_user(0, &frame->uc.tuc_link);
3937 __put_user(target_sigaltstack_used.ss_sp,
3938 &frame->uc.tuc_stack.ss_sp);
3939 __put_user(sas_ss_flags(env->gpr[1]), &frame->uc.tuc_stack.ss_flags);
3940 __put_user(target_sigaltstack_used.ss_size,
3941 &frame->uc.tuc_stack.ss_size);
3942 setup_sigcontext(&frame->sc, env, set->sig[0]);
3943
3944 /*err |= copy_to_user(frame->uc.tuc_sigmask, set, sizeof(*set));*/
3945
3946 /* trampoline - the desired return ip is the retcode itself */
3947 return_ip = (unsigned long)&frame->retcode;
3948 /* This is l.ori r11,r0,__NR_rt_sigreturn; l.sys 1 */
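     /* Byte layout of the trampoline stored below: 0xa960 is the upper
        halfword of l.ori r11,r0,K (the rt_sigreturn number fills the
        following halfword), 0x20000001 encodes l.sys 1, and 0x15000000
        is an l.nop used as padding. */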
3949 __put_user(0xa960, (short *)(frame->retcode + 0));
3950 __put_user(TARGET_NR_rt_sigreturn, (short *)(frame->retcode + 2));
3951 __put_user(0x20000001, (unsigned long *)(frame->retcode + 4));
3952 __put_user(0x15000000, (unsigned long *)(frame->retcode + 8));
3953
3954 if (err) {
3955 goto give_sigsegv;
3956 }
3957
3958 /* TODO what is the current->exec_domain stuff and invmap ? */
3959
3960 /* Set up registers for signal handler */
3961 env->pc = (unsigned long)ka->_sa_handler; /* what we enter NOW */
3962 env->gpr[9] = (unsigned long)return_ip; /* what we enter LATER */
3963 env->gpr[3] = (unsigned long)sig; /* arg 1: signo */
3964 env->gpr[4] = (unsigned long)&frame->info; /* arg 2: (siginfo_t*) */
3965 env->gpr[5] = (unsigned long)&frame->uc; /* arg 3: ucontext */
3966
3967 /* actually move the usp to reflect the stacked frame */
3968 env->gpr[1] = (unsigned long)frame;
3969
3970 return;
3971
3972 give_sigsegv:
3973 unlock_user_struct(frame, frame_addr, 1);
3974 if (sig == TARGET_SIGSEGV) {
3975 ka->_sa_handler = TARGET_SIG_DFL;
3976 }
3977 force_sig(TARGET_SIGSEGV);
3978 }
3979
3980 long do_sigreturn(CPUOpenRISCState *env)
3981 {
3982
3983 qemu_log("do_sigreturn: not implemented\n");
3984 return -TARGET_ENOSYS;
3985 }
3986
3987 long do_rt_sigreturn(CPUOpenRISCState *env)
3988 {
3989 qemu_log("do_rt_sigreturn: not implemented\n");
3990 return -TARGET_ENOSYS;
3991 }
3992 /* TARGET_OPENRISC */
3993
3994 #elif defined(TARGET_S390X)
3995
3996 #define __NUM_GPRS 16
3997 #define __NUM_FPRS 16
3998 #define __NUM_ACRS 16
3999
4000 #define S390_SYSCALL_SIZE 2
4001 #define __SIGNAL_FRAMESIZE 160 /* FIXME: 31-bit mode -> 96 */
4002
4003 #define _SIGCONTEXT_NSIG 64
4004 #define _SIGCONTEXT_NSIG_BPW 64 /* FIXME: 31-bit mode -> 32 */
4005 #define _SIGCONTEXT_NSIG_WORDS (_SIGCONTEXT_NSIG / _SIGCONTEXT_NSIG_BPW)
4006 #define _SIGMASK_COPY_SIZE (sizeof(unsigned long)*_SIGCONTEXT_NSIG_WORDS)
4007 #define PSW_ADDR_AMODE 0x0000000000000000UL /* 0x80000000UL for 31-bit */
4008 #define S390_SYSCALL_OPCODE ((uint16_t)0x0a00)
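     /* 0x0a00 ORed with a syscall number below 256 forms the two-byte
        "svc n" instruction; it is written into the retcode fields below
        as the sigreturn trampoline. */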
4009
4010 typedef struct {
4011 target_psw_t psw;
4012 target_ulong gprs[__NUM_GPRS];
4013 unsigned int acrs[__NUM_ACRS];
4014 } target_s390_regs_common;
4015
4016 typedef struct {
4017 unsigned int fpc;
4018 double fprs[__NUM_FPRS];
4019 } target_s390_fp_regs;
4020
4021 typedef struct {
4022 target_s390_regs_common regs;
4023 target_s390_fp_regs fpregs;
4024 } target_sigregs;
4025
4026 struct target_sigcontext {
4027 target_ulong oldmask[_SIGCONTEXT_NSIG_WORDS];
4028 target_sigregs *sregs;
4029 };
4030
4031 typedef struct {
4032 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE];
4033 struct target_sigcontext sc;
4034 target_sigregs sregs;
4035 int signo;
4036 uint8_t retcode[S390_SYSCALL_SIZE];
4037 } sigframe;
4038
4039 struct target_ucontext {
4040 target_ulong tuc_flags;
4041 struct target_ucontext *tuc_link;
4042 target_stack_t tuc_stack;
4043 target_sigregs tuc_mcontext;
4044 target_sigset_t tuc_sigmask; /* mask last for extensibility */
4045 };
4046
4047 typedef struct {
4048 uint8_t callee_used_stack[__SIGNAL_FRAMESIZE];
4049 uint8_t retcode[S390_SYSCALL_SIZE];
4050 struct target_siginfo info;
4051 struct target_ucontext uc;
4052 } rt_sigframe;
4053
4054 static inline abi_ulong
4055 get_sigframe(struct target_sigaction *ka, CPUS390XState *env, size_t frame_size)
4056 {
4057 abi_ulong sp;
4058
4059 /* Default to using normal stack */
4060 sp = env->regs[15];
4061
4062 /* This is the X/Open sanctioned signal stack switching. */
4063 if (ka->sa_flags & TARGET_SA_ONSTACK) {
4064 if (!sas_ss_flags(sp)) {
4065 sp = target_sigaltstack_used.ss_sp +
4066 target_sigaltstack_used.ss_size;
4067 }
4068 }
4069
4070 /* This is the legacy signal stack switching. */
4071 else if (/* FIXME !user_mode(regs) */ 0 &&
4072 !(ka->sa_flags & TARGET_SA_RESTORER) &&
4073 ka->sa_restorer) {
4074 sp = (abi_ulong) ka->sa_restorer;
4075 }
4076
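     /* Keep the frame 8-byte aligned, as the s390x ABI requires. */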
4077 return (sp - frame_size) & -8ul;
4078 }
4079
4080 static void save_sigregs(CPUS390XState *env, target_sigregs *sregs)
4081 {
4082 int i;
4083 //save_access_regs(current->thread.acrs); FIXME
4084
4085 /* Copy a 'clean' PSW mask to the user to avoid leaking
4086 information about whether PER is currently on. */
4087 __put_user(env->psw.mask, &sregs->regs.psw.mask);
4088 __put_user(env->psw.addr, &sregs->regs.psw.addr);
4089 for (i = 0; i < 16; i++) {
4090 __put_user(env->regs[i], &sregs->regs.gprs[i]);
4091 }
4092 for (i = 0; i < 16; i++) {
4093 __put_user(env->aregs[i], &sregs->regs.acrs[i]);
4094 }
4095 /*
4096 * We have to store the fp registers to current->thread.fp_regs
4097 * to merge them with the emulated registers.
4098 */
4099 //save_fp_regs(&current->thread.fp_regs); FIXME
4100 for (i = 0; i < 16; i++) {
4101 __put_user(get_freg(env, i)->ll, &sregs->fpregs.fprs[i]);
4102 }
4103 }
4104
4105 static void setup_frame(int sig, struct target_sigaction *ka,
4106 target_sigset_t *set, CPUS390XState *env)
4107 {
4108 sigframe *frame;
4109 abi_ulong frame_addr;
4110
4111 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4112 qemu_log("%s: frame_addr 0x%llx\n", __FUNCTION__,
4113 (unsigned long long)frame_addr);
4114 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4115 goto give_sigsegv;
4116 }
4117
4118 qemu_log("%s: 1\n", __FUNCTION__);
4119 __put_user(set->sig[0], &frame->sc.oldmask[0]);
4120
4121 save_sigregs(env, &frame->sregs);
4122
4123 __put_user((abi_ulong)(unsigned long)&frame->sregs,
4124 (abi_ulong *)&frame->sc.sregs);
4125
4126 /* Set up to return from userspace. If provided, use a stub
4127 already in userspace. */
4128 if (ka->sa_flags & TARGET_SA_RESTORER) {
4129 env->regs[14] = (unsigned long)
4130 ka->sa_restorer | PSW_ADDR_AMODE;
4131 } else {
4132 env->regs[14] = (unsigned long)
4133 frame->retcode | PSW_ADDR_AMODE;
4134 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_sigreturn,
4135 (uint16_t *)(frame->retcode));
4136 }
4137
4138 /* Set up backchain. */
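     /* The caller's stack pointer is stored at the start of the new frame
        so the frame chain can still be walked from inside the handler. */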
4139 __put_user(env->regs[15], (abi_ulong *) frame);
4140
4141 /* Set up registers for signal handler */
4142 env->regs[15] = frame_addr;
4143 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE;
4144
4145 env->regs[2] = sig; //map_signal(sig);
4146 env->regs[3] = frame_addr + offsetof(typeof(*frame), sc);
4147
4148 /* We forgot to include these in the sigcontext.
4149 To avoid breaking binary compatibility, they are passed as args. */
4150 env->regs[4] = 0; // FIXME: no clue... current->thread.trap_no;
4151 env->regs[5] = 0; // FIXME: no clue... current->thread.prot_addr;
4152
4153 /* Place signal number on stack to allow backtrace from handler. */
4154 __put_user(env->regs[2], (int *) &frame->signo);
4155 unlock_user_struct(frame, frame_addr, 1);
4156 return;
4157
4158 give_sigsegv:
4159 qemu_log("%s: give_sigsegv\n", __FUNCTION__);
4160 force_sig(TARGET_SIGSEGV);
4161 }
4162
4163 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4164 target_siginfo_t *info,
4165 target_sigset_t *set, CPUS390XState *env)
4166 {
4167 int i;
4168 rt_sigframe *frame;
4169 abi_ulong frame_addr;
4170
4171 frame_addr = get_sigframe(ka, env, sizeof *frame);
4172 qemu_log("%s: frame_addr 0x%llx\n", __FUNCTION__,
4173 (unsigned long long)frame_addr);
4174 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
4175 goto give_sigsegv;
4176 }
4177
4178 qemu_log("%s: 1\n", __FUNCTION__);
4179 tswap_siginfo(&frame->info, info);
4180
4181 /* Create the ucontext. */
4182 __put_user(0, &frame->uc.tuc_flags);
4183 __put_user((abi_ulong)0, (abi_ulong *)&frame->uc.tuc_link);
4184 __put_user(target_sigaltstack_used.ss_sp, &frame->uc.tuc_stack.ss_sp);
4185 __put_user(sas_ss_flags(get_sp_from_cpustate(env)),
4186 &frame->uc.tuc_stack.ss_flags);
4187 __put_user(target_sigaltstack_used.ss_size, &frame->uc.tuc_stack.ss_size);
4188 save_sigregs(env, &frame->uc.tuc_mcontext);
4189 for (i = 0; i < TARGET_NSIG_WORDS; i++) {
4190 __put_user((abi_ulong)set->sig[i],
4191 (abi_ulong *)&frame->uc.tuc_sigmask.sig[i]);
4192 }
4193
4194 /* Set up to return from userspace. If provided, use a stub
4195 already in userspace. */
4196 if (ka->sa_flags & TARGET_SA_RESTORER) {
4197 env->regs[14] = (unsigned long) ka->sa_restorer | PSW_ADDR_AMODE;
4198 } else {
4199 env->regs[14] = (unsigned long) frame->retcode | PSW_ADDR_AMODE;
4200 __put_user(S390_SYSCALL_OPCODE | TARGET_NR_rt_sigreturn,
4201 (uint16_t *)(frame->retcode));
4202 }
4203
4204 /* Set up backchain. */
4205 __put_user(env->regs[15], (abi_ulong *) frame);
4206
4207 /* Set up registers for signal handler */
4208 env->regs[15] = frame_addr;
4209 env->psw.addr = (target_ulong) ka->_sa_handler | PSW_ADDR_AMODE;
4210
4211 env->regs[2] = sig; //map_signal(sig);
4212 env->regs[3] = frame_addr + offsetof(typeof(*frame), info);
4213 env->regs[4] = frame_addr + offsetof(typeof(*frame), uc);
4214 return;
4215
4216 give_sigsegv:
4217 qemu_log("%s: give_sigsegv\n", __FUNCTION__);
4218 force_sig(TARGET_SIGSEGV);
4219 }
4220
4221 static int
4222 restore_sigregs(CPUS390XState *env, target_sigregs *sc)
4223 {
4224 int err = 0;
4225 int i;
4226
4227 for (i = 0; i < 16; i++) {
4228 __get_user(env->regs[i], &sc->regs.gprs[i]);
4229 }
4230
4231 __get_user(env->psw.mask, &sc->regs.psw.mask);
4232 qemu_log("%s: sc->regs.psw.addr 0x%llx env->psw.addr 0x%llx\n",
4233 __FUNCTION__, (unsigned long long)sc->regs.psw.addr,
4234 (unsigned long long)env->psw.addr);
4235 __get_user(env->psw.addr, &sc->regs.psw.addr);
4236 /* FIXME: 31-bit -> | PSW_ADDR_AMODE */
4237
4238 for (i = 0; i < 16; i++) {
4239 __get_user(env->aregs[i], &sc->regs.acrs[i]);
4240 }
4241 for (i = 0; i < 16; i++) {
4242 __get_user(get_freg(env, i)->ll, &sc->fpregs.fprs[i]);
4243 }
4244
4245 return err;
4246 }
4247
4248 long do_sigreturn(CPUS390XState *env)
4249 {
4250 sigframe *frame;
4251 abi_ulong frame_addr = env->regs[15];
4252 qemu_log("%s: frame_addr 0x%llx\n", __FUNCTION__,
4253 (unsigned long long)frame_addr);
4254 target_sigset_t target_set;
4255 sigset_t set;
4256
4257 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
4258 goto badframe;
4259 }
4260 __get_user(target_set.sig[0], &frame->sc.oldmask[0]);
4261
4262 target_to_host_sigset_internal(&set, &target_set);
4263 do_sigprocmask(SIG_SETMASK, &set, NULL); /* ~_BLOCKABLE? */
4264
4265 if (restore_sigregs(env, &frame->sregs)) {
4266 goto badframe;
4267 }
4268
4269 unlock_user_struct(frame, frame_addr, 0);
4270 return env->regs[2];
4271
4272 badframe:
4273 force_sig(TARGET_SIGSEGV);
4274 return 0;
4275 }
4276
4277 long do_rt_sigreturn(CPUS390XState *env)
4278 {
4279 rt_sigframe *frame;
4280 abi_ulong frame_addr = env->regs[15];
4281 qemu_log("%s: frame_addr 0x%llx\n", __FUNCTION__,
4282 (unsigned long long)frame_addr);
4283 sigset_t set;
4284
4285 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
4286 goto badframe;
4287 }
4288 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
4289
4290 do_sigprocmask(SIG_SETMASK, &set, NULL); /* ~_BLOCKABLE? */
4291
4292 if (restore_sigregs(env, &frame->uc.tuc_mcontext)) {
4293 goto badframe;
4294 }
4295
4296 if (do_sigaltstack(frame_addr + offsetof(rt_sigframe, uc.tuc_stack), 0,
4297 get_sp_from_cpustate(env)) == -EFAULT) {
4298 goto badframe;
4299 }
4300 unlock_user_struct(frame, frame_addr, 0);
4301 return env->regs[2];
4302
4303 badframe:
4304 unlock_user_struct(frame, frame_addr, 0);
4305 force_sig(TARGET_SIGSEGV);
4306 return 0;
4307 }
4308
4309 #elif defined(TARGET_PPC)
4310
4311 /* Size of dummy stack frame allocated when calling signal handler.
4312 See arch/powerpc/include/asm/ptrace.h. */
4313 #if defined(TARGET_PPC64)
4314 #define SIGNAL_FRAMESIZE 128
4315 #else
4316 #define SIGNAL_FRAMESIZE 64
4317 #endif
4318
4319 /* See arch/powerpc/include/asm/ucontext.h. Only used for 32-bit PPC;
4320 on 64-bit PPC, sigcontext and mcontext are one and the same. */
4321 struct target_mcontext {
4322 target_ulong mc_gregs[48];
4323 /* Includes fpscr. */
4324 uint64_t mc_fregs[33];
4325 target_ulong mc_pad[2];
4326 /* We need to handle Altivec and SPE at the same time, which no
4327 kernel needs to do. Fortunately, the kernel defines this bit to
4328 be Altivec-register-large all the time, rather than trying to
4329 twiddle it based on the specific platform. */
4330 union {
4331 /* SPE vector registers. One extra for SPEFSCR. */
4332 uint32_t spe[33];
4333 /* Altivec vector registers. The packing of VSCR and VRSAVE
4334 varies depending on whether we're PPC64 or not: PPC64 splits
4335 them apart; PPC32 stuffs them together. */
4336 #if defined(TARGET_PPC64)
4337 #define QEMU_NVRREG 34
4338 #else
4339 #define QEMU_NVRREG 33
4340 #endif
4341 ppc_avr_t altivec[QEMU_NVRREG];
4342 #undef QEMU_NVRREG
4343 } mc_vregs __attribute__((__aligned__(16)));
4344 };
4345
4346 /* See arch/powerpc/include/asm/sigcontext.h. */
4347 struct target_sigcontext {
4348 target_ulong _unused[4];
4349 int32_t signal;
4350 #if defined(TARGET_PPC64)
4351 int32_t pad0;
4352 #endif
4353 target_ulong handler;
4354 target_ulong oldmask;
4355 target_ulong regs; /* struct pt_regs __user * */
4356 #if defined(TARGET_PPC64)
4357 struct target_mcontext mcontext;
4358 #endif
4359 };
4360
4361 /* Indices for target_mcontext.mc_gregs, below.
4362 See arch/powerpc/include/asm/ptrace.h for details. */
4363 enum {
4364 TARGET_PT_R0 = 0,
4365 TARGET_PT_R1 = 1,
4366 TARGET_PT_R2 = 2,
4367 TARGET_PT_R3 = 3,
4368 TARGET_PT_R4 = 4,
4369 TARGET_PT_R5 = 5,
4370 TARGET_PT_R6 = 6,
4371 TARGET_PT_R7 = 7,
4372 TARGET_PT_R8 = 8,
4373 TARGET_PT_R9 = 9,
4374 TARGET_PT_R10 = 10,
4375 TARGET_PT_R11 = 11,
4376 TARGET_PT_R12 = 12,
4377 TARGET_PT_R13 = 13,
4378 TARGET_PT_R14 = 14,
4379 TARGET_PT_R15 = 15,
4380 TARGET_PT_R16 = 16,
4381 TARGET_PT_R17 = 17,
4382 TARGET_PT_R18 = 18,
4383 TARGET_PT_R19 = 19,
4384 TARGET_PT_R20 = 20,
4385 TARGET_PT_R21 = 21,
4386 TARGET_PT_R22 = 22,
4387 TARGET_PT_R23 = 23,
4388 TARGET_PT_R24 = 24,
4389 TARGET_PT_R25 = 25,
4390 TARGET_PT_R26 = 26,
4391 TARGET_PT_R27 = 27,
4392 TARGET_PT_R28 = 28,
4393 TARGET_PT_R29 = 29,
4394 TARGET_PT_R30 = 30,
4395 TARGET_PT_R31 = 31,
4396 TARGET_PT_NIP = 32,
4397 TARGET_PT_MSR = 33,
4398 TARGET_PT_ORIG_R3 = 34,
4399 TARGET_PT_CTR = 35,
4400 TARGET_PT_LNK = 36,
4401 TARGET_PT_XER = 37,
4402 TARGET_PT_CCR = 38,
4403 /* Yes, there are two registers with #39. One is 64-bit only. */
4404 TARGET_PT_MQ = 39,
4405 TARGET_PT_SOFTE = 39,
4406 TARGET_PT_TRAP = 40,
4407 TARGET_PT_DAR = 41,
4408 TARGET_PT_DSISR = 42,
4409 TARGET_PT_RESULT = 43,
4410 TARGET_PT_REGS_COUNT = 44
4411 };
4412
4413
4414 struct target_ucontext {
4415 target_ulong tuc_flags;
4416 target_ulong tuc_link; /* struct ucontext __user * */
4417 struct target_sigaltstack tuc_stack;
4418 #if !defined(TARGET_PPC64)
4419 int32_t tuc_pad[7];
4420 target_ulong tuc_regs; /* struct mcontext __user *
4421 points to uc_mcontext field */
4422 #endif
4423 target_sigset_t tuc_sigmask;
4424 #if defined(TARGET_PPC64)
4425 target_sigset_t unused[15]; /* Allow for uc_sigmask growth */
4426 struct target_sigcontext tuc_sigcontext;
4427 #else
4428 int32_t tuc_maskext[30];
4429 int32_t tuc_pad2[3];
4430 struct target_mcontext tuc_mcontext;
4431 #endif
4432 };
4433
4434 /* See arch/powerpc/kernel/signal_32.c. */
4435 struct target_sigframe {
4436 struct target_sigcontext sctx;
4437 struct target_mcontext mctx;
4438 int32_t abigap[56];
4439 };
4440
4441 #if defined(TARGET_PPC64)
4442
4443 #define TARGET_TRAMP_SIZE 6
4444
4445 struct target_rt_sigframe {
4446 /* sys_rt_sigreturn requires the ucontext be the first field */
4447 struct target_ucontext uc;
4448 target_ulong _unused[2];
4449 uint32_t trampoline[TARGET_TRAMP_SIZE];
4450 target_ulong pinfo; /* struct siginfo __user * */
4451 target_ulong puc; /* void __user * */
4452 struct target_siginfo info;
4453 /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */
4454 char abigap[288];
4455 } __attribute__((aligned(16)));
4456
4457 #else
4458
4459 struct target_rt_sigframe {
4460 struct target_siginfo info;
4461 struct target_ucontext uc;
4462 int32_t abigap[56];
4463 };
4464
4465 #endif
4466
4467 #if defined(TARGET_PPC64)
4468
4469 struct target_func_ptr {
4470 target_ulong entry;
4471 target_ulong toc;
4472 };
4473
4474 #endif
4475
4476 /* We use the mc_pad field for the signal return trampoline. */
4477 #define tramp mc_pad
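/* mc_pad provides two target_ulong slots, at least 8 bytes, which is
   exactly enough for the two 32-bit instructions that
   encode_trampoline() writes. */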
4478
4479 /* See arch/powerpc/kernel/signal.c. */
4480 static target_ulong get_sigframe(struct target_sigaction *ka,
4481 CPUPPCState *env,
4482 int frame_size)
4483 {
4484 target_ulong oldsp, newsp;
4485
4486 oldsp = env->gpr[1];
4487
4488 if ((ka->sa_flags & TARGET_SA_ONSTACK) &&
4489 (sas_ss_flags(oldsp) == 0)) {
4490 oldsp = (target_sigaltstack_used.ss_sp
4491 + target_sigaltstack_used.ss_size);
4492 }
4493
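     /* Round down to a 16-byte boundary to keep the stack aligned as the
        PowerPC ABIs require. */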
4494 newsp = (oldsp - frame_size) & ~0xFUL;
4495
4496 return newsp;
4497 }
4498
4499 static void save_user_regs(CPUPPCState *env, struct target_mcontext *frame)
4500 {
4501 target_ulong msr = env->msr;
4502 int i;
4503 target_ulong ccr = 0;
4504
4505 /* In general, the kernel attempts to be intelligent about what it
4506 needs to save for Altivec/FP/SPE registers. We don't care that
4507 much, so we just go ahead and save everything. */
4508
4509 /* Save general registers. */
4510 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
4511 __put_user(env->gpr[i], &frame->mc_gregs[i]);
4512 }
4513 __put_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
4514 __put_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
4515 __put_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
4516 __put_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
4517
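     /* Pack the eight 4-bit CR fields into one 32-bit CCR image, with
        CR0 in the most significant nibble. */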
4518 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
4519 ccr |= env->crf[i] << (32 - ((i + 1) * 4));
4520 }
4521 __put_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
4522
4523 /* Save Altivec registers if necessary. */
4524 if (env->insns_flags & PPC_ALTIVEC) {
4525 for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
4526 ppc_avr_t *avr = &env->avr[i];
4527 ppc_avr_t *vreg = &frame->mc_vregs.altivec[i];
4528
4529 __put_user(avr->u64[0], &vreg->u64[0]);
4530 __put_user(avr->u64[1], &vreg->u64[1]);
4531 }
4532 /* Set MSR_VR in the saved MSR value to indicate that
4533 frame->mc_vregs contains valid data. */
4534 msr |= MSR_VR;
4535 __put_user((uint32_t)env->spr[SPR_VRSAVE],
4536 &frame->mc_vregs.altivec[32].u32[3]);
4537 }
4538
4539 /* Save floating point registers. */
4540 if (env->insns_flags & PPC_FLOAT) {
4541 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
4542 __put_user(env->fpr[i], &frame->mc_fregs[i]);
4543 }
4544 __put_user((uint64_t) env->fpscr, &frame->mc_fregs[32]);
4545 }
4546
4547 /* Save SPE registers. The kernel only saves the high half. */
4548 if (env->insns_flags & PPC_SPE) {
4549 #if defined(TARGET_PPC64)
4550 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
4551 __put_user(env->gpr[i] >> 32, &frame->mc_vregs.spe[i]);
4552 }
4553 #else
4554 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
4555 __put_user(env->gprh[i], &frame->mc_vregs.spe[i]);
4556 }
4557 #endif
4558 /* Set MSR_SPE in the saved MSR value to indicate that
4559 frame->mc_vregs contains valid data. */
4560 msr |= MSR_SPE;
4561 __put_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
4562 }
4563
4564 /* Store MSR. */
4565 __put_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
4566 }
4567
4568 static void encode_trampoline(int sigret, uint32_t *tramp)
4569 {
4570 /* Set up the sigreturn trampoline: li r0,sigret; sc. */
4571 if (sigret) {
4572 __put_user(0x38000000 | sigret, &tramp[0]);
4573 __put_user(0x44000002, &tramp[1]);
4574 }
4575 }
4576
4577 static void restore_user_regs(CPUPPCState *env,
4578 struct target_mcontext *frame, int sig)
4579 {
4580 target_ulong save_r2 = 0;
4581 target_ulong msr;
4582 target_ulong ccr;
4583
4584 int i;
4585
4586 if (!sig) {
4587 save_r2 = env->gpr[2];
4588 }
4589
4590 /* Restore general registers. */
4591 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
4592 __get_user(env->gpr[i], &frame->mc_gregs[i]);
4593 }
4594 __get_user(env->nip, &frame->mc_gregs[TARGET_PT_NIP]);
4595 __get_user(env->ctr, &frame->mc_gregs[TARGET_PT_CTR]);
4596 __get_user(env->lr, &frame->mc_gregs[TARGET_PT_LNK]);
4597 __get_user(env->xer, &frame->mc_gregs[TARGET_PT_XER]);
4598 __get_user(ccr, &frame->mc_gregs[TARGET_PT_CCR]);
4599
4600 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
4601 env->crf[i] = (ccr >> (32 - ((i + 1) * 4))) & 0xf;
4602 }
4603
4604 if (!sig) {
4605 env->gpr[2] = save_r2;
4606 }
4607 /* Restore MSR. */
4608 __get_user(msr, &frame->mc_gregs[TARGET_PT_MSR]);
4609
4610 /* If doing signal return, restore the previous little-endian mode. */
4611 if (sig)
4612 env->msr = (env->msr & ~MSR_LE) | (msr & MSR_LE);
4613
4614 /* Restore Altivec registers if necessary. */
4615 if (env->insns_flags & PPC_ALTIVEC) {
4616 for (i = 0; i < ARRAY_SIZE(env->avr); i++) {
4617 ppc_avr_t *avr = &env->avr[i];
4618 ppc_avr_t *vreg = &frame->mc_vregs.altivec[i];
4619
4620 __get_user(avr->u64[0], &vreg->u64[0]);
4621 __get_user(avr->u64[1], &vreg->u64[1]);
4622 }
4623 /* Restore VRSAVE, which is stored after the Altivec
4624 registers in frame->mc_vregs. */
4625 __get_user(env->spr[SPR_VRSAVE],
4626 (target_ulong *)(&frame->mc_vregs.altivec[32].u32[3]));
4627 }
4628
4629 /* Restore floating point registers. */
4630 if (env->insns_flags & PPC_FLOAT) {
4631 uint64_t fpscr;
4632 for (i = 0; i < ARRAY_SIZE(env->fpr); i++) {
4633 __get_user(env->fpr[i], &frame->mc_fregs[i]);
4634 }
4635 __get_user(fpscr, &frame->mc_fregs[32]);
4636 env->fpscr = (uint32_t) fpscr;
4637 }
4638
4639 /* Restore SPE registers. The kernel only restores the high half. */
4640 if (env->insns_flags & PPC_SPE) {
4641 #if defined(TARGET_PPC64)
4642 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
4643 uint32_t hi;
4644
4645 __get_user(hi, &frame->mc_vregs.spe[i]);
4646 env->gpr[i] = ((uint64_t)hi << 32) | ((uint32_t) env->gpr[i]);
4647 }
4648 #else
4649 for (i = 0; i < ARRAY_SIZE(env->gprh); i++) {
4650 __get_user(env->gprh[i], &frame->mc_vregs.spe[i]);
4651 }
4652 #endif
4653 __get_user(env->spe_fscr, &frame->mc_vregs.spe[32]);
4654 }
4655 }
4656
4657 static void setup_frame(int sig, struct target_sigaction *ka,
4658 target_sigset_t *set, CPUPPCState *env)
4659 {
4660 struct target_sigframe *frame;
4661 struct target_sigcontext *sc;
4662 target_ulong frame_addr, newsp;
4663 int err = 0;
4664 #if defined(TARGET_PPC64)
4665 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
4666 #endif
4667
4668 frame_addr = get_sigframe(ka, env, sizeof(*frame));
4669 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
4670 goto sigsegv;
4671 sc = &frame->sctx;
4672
4673 __put_user(ka->_sa_handler, &sc->handler);
4674 __put_user(set->sig[0], &sc->oldmask);
4675 #if TARGET_ABI_BITS == 64
4676 __put_user(set->sig[0] >> 32, &sc->_unused[3]);
4677 #else
4678 __put_user(set->sig[1], &sc->_unused[3]);
4679 #endif
4680 __put_user(h2g(&frame->mctx), &sc->regs);
4681 __put_user(sig, &sc->signal);
4682
4683 /* Save user regs. */
4684 save_user_regs(env, &frame->mctx);
4685
4686 /* Construct the trampoline code on the stack. */
4687 encode_trampoline(TARGET_NR_sigreturn, (uint32_t *)&frame->mctx.tramp);
4688
4689 /* The kernel checks for the presence of a VDSO here. We don't
4690 emulate a vdso, so use a sigreturn system call. */
4691 env->lr = (target_ulong) h2g(frame->mctx.tramp);
4692
4693 /* Turn off all fp exceptions. */
4694 env->fpscr = 0;
4695
4696 /* Create a stack frame for the caller of the handler. */
4697 newsp = frame_addr - SIGNAL_FRAMESIZE;
4698 err |= put_user(env->gpr[1], newsp, target_ulong);
4699
4700 if (err)
4701 goto sigsegv;
4702
4703 /* Set up registers for signal handler. */
4704 env->gpr[1] = newsp;
4705 env->gpr[3] = sig;
4706 env->gpr[4] = frame_addr + offsetof(struct target_sigframe, sctx);
4707
4708 #if defined(TARGET_PPC64)
4709 if (get_ppc64_abi(image) < 2) {
4710 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
4711 struct target_func_ptr *handler =
4712 (struct target_func_ptr *)g2h(ka->_sa_handler);
4713 env->nip = tswapl(handler->entry);
4714 env->gpr[2] = tswapl(handler->toc);
4715 } else {
4716 /* ELFv2 PPC64 function pointers are entry points, but R12
4717 * must also be set */
4718 env->nip = tswapl((target_ulong) ka->_sa_handler);
4719 env->gpr[12] = env->nip;
4720 }
4721 #else
4722 env->nip = (target_ulong) ka->_sa_handler;
4723 #endif
4724
4725 /* Signal handlers are entered in big-endian mode. */
4726 env->msr &= ~MSR_LE;
4727
4728 unlock_user_struct(frame, frame_addr, 1);
4729 return;
4730
4731 sigsegv:
4732 unlock_user_struct(frame, frame_addr, 1);
4733 qemu_log("segfaulting from setup_frame\n");
4734 force_sig(TARGET_SIGSEGV);
4735 }
4736
4737 static void setup_rt_frame(int sig, struct target_sigaction *ka,
4738 target_siginfo_t *info,
4739 target_sigset_t *set, CPUPPCState *env)
4740 {
4741 struct target_rt_sigframe *rt_sf;
4742 uint32_t *trampptr = 0;
4743 struct target_mcontext *mctx = 0;
4744 target_ulong rt_sf_addr, newsp = 0;
4745 int i, err = 0;
4746 #if defined(TARGET_PPC64)
4747 struct image_info *image = ((TaskState *)thread_cpu->opaque)->info;
4748 #endif
4749
4750 rt_sf_addr = get_sigframe(ka, env, sizeof(*rt_sf));
4751 if (!lock_user_struct(VERIFY_WRITE, rt_sf, rt_sf_addr, 1))
4752 goto sigsegv;
4753
4754 tswap_siginfo(&rt_sf->info, info);
4755
4756 __put_user(0, &rt_sf->uc.tuc_flags);
4757 __put_user(0, &rt_sf->uc.tuc_link);
4758 __put_user((target_ulong)target_sigaltstack_used.ss_sp,
4759 &rt_sf->uc.tuc_stack.ss_sp);
4760 __put_user(sas_ss_flags(env->gpr[1]),
4761 &rt_sf->uc.tuc_stack.ss_flags);
4762 __put_user(target_sigaltstack_used.ss_size,
4763 &rt_sf->uc.tuc_stack.ss_size);
4764 #if !defined(TARGET_PPC64)
4765 __put_user(h2g (&rt_sf->uc.tuc_mcontext),
4766 &rt_sf->uc.tuc_regs);
4767 #endif
4768 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
4769 __put_user(set->sig[i], &rt_sf->uc.tuc_sigmask.sig[i]);
4770 }
4771
4772 #if defined(TARGET_PPC64)
4773 mctx = &rt_sf->uc.tuc_sigcontext.mcontext;
4774 trampptr = &rt_sf->trampoline[0];
4775 #else
4776 mctx = &rt_sf->uc.tuc_mcontext;
4777 trampptr = (uint32_t *)&rt_sf->uc.tuc_mcontext.tramp;
4778 #endif
4779
4780 save_user_regs(env, mctx);
4781 encode_trampoline(TARGET_NR_rt_sigreturn, trampptr);
4782
4783 /* The kernel checks for the presence of a VDSO here. We don't
4784 emulate a vdso, so use a sigreturn system call. */
4785 env->lr = (target_ulong) h2g(trampptr);
4786
4787 /* Turn off all fp exceptions. */
4788 env->fpscr = 0;
4789
4790 /* Create a stack frame for the caller of the handler. */
4791 newsp = rt_sf_addr - (SIGNAL_FRAMESIZE + 16);
4792 err |= put_user(env->gpr[1], newsp, target_ulong);
4793
4794 if (err)
4795 goto sigsegv;
4796
4797 /* Set up registers for signal handler. */
4798 env->gpr[1] = newsp;
4799 env->gpr[3] = (target_ulong) sig;
4800 env->gpr[4] = (target_ulong) h2g(&rt_sf->info);
4801 env->gpr[5] = (target_ulong) h2g(&rt_sf->uc);
4802 env->gpr[6] = (target_ulong) h2g(rt_sf);
4803
4804 #if defined(TARGET_PPC64)
4805 if (get_ppc64_abi(image) < 2) {
4806 /* ELFv1 PPC64 function pointers are pointers to OPD entries. */
4807 struct target_func_ptr *handler =
4808 (struct target_func_ptr *)g2h(ka->_sa_handler);
4809 env->nip = tswapl(handler->entry);
4810 env->gpr[2] = tswapl(handler->toc);
4811 } else {
4812 /* ELFv2 PPC64 function pointers are entry points, but R12
4813 * must also be set */
4814 env->nip = tswapl((target_ulong) ka->_sa_handler);
4815 env->gpr[12] = env->nip;
4816 }
4817 #else
4818 env->nip = (target_ulong) ka->_sa_handler;
4819 #endif
4820
4821 /* Signal handlers are entered in big-endian mode. */
4822 env->msr &= ~MSR_LE;
4823
4824 unlock_user_struct(rt_sf, rt_sf_addr, 1);
4825 return;
4826
4827 sigsegv:
4828 unlock_user_struct(rt_sf, rt_sf_addr, 1);
4829 qemu_log("segfaulting from setup_rt_frame\n");
4830 force_sig(TARGET_SIGSEGV);
4831
4832 }
4833
4834 long do_sigreturn(CPUPPCState *env)
4835 {
4836 struct target_sigcontext *sc = NULL;
4837 struct target_mcontext *sr = NULL;
4838 target_ulong sr_addr = 0, sc_addr;
4839 sigset_t blocked;
4840 target_sigset_t set;
4841
4842 sc_addr = env->gpr[1] + SIGNAL_FRAMESIZE;
4843 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1))
4844 goto sigsegv;
4845
4846 #if defined(TARGET_PPC64)
4847 set.sig[0] = sc->oldmask + ((uint64_t)(sc->_unused[3]) << 32);
4848 #else
4849 __get_user(set.sig[0], &sc->oldmask);
4850 __get_user(set.sig[1], &sc->_unused[3]);
4851 #endif
4852 target_to_host_sigset_internal(&blocked, &set);
4853 do_sigprocmask(SIG_SETMASK, &blocked, NULL);
4854
4855 __get_user(sr_addr, &sc->regs);
4856 if (!lock_user_struct(VERIFY_READ, sr, sr_addr, 1))
4857 goto sigsegv;
4858 restore_user_regs(env, sr, 1);
4859
4860 unlock_user_struct(sr, sr_addr, 1);
4861 unlock_user_struct(sc, sc_addr, 1);
4862 return -TARGET_QEMU_ESIGRETURN;
4863
4864 sigsegv:
4865 unlock_user_struct(sr, sr_addr, 1);
4866 unlock_user_struct(sc, sc_addr, 1);
4867 qemu_log("segfaulting from do_sigreturn\n");
4868 force_sig(TARGET_SIGSEGV);
4869 return 0;
4870 }
4871
4872 /* See arch/powerpc/kernel/signal_32.c. */
4873 static int do_setcontext(struct target_ucontext *ucp, CPUPPCState *env, int sig)
4874 {
4875 struct target_mcontext *mcp;
4876 target_ulong mcp_addr;
4877 sigset_t blocked;
4878 target_sigset_t set;
4879
4880 if (copy_from_user(&set, h2g(ucp) + offsetof(struct target_ucontext, tuc_sigmask),
4881 sizeof (set)))
4882 return 1;
4883
4884 #if defined(TARGET_PPC64)
4885 mcp_addr = h2g(ucp) +
4886 offsetof(struct target_ucontext, tuc_sigcontext.mcontext);
4887 #else
4888 __get_user(mcp_addr, &ucp->tuc_regs);
4889 #endif
4890
4891 if (!lock_user_struct(VERIFY_READ, mcp, mcp_addr, 1))
4892 return 1;
4893
4894 target_to_host_sigset_internal(&blocked, &set);
4895 do_sigprocmask(SIG_SETMASK, &blocked, NULL);
4896 restore_user_regs(env, mcp, sig);
4897
4898 unlock_user_struct(mcp, mcp_addr, 1);
4899 return 0;
4900 }
4901
4902 long do_rt_sigreturn(CPUPPCState *env)
4903 {
4904 struct target_rt_sigframe *rt_sf = NULL;
4905 target_ulong rt_sf_addr;
4906
4907 rt_sf_addr = env->gpr[1] + SIGNAL_FRAMESIZE + 16;
4908 if (!lock_user_struct(VERIFY_READ, rt_sf, rt_sf_addr, 1))
4909 goto sigsegv;
4910
4911 if (do_setcontext(&rt_sf->uc, env, 1))
4912 goto sigsegv;
4913
4914 do_sigaltstack(rt_sf_addr
4915 + offsetof(struct target_rt_sigframe, uc.tuc_stack),
4916 0, env->gpr[1]);
4917
4918 unlock_user_struct(rt_sf, rt_sf_addr, 1);
4919 return -TARGET_QEMU_ESIGRETURN;
4920
4921 sigsegv:
4922 unlock_user_struct(rt_sf, rt_sf_addr, 1);
4923 qemu_log("segfaulting from do_rt_sigreturn\n");
4924 force_sig(TARGET_SIGSEGV);
4925 return 0;
4926 }
4927
4928 #elif defined(TARGET_M68K)
4929
4930 struct target_sigcontext {
4931 abi_ulong sc_mask;
4932 abi_ulong sc_usp;
4933 abi_ulong sc_d0;
4934 abi_ulong sc_d1;
4935 abi_ulong sc_a0;
4936 abi_ulong sc_a1;
4937 unsigned short sc_sr;
4938 abi_ulong sc_pc;
4939 };
4940
4941 struct target_sigframe
4942 {
4943 abi_ulong pretcode;
4944 int sig;
4945 int code;
4946 abi_ulong psc;
4947 char retcode[8];
4948 abi_ulong extramask[TARGET_NSIG_WORDS-1];
4949 struct target_sigcontext sc;
4950 };
4951
4952 typedef int target_greg_t;
4953 #define TARGET_NGREG 18
4954 typedef target_greg_t target_gregset_t[TARGET_NGREG];
4955
4956 typedef struct target_fpregset {
4957 int f_fpcntl[3];
4958 int f_fpregs[8*3];
4959 } target_fpregset_t;
4960
4961 struct target_mcontext {
4962 int version;
4963 target_gregset_t gregs;
4964 target_fpregset_t fpregs;
4965 };
4966
4967 #define TARGET_MCONTEXT_VERSION 2
4968
4969 struct target_ucontext {
4970 abi_ulong tuc_flags;
4971 abi_ulong tuc_link;
4972 target_stack_t tuc_stack;
4973 struct target_mcontext tuc_mcontext;
4974 abi_long tuc_filler[80];
4975 target_sigset_t tuc_sigmask;
4976 };
4977
4978 struct target_rt_sigframe
4979 {
4980 abi_ulong pretcode;
4981 int sig;
4982 abi_ulong pinfo;
4983 abi_ulong puc;
4984 char retcode[8];
4985 struct target_siginfo info;
4986 struct target_ucontext uc;
4987 };
4988
4989 static void setup_sigcontext(struct target_sigcontext *sc, CPUM68KState *env,
4990 abi_ulong mask)
4991 {
4992 __put_user(mask, &sc->sc_mask);
4993 __put_user(env->aregs[7], &sc->sc_usp);
4994 __put_user(env->dregs[0], &sc->sc_d0);
4995 __put_user(env->dregs[1], &sc->sc_d1);
4996 __put_user(env->aregs[0], &sc->sc_a0);
4997 __put_user(env->aregs[1], &sc->sc_a1);
4998 __put_user(env->sr, &sc->sc_sr);
4999 __put_user(env->pc, &sc->sc_pc);
5000 }
5001
5002 static void
5003 restore_sigcontext(CPUM68KState *env, struct target_sigcontext *sc, int *pd0)
5004 {
5005 int temp;
5006
5007 __get_user(env->aregs[7], &sc->sc_usp);
5008 __get_user(env->dregs[1], &sc->sc_d1);
5009 __get_user(env->aregs[0], &sc->sc_a0);
5010 __get_user(env->aregs[1], &sc->sc_a1);
5011 __get_user(env->pc, &sc->sc_pc);
5012 __get_user(temp, &sc->sc_sr);
5013 env->sr = (env->sr & 0xff00) | (temp & 0xff);
5014
5015 *pd0 = tswapl(sc->sc_d0);
5016 }
5017
5018 /*
5019 * Determine which stack to use.
5020 */
5021 static inline abi_ulong
5022 get_sigframe(struct target_sigaction *ka, CPUM68KState *regs,
5023 size_t frame_size)
5024 {
5025 unsigned long sp;
5026
5027 sp = regs->aregs[7];
5028
5029 /* This is the X/Open sanctioned signal stack switching. */
5030 if ((ka->sa_flags & TARGET_SA_ONSTACK) && (sas_ss_flags (sp) == 0)) {
5031 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
5032 }
5033
5034 return ((sp - frame_size) & -8UL);
5035 }
5036
5037 static void setup_frame(int sig, struct target_sigaction *ka,
5038 target_sigset_t *set, CPUM68KState *env)
5039 {
5040 struct target_sigframe *frame;
5041 abi_ulong frame_addr;
5042 abi_ulong retcode_addr;
5043 abi_ulong sc_addr;
5044 int i;
5045
5046 frame_addr = get_sigframe(ka, env, sizeof *frame);
5047 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
5048 goto give_sigsegv;
5049
5050 __put_user(sig, &frame->sig);
5051
5052 sc_addr = frame_addr + offsetof(struct target_sigframe, sc);
5053 __put_user(sc_addr, &frame->psc);
5054
5055 setup_sigcontext(&frame->sc, env, set->sig[0]);
5056
5057 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
5058 __put_user(set->sig[i], &frame->extramask[i - 1]);
5059 }
5060
5061 /* Set up to return from userspace. */
5062
5063 retcode_addr = frame_addr + offsetof(struct target_sigframe, retcode);
5064 __put_user(retcode_addr, &frame->pretcode);
5065
5066 /* moveq #,d0; trap #0 */
5067
5068 __put_user(0x70004e40 + (TARGET_NR_sigreturn << 16),
5069 (uint32_t *)(frame->retcode));
5070
5071 /* Set up to return from userspace */
5072
5073 env->aregs[7] = frame_addr;
5074 env->pc = ka->_sa_handler;
5075
5076 unlock_user_struct(frame, frame_addr, 1);
5077 return;
5078
5079 give_sigsegv:
5080 force_sig(TARGET_SIGSEGV);
5081 }
5082
5083 static inline int target_rt_setup_ucontext(struct target_ucontext *uc,
5084 CPUM68KState *env)
5085 {
5086 target_greg_t *gregs = uc->tuc_mcontext.gregs;
5087
5088 __put_user(TARGET_MCONTEXT_VERSION, &uc->tuc_mcontext.version);
5089 __put_user(env->dregs[0], &gregs[0]);
5090 __put_user(env->dregs[1], &gregs[1]);
5091 __put_user(env->dregs[2], &gregs[2]);
5092 __put_user(env->dregs[3], &gregs[3]);
5093 __put_user(env->dregs[4], &gregs[4]);
5094 __put_user(env->dregs[5], &gregs[5]);
5095 __put_user(env->dregs[6], &gregs[6]);
5096 __put_user(env->dregs[7], &gregs[7]);
5097 __put_user(env->aregs[0], &gregs[8]);
5098 __put_user(env->aregs[1], &gregs[9]);
5099 __put_user(env->aregs[2], &gregs[10]);
5100 __put_user(env->aregs[3], &gregs[11]);
5101 __put_user(env->aregs[4], &gregs[12]);
5102 __put_user(env->aregs[5], &gregs[13]);
5103 __put_user(env->aregs[6], &gregs[14]);
5104 __put_user(env->aregs[7], &gregs[15]);
5105 __put_user(env->pc, &gregs[16]);
5106 __put_user(env->sr, &gregs[17]);
5107
5108 return 0;
5109 }
5110
5111 static inline int target_rt_restore_ucontext(CPUM68KState *env,
5112 struct target_ucontext *uc,
5113 int *pd0)
5114 {
5115 int temp;
5116 target_greg_t *gregs = uc->tuc_mcontext.gregs;
5117
5118 __get_user(temp, &uc->tuc_mcontext.version);
5119 if (temp != TARGET_MCONTEXT_VERSION)
5120 goto badframe;
5121
5122 /* restore passed registers */
5123 __get_user(env->dregs[0], &gregs[0]);
5124 __get_user(env->dregs[1], &gregs[1]);
5125 __get_user(env->dregs[2], &gregs[2]);
5126 __get_user(env->dregs[3], &gregs[3]);
5127 __get_user(env->dregs[4], &gregs[4]);
5128 __get_user(env->dregs[5], &gregs[5]);
5129 __get_user(env->dregs[6], &gregs[6]);
5130 __get_user(env->dregs[7], &gregs[7]);
5131 __get_user(env->aregs[0], &gregs[8]);
5132 __get_user(env->aregs[1], &gregs[9]);
5133 __get_user(env->aregs[2], &gregs[10]);
5134 __get_user(env->aregs[3], &gregs[11]);
5135 __get_user(env->aregs[4], &gregs[12]);
5136 __get_user(env->aregs[5], &gregs[13]);
5137 __get_user(env->aregs[6], &gregs[14]);
5138 __get_user(env->aregs[7], &gregs[15]);
5139 __get_user(env->pc, &gregs[16]);
5140 __get_user(temp, &gregs[17]);
5141 env->sr = (env->sr & 0xff00) | (temp & 0xff);
5142
5143 *pd0 = env->dregs[0];
5144 return 0;
5145
5146 badframe:
5147 return 1;
5148 }
5149
5150 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5151 target_siginfo_t *info,
5152 target_sigset_t *set, CPUM68KState *env)
5153 {
5154 struct target_rt_sigframe *frame;
5155 abi_ulong frame_addr;
5156 abi_ulong retcode_addr;
5157 abi_ulong info_addr;
5158 abi_ulong uc_addr;
5159 int err = 0;
5160 int i;
5161
5162 frame_addr = get_sigframe(ka, env, sizeof *frame);
5163 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0))
5164 goto give_sigsegv;
5165
5166 __put_user(sig, &frame->sig);
5167
5168 info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
5169 __put_user(info_addr, &frame->pinfo);
5170
5171 uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
5172 __put_user(uc_addr, &frame->puc);
5173
5174 tswap_siginfo(&frame->info, info);
5175
5176 /* Create the ucontext */
5177
5178 __put_user(0, &frame->uc.tuc_flags);
5179 __put_user(0, &frame->uc.tuc_link);
5180 __put_user(target_sigaltstack_used.ss_sp,
5181 &frame->uc.tuc_stack.ss_sp);
5182 __put_user(sas_ss_flags(env->aregs[7]),
5183 &frame->uc.tuc_stack.ss_flags);
5184 __put_user(target_sigaltstack_used.ss_size,
5185 &frame->uc.tuc_stack.ss_size);
5186 err |= target_rt_setup_ucontext(&frame->uc, env);
5187
5188 if (err)
5189 goto give_sigsegv;
5190
5191 for(i = 0; i < TARGET_NSIG_WORDS; i++) {
5192 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
5193 }
5194
5195 /* Set up to return from userspace. */
5196
5197 retcode_addr = frame_addr + offsetof(struct target_rt_sigframe, retcode);
5198 __put_user(retcode_addr, &frame->pretcode);
5199
5200 /* moveq #,d0; notb d0; trap #0 */
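     /* The rt_sigreturn number does not fit in moveq's signed 8-bit
        immediate, so its complemented value is loaded and notb d0 flips
        it back before the trap. */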
5201
5202 __put_user(0x70004600 + ((TARGET_NR_rt_sigreturn ^ 0xff) << 16),
5203 (uint32_t *)(frame->retcode + 0));
5204 __put_user(0x4e40, (uint16_t *)(frame->retcode + 4));
5205
5206 if (err)
5207 goto give_sigsegv;
5208
5209 /* Set up to return from userspace */
5210
5211 env->aregs[7] = frame_addr;
5212 env->pc = ka->_sa_handler;
5213
5214 unlock_user_struct(frame, frame_addr, 1);
5215 return;
5216
5217 give_sigsegv:
5218 unlock_user_struct(frame, frame_addr, 1);
5219 force_sig(TARGET_SIGSEGV);
5220 }
5221
5222 long do_sigreturn(CPUM68KState *env)
5223 {
5224 struct target_sigframe *frame;
5225 abi_ulong frame_addr = env->aregs[7] - 4;
5226 target_sigset_t target_set;
5227 sigset_t set;
5228 int d0, i;
5229
5230 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
5231 goto badframe;
5232
5233 /* set blocked signals */
5234
5235 __get_user(target_set.sig[0], &frame->sc.sc_mask);
5236
5237 for(i = 1; i < TARGET_NSIG_WORDS; i++) {
5238 __get_user(target_set.sig[i], &frame->extramask[i - 1]);
5239 }
5240
5241 target_to_host_sigset_internal(&set, &target_set);
5242 do_sigprocmask(SIG_SETMASK, &set, NULL);
5243
5244 /* restore registers */
5245
5246 restore_sigcontext(env, &frame->sc, &d0);
5247
5248 unlock_user_struct(frame, frame_addr, 0);
5249 return d0;
5250
5251 badframe:
5252 force_sig(TARGET_SIGSEGV);
5253 return 0;
5254 }
5255
5256 long do_rt_sigreturn(CPUM68KState *env)
5257 {
5258 struct target_rt_sigframe *frame;
5259 abi_ulong frame_addr = env->aregs[7] - 4;
5261 sigset_t set;
5262 int d0;
5263
5264 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1))
5265 goto badframe;
5266
5267 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
5268 do_sigprocmask(SIG_SETMASK, &set, NULL);
5269
5270 /* restore registers */
5271
5272 if (target_rt_restore_ucontext(env, &frame->uc, &d0))
5273 goto badframe;
5274
5275 if (do_sigaltstack(frame_addr +
5276 offsetof(struct target_rt_sigframe, uc.tuc_stack),
5277 0, get_sp_from_cpustate(env)) == -EFAULT)
5278 goto badframe;
5279
5280 unlock_user_struct(frame, frame_addr, 0);
5281 return d0;
5282
5283 badframe:
5284 unlock_user_struct(frame, frame_addr, 0);
5285 force_sig(TARGET_SIGSEGV);
5286 return 0;
5287 }
5288
5289 #elif defined(TARGET_ALPHA)
5290
5291 struct target_sigcontext {
5292 abi_long sc_onstack;
5293 abi_long sc_mask;
5294 abi_long sc_pc;
5295 abi_long sc_ps;
5296 abi_long sc_regs[32];
5297 abi_long sc_ownedfp;
5298 abi_long sc_fpregs[32];
5299 abi_ulong sc_fpcr;
5300 abi_ulong sc_fp_control;
5301 abi_ulong sc_reserved1;
5302 abi_ulong sc_reserved2;
5303 abi_ulong sc_ssize;
5304 abi_ulong sc_sbase;
5305 abi_ulong sc_traparg_a0;
5306 abi_ulong sc_traparg_a1;
5307 abi_ulong sc_traparg_a2;
5308 abi_ulong sc_fp_trap_pc;
5309 abi_ulong sc_fp_trigger_sum;
5310 abi_ulong sc_fp_trigger_inst;
5311 };
5312
5313 struct target_ucontext {
5314 abi_ulong tuc_flags;
5315 abi_ulong tuc_link;
5316 abi_ulong tuc_osf_sigmask;
5317 target_stack_t tuc_stack;
5318 struct target_sigcontext tuc_mcontext;
5319 target_sigset_t tuc_sigmask;
5320 };
5321
5322 struct target_sigframe {
5323 struct target_sigcontext sc;
5324 unsigned int retcode[3];
5325 };
5326
5327 struct target_rt_sigframe {
5328 target_siginfo_t info;
5329 struct target_ucontext uc;
5330 unsigned int retcode[3];
5331 };
5332
5333 #define INSN_MOV_R30_R16 0x47fe0410
5334 #define INSN_LDI_R0 0x201f0000
5335 #define INSN_CALLSYS 0x00000083
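/* Together these encode the on-stack trampoline used when no
   sa_restorer is supplied: copy the stack pointer into a0, load the
   sigreturn number into v0 (added to the ldi displacement below), and
   issue the callsys PALcode call. */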
5336
5337 static void setup_sigcontext(struct target_sigcontext *sc, CPUAlphaState *env,
5338 abi_ulong frame_addr, target_sigset_t *set)
5339 {
5340 int i;
5341
5342 __put_user(on_sig_stack(frame_addr), &sc->sc_onstack);
5343 __put_user(set->sig[0], &sc->sc_mask);
5344 __put_user(env->pc, &sc->sc_pc);
5345 __put_user(8, &sc->sc_ps);
5346
5347 for (i = 0; i < 31; ++i) {
5348 __put_user(env->ir[i], &sc->sc_regs[i]);
5349 }
5350 __put_user(0, &sc->sc_regs[31]);
5351
5352 for (i = 0; i < 31; ++i) {
5353 __put_user(env->fir[i], &sc->sc_fpregs[i]);
5354 }
5355 __put_user(0, &sc->sc_fpregs[31]);
5356 __put_user(cpu_alpha_load_fpcr(env), &sc->sc_fpcr);
5357
5358 __put_user(0, &sc->sc_traparg_a0); /* FIXME */
5359 __put_user(0, &sc->sc_traparg_a1); /* FIXME */
5360 __put_user(0, &sc->sc_traparg_a2); /* FIXME */
5361 }
5362
5363 static void restore_sigcontext(CPUAlphaState *env,
5364 struct target_sigcontext *sc)
5365 {
5366 uint64_t fpcr;
5367 int i;
5368
5369 __get_user(env->pc, &sc->sc_pc);
5370
5371 for (i = 0; i < 31; ++i) {
5372 __get_user(env->ir[i], &sc->sc_regs[i]);
5373 }
5374 for (i = 0; i < 31; ++i) {
5375 __get_user(env->fir[i], &sc->sc_fpregs[i]);
5376 }
5377
5378 __get_user(fpcr, &sc->sc_fpcr);
5379 cpu_alpha_store_fpcr(env, fpcr);
5380 }
5381
5382 static inline abi_ulong get_sigframe(struct target_sigaction *sa,
5383 CPUAlphaState *env,
5384 unsigned long framesize)
5385 {
5386 abi_ulong sp = env->ir[IR_SP];
5387
5388 /* This is the X/Open sanctioned signal stack switching. */
5389 if ((sa->sa_flags & TARGET_SA_ONSTACK) != 0 && !sas_ss_flags(sp)) {
5390 sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
5391 }
5392 return (sp - framesize) & -32;
5393 }
5394
5395 static void setup_frame(int sig, struct target_sigaction *ka,
5396 target_sigset_t *set, CPUAlphaState *env)
5397 {
5398 abi_ulong frame_addr, r26;
5399 struct target_sigframe *frame;
5400 int err = 0;
5401
5402 frame_addr = get_sigframe(ka, env, sizeof(*frame));
5403 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5404 goto give_sigsegv;
5405 }
5406
5407 setup_sigcontext(&frame->sc, env, frame_addr, set);
5408
5409 if (ka->sa_restorer) {
5410 r26 = ka->sa_restorer;
5411 } else {
5412 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
5413 __put_user(INSN_LDI_R0 + TARGET_NR_sigreturn,
5414 &frame->retcode[1]);
5415 __put_user(INSN_CALLSYS, &frame->retcode[2]);
5416 /* imb() */
5417 r26 = frame_addr;
5418 }
5419
5420 unlock_user_struct(frame, frame_addr, 1);
5421
5422 if (err) {
5423 give_sigsegv:
5424 if (sig == TARGET_SIGSEGV) {
5425 ka->_sa_handler = TARGET_SIG_DFL;
5426 }
5427 force_sig(TARGET_SIGSEGV);
5428 }
5429
5430 env->ir[IR_RA] = r26;
5431 env->ir[IR_PV] = env->pc = ka->_sa_handler;
5432 env->ir[IR_A0] = sig;
5433 env->ir[IR_A1] = 0;
5434 env->ir[IR_A2] = frame_addr + offsetof(struct target_sigframe, sc);
5435 env->ir[IR_SP] = frame_addr;
5436 }
5437
5438 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5439 target_siginfo_t *info,
5440 target_sigset_t *set, CPUAlphaState *env)
5441 {
5442 abi_ulong frame_addr, r26;
5443 struct target_rt_sigframe *frame;
5444 int i, err = 0;
5445
5446 frame_addr = get_sigframe(ka, env, sizeof(*frame));
5447 if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
5448 goto give_sigsegv;
5449 }
5450
5451 tswap_siginfo(&frame->info, info);
5452
5453 __put_user(0, &frame->uc.tuc_flags);
5454 __put_user(0, &frame->uc.tuc_link);
5455 __put_user(set->sig[0], &frame->uc.tuc_osf_sigmask);
5456 __put_user(target_sigaltstack_used.ss_sp,
5457 &frame->uc.tuc_stack.ss_sp);
5458 __put_user(sas_ss_flags(env->ir[IR_SP]),
5459 &frame->uc.tuc_stack.ss_flags);
5460 __put_user(target_sigaltstack_used.ss_size,
5461 &frame->uc.tuc_stack.ss_size);
5462 setup_sigcontext(&frame->uc.tuc_mcontext, env, frame_addr, set);
5463 for (i = 0; i < TARGET_NSIG_WORDS; ++i) {
5464 __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
5465 }
5466
5467 if (ka->sa_restorer) {
5468 r26 = ka->sa_restorer;
5469 } else {
5470 __put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
5471 __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn,
5472 &frame->retcode[1]);
5473 __put_user(INSN_CALLSYS, &frame->retcode[2]);
5474 /* imb(); */
5475 r26 = frame_addr;
5476 }
5477
5478 if (err) {
5479 give_sigsegv:
5480 if (sig == TARGET_SIGSEGV) {
5481 ka->_sa_handler = TARGET_SIG_DFL;
5482 }
5483 force_sig(TARGET_SIGSEGV);
5484 }
5485
5486 env->ir[IR_RA] = r26;
5487 env->ir[IR_PV] = env->pc = ka->_sa_handler;
5488 env->ir[IR_A0] = sig;
5489 env->ir[IR_A1] = frame_addr + offsetof(struct target_rt_sigframe, info);
5490 env->ir[IR_A2] = frame_addr + offsetof(struct target_rt_sigframe, uc);
5491 env->ir[IR_SP] = frame_addr;
5492 }
5493
5494 long do_sigreturn(CPUAlphaState *env)
5495 {
5496 struct target_sigcontext *sc;
5497 abi_ulong sc_addr = env->ir[IR_A0];
5498 target_sigset_t target_set;
5499 sigset_t set;
5500
5501 if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) {
5502 goto badframe;
5503 }
5504
5505 target_sigemptyset(&target_set);
5506 __get_user(target_set.sig[0], &sc->sc_mask);
5507
5508 target_to_host_sigset_internal(&set, &target_set);
5509 do_sigprocmask(SIG_SETMASK, &set, NULL);
5510
5511 restore_sigcontext(env, sc);
5512 unlock_user_struct(sc, sc_addr, 0);
5513 return env->ir[IR_V0];
5514
5515 badframe:
5516 force_sig(TARGET_SIGSEGV);
     return 0;
5517 }
5518
5519 long do_rt_sigreturn(CPUAlphaState *env)
5520 {
5521 abi_ulong frame_addr = env->ir[IR_A0];
5522 struct target_rt_sigframe *frame;
5523 sigset_t set;
5524
5525 if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
5526 goto badframe;
5527 }
5528 target_to_host_sigset(&set, &frame->uc.tuc_sigmask);
5529 do_sigprocmask(SIG_SETMASK, &set, NULL);
5530
5531 restore_sigcontext(env, &frame->uc.tuc_mcontext);
5532 if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
5533 uc.tuc_stack),
5534 0, env->ir[IR_SP]) == -EFAULT) {
5535 goto badframe;
5536 }
5537
5538 unlock_user_struct(frame, frame_addr, 0);
5539 return env->ir[IR_V0];
5540
5541
5542 badframe:
5543 unlock_user_struct(frame, frame_addr, 0);
5544 force_sig(TARGET_SIGSEGV);
     return 0;
5545 }
5546
5547 #else
5548
5549 static void setup_frame(int sig, struct target_sigaction *ka,
5550 target_sigset_t *set, CPUArchState *env)
5551 {
5552 fprintf(stderr, "setup_frame: not implemented\n");
5553 }
5554
5555 static void setup_rt_frame(int sig, struct target_sigaction *ka,
5556 target_siginfo_t *info,
5557 target_sigset_t *set, CPUArchState *env)
5558 {
5559 fprintf(stderr, "setup_rt_frame: not implemented\n");
5560 }
5561
5562 long do_sigreturn(CPUArchState *env)
5563 {
5564 fprintf(stderr, "do_sigreturn: not implemented\n");
5565 return -TARGET_ENOSYS;
5566 }
5567
5568 long do_rt_sigreturn(CPUArchState *env)
5569 {
5570 fprintf(stderr, "do_rt_sigreturn: not implemented\n");
5571 return -TARGET_ENOSYS;
5572 }
5573
5574 #endif
5575
5576 void process_pending_signals(CPUArchState *cpu_env)
5577 {
5578 CPUState *cpu = ENV_GET_CPU(cpu_env);
5579 int sig;
5580 abi_ulong handler;
5581 sigset_t set, old_set;
5582 target_sigset_t target_old_set;
5583 struct emulated_sigtable *k;
5584 struct target_sigaction *sa;
5585 struct sigqueue *q;
5586 TaskState *ts = cpu->opaque;
5587
5588 if (!ts->signal_pending)
5589 return;
5590
5591 /* FIXME: This is not threadsafe. */
5592 k = ts->sigtab;
5593 for(sig = 1; sig <= TARGET_NSIG; sig++) {
5594 if (k->pending)
5595 goto handle_signal;
5596 k++;
5597 }
5598 /* if no signal is pending, just return */
5599 ts->signal_pending = 0;
5600 return;
5601
5602 handle_signal:
5603 #ifdef DEBUG_SIGNAL
5604 fprintf(stderr, "qemu: process signal %d\n", sig);
5605 #endif
5606 /* dequeue signal */
5607 q = k->first;
5608 k->first = q->next;
5609 if (!k->first)
5610 k->pending = 0;
5611
5612 sig = gdb_handlesig(cpu, sig);
5613 if (!sig) {
5614 sa = NULL;
5615 handler = TARGET_SIG_IGN;
5616 } else {
5617 sa = &sigact_table[sig - 1];
5618 handler = sa->_sa_handler;
5619 }
5620
5621 if (ts->sigsegv_blocked && sig == TARGET_SIGSEGV) {
5622 /* Guest has blocked SIGSEGV but we got one anyway. Assume this
5623 * is a forced SIGSEGV (ie one the kernel handles via force_sig_info
5624 * because it got a real MMU fault), and treat as if default handler.
5625 */
5626 handler = TARGET_SIG_DFL;
5627 }
5628
5629 if (handler == TARGET_SIG_DFL) {
5630 /* default handler: ignore some signals. The others are job control or fatal. */
5631 if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
5632 kill(getpid(),SIGSTOP);
5633 } else if (sig != TARGET_SIGCHLD &&
5634 sig != TARGET_SIGURG &&
5635 sig != TARGET_SIGWINCH &&
5636 sig != TARGET_SIGCONT) {
5637 force_sig(sig);
5638 }
5639 } else if (handler == TARGET_SIG_IGN) {
5640 /* ignore sig */
5641 } else if (handler == TARGET_SIG_ERR) {
5642 force_sig(sig);
5643 } else {
5644 /* compute the blocked signals during the handler execution */
5645 target_to_host_sigset(&set, &sa->sa_mask);
5646 /* SA_NODEFER indicates that the current signal should not be
5647 blocked during the handler */
5648 if (!(sa->sa_flags & TARGET_SA_NODEFER))
5649 sigaddset(&set, target_to_host_signal(sig));
5650
5651 /* block signals in the handler using Linux */
5652 do_sigprocmask(SIG_BLOCK, &set, &old_set);
5653 /* save the previous blocked signal state to restore it at the
5654 end of the signal execution (see do_sigreturn) */
5655 host_to_target_sigset_internal(&target_old_set, &old_set);
5656
5657 /* if the CPU is in VM86 mode, we restore the 32 bit values */
5658 #if defined(TARGET_I386) && !defined(TARGET_X86_64)
5659 {
5660 CPUX86State *env = cpu_env;
5661 if (env->eflags & VM_MASK)
5662 save_v86_state(env);
5663 }
5664 #endif
5665 /* prepare the stack frame of the virtual CPU */
5666 #if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64)
5667 /* These targets do not have traditional signals. */
5668 setup_rt_frame(sig, sa, &q->info, &target_old_set, cpu_env);
5669 #else
5670 if (sa->sa_flags & TARGET_SA_SIGINFO)
5671 setup_rt_frame(sig, sa, &q->info, &target_old_set, cpu_env);
5672 else
5673 setup_frame(sig, sa, &target_old_set, cpu_env);
5674 #endif
5675 if (sa->sa_flags & TARGET_SA_RESETHAND)
5676 sa->_sa_handler = TARGET_SIG_DFL;
5677 }
5678 if (q != &k->info)
5679 free_sigqueue(cpu_env, q);
5680 }