]> git.ipfire.org Git - people/arne_f/kernel.git/blame - arch/powerpc/kernel/signal_32.c
powerpc/tm: Set MSR[TS] just prior to recheckpoint
[people/arne_f/kernel.git] / arch / powerpc / kernel / signal_32.c
CommitLineData
1da177e4 1/*
81e7009e 2 * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
1da177e4 3 *
81e7009e
SR
4 * PowerPC version
5 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
1da177e4
LT
6 * Copyright (C) 2001 IBM
7 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
8 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
9 *
81e7009e
SR
10 * Derived from "arch/i386/kernel/signal.c"
11 * Copyright (C) 1991, 1992 Linus Torvalds
12 * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
1da177e4 13 *
81e7009e
SR
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version
17 * 2 of the License, or (at your option) any later version.
1da177e4
LT
18 */
19
1da177e4 20#include <linux/sched.h>
81e7009e 21#include <linux/mm.h>
1da177e4 22#include <linux/smp.h>
1da177e4
LT
23#include <linux/kernel.h>
24#include <linux/signal.h>
1da177e4
LT
25#include <linux/errno.h>
26#include <linux/elf.h>
05ead015 27#include <linux/ptrace.h>
56b04d56 28#include <linux/pagemap.h>
76462232 29#include <linux/ratelimit.h>
81e7009e 30#include <linux/syscalls.h>
f3675644 31#ifdef CONFIG_PPC64
1da177e4 32#include <linux/compat.h>
81e7009e
SR
33#else
34#include <linux/wait.h>
81e7009e
SR
35#include <linux/unistd.h>
36#include <linux/stddef.h>
37#include <linux/tty.h>
38#include <linux/binfmts.h>
81e7009e
SR
39#endif
40
7c0f6ba6 41#include <linux/uaccess.h>
81e7009e 42#include <asm/cacheflush.h>
a7f31841 43#include <asm/syscalls.h>
c5ff7001 44#include <asm/sigcontext.h>
a7f290da 45#include <asm/vdso.h>
ae3a197e 46#include <asm/switch_to.h>
2b0a576d 47#include <asm/tm.h>
0545d543 48#include <asm/asm-prototypes.h>
81e7009e 49#ifdef CONFIG_PPC64
879168ee 50#include "ppc32.h"
1da177e4 51#include <asm/unistd.h>
81e7009e
SR
52#else
53#include <asm/ucontext.h>
54#include <asm/pgtable.h>
55#endif
1da177e4 56
22e38f29
BH
57#include "signal.h"
58
1da177e4 59
81e7009e 60#ifdef CONFIG_PPC64
81e7009e
SR
61#define old_sigaction old_sigaction32
62#define sigcontext sigcontext32
63#define mcontext mcontext32
64#define ucontext ucontext32
65
7cce2465
AV
66#define __save_altstack __compat_save_altstack
67
c1cb299e
MN
68/*
69 * Userspace code may pass a ucontext which doesn't include VSX added
70 * at the end. We need to check for this case.
71 */
72#define UCONTEXTSIZEWITHOUTVSX \
73 (sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))
74
81e7009e
SR
75/*
76 * Returning 0 means we return to userspace via
77 * ret_from_except and thus restore all user
78 * registers from *regs. This is what we need
79 * to do when a signal has been delivered.
80 */
81e7009e
SR
81
82#define GP_REGS_SIZE min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
83#undef __SIGNAL_FRAMESIZE
84#define __SIGNAL_FRAMESIZE __SIGNAL_FRAMESIZE32
85#undef ELF_NVRREG
86#define ELF_NVRREG ELF_NVRREG32
87
88/*
89 * Functions for flipping sigsets (thanks to brain dead generic
90 * implementation that makes things simple for little endian only)
91 */
/*
 * Copy a kernel sigset_t out to userspace as a 32-bit compat_sigset_t.
 * Returns non-zero on fault.
 */
static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
{
	return put_compat_sigset(uset, set, sizeof(*uset));
}
96
9b7cf8b4
PM
/*
 * Read a 32-bit compat_sigset_t from userspace into a kernel sigset_t.
 * Returns non-zero on fault.
 */
static inline int get_sigset_t(sigset_t *set,
			       const compat_sigset_t __user *uset)
{
	return get_compat_sigset(set, uset);
}
102
29e646df 103#define to_user_ptr(p) ptr_to_compat(p)
81e7009e
SR
104#define from_user_ptr(p) compat_ptr(p)
105
106static inline int save_general_regs(struct pt_regs *regs,
107 struct mcontext __user *frame)
108{
109 elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
110 int i;
a8a4b03a
MS
111 /* Force usr to alway see softe as 1 (interrupts enabled) */
112 elf_greg_t64 softe = 0x1;
81e7009e 113
1bd79336 114 WARN_ON(!FULL_REGS(regs));
401d1f02
DW
115
116 for (i = 0; i <= PT_RESULT; i ++) {
117 if (i == 14 && !FULL_REGS(regs))
118 i = 32;
a8a4b03a
MS
119 if ( i == PT_SOFTE) {
120 if(__put_user((unsigned int)softe, &frame->mc_gregs[i]))
121 return -EFAULT;
122 else
123 continue;
124 }
81e7009e
SR
125 if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i]))
126 return -EFAULT;
401d1f02 127 }
81e7009e
SR
128 return 0;
129}
130
131static inline int restore_general_regs(struct pt_regs *regs,
132 struct mcontext __user *sr)
133{
134 elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
135 int i;
136
137 for (i = 0; i <= PT_RESULT; i++) {
138 if ((i == PT_MSR) || (i == PT_SOFTE))
139 continue;
140 if (__get_user(gregs[i], &sr->mc_gregs[i]))
141 return -EFAULT;
142 }
143 return 0;
144}
145
146#else /* CONFIG_PPC64 */
147
81e7009e
SR
148#define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
149
/*
 * Copy a sigset_t out to userspace.  Returns the number of bytes that
 * could not be copied (non-zero on fault).
 */
static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set)
{
	return copy_to_user(uset, set, sizeof(*uset));
}
154
/*
 * Read a sigset_t from userspace.  Returns the number of bytes that
 * could not be copied (non-zero on fault).
 */
static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
{
	return copy_from_user(set, uset, sizeof(*uset));
}
159
29e646df
AV
160#define to_user_ptr(p) ((unsigned long)(p))
161#define from_user_ptr(p) ((void __user *)(p))
81e7009e
SR
162
/*
 * Dump the (native 32-bit) pt_regs straight into the user mcontext's
 * gregs area.  GP_REGS_SIZE caps the copy at whichever of
 * elf_gregset_t / pt_regs is smaller.  Returns non-zero on fault.
 */
static inline int save_general_regs(struct pt_regs *regs,
		struct mcontext __user *frame)
{
	WARN_ON(!FULL_REGS(regs));
	return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
}
169
/*
 * Restore pt_regs from the user mcontext, skipping the MSR word so
 * userspace cannot forge privileged MSR bits.  Done as two copies:
 * everything below MSR, then everything from orig_r3 onward.
 */
static inline int restore_general_regs(struct pt_regs *regs,
		struct mcontext __user *sr)
{
	/* copy up to but not including MSR */
	if (__copy_from_user(regs, &sr->mc_gregs,
				PT_MSR * sizeof(elf_greg_t)))
		return -EFAULT;
	/* copy from orig_r3 (the word after the MSR) up to the end */
	if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
				GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
		return -EFAULT;
	return 0;
}
81e7009e
SR
183#endif
184
1da177e4
LT
185/*
186 * When we have signals to deliver, we set up on the
187 * user stack, going down from the original stack pointer:
a3f61dc0
BH
188 * an ABI gap of 56 words
189 * an mcontext struct
81e7009e
SR
190 * a sigcontext struct
191 * a gap of __SIGNAL_FRAMESIZE bytes
1da177e4 192 *
a3f61dc0
BH
193 * Each of these things must be a multiple of 16 bytes in size. The following
194 * structure represent all of this except the __SIGNAL_FRAMESIZE gap
1da177e4
LT
195 *
196 */
a3f61dc0
BH
/*
 * Layout of a (non-RT) signal frame on the user stack.  Must remain a
 * multiple of 16 bytes.  The sigreturn trampoline is written into
 * mctx.mc_pad (see the "tramp" define below this struct).
 */
struct sigframe {
	struct sigcontext sctx;		/* the sigcontext */
	struct mcontext	mctx;		/* all the register values */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* second context pair holding the transactional register state */
	struct sigcontext sctx_transact;
	struct mcontext	mctx_transact;
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};
210
211/* We use the mc_pad field for the signal return trampoline. */
212#define tramp mc_pad
213
214/*
215 * When we have rt signals to deliver, we set up on the
216 * user stack, going down from the original stack pointer:
81e7009e
SR
217 * one rt_sigframe struct (siginfo + ucontext + ABI gap)
218 * a gap of __SIGNAL_FRAMESIZE+16 bytes
219 * (the +16 is to get the siginfo and ucontext in the same
1da177e4
LT
220 * positions as in older kernels).
221 *
222 * Each of these things must be a multiple of 16 bytes in size.
223 *
224 */
81e7009e
SR
/*
 * Layout of an RT signal frame on the user stack: siginfo + ucontext
 * (+ transactional ucontext with TM) + ABI gap.  Must remain a
 * multiple of 16 bytes.
 */
struct rt_sigframe {
#ifdef CONFIG_PPC64
	compat_siginfo_t info;		/* 32-bit siginfo for compat tasks */
#else
	struct siginfo info;
#endif
	struct ucontext	uc;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* second ucontext holding the transactional register state */
	struct ucontext	uc_transact;
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};
241
6a274c08
MN
242#ifdef CONFIG_VSX
243unsigned long copy_fpr_to_user(void __user *to,
244 struct task_struct *task)
245{
de79f7b9 246 u64 buf[ELF_NFPREG];
6a274c08
MN
247 int i;
248
249 /* save FPR copy to local buffer then write to the thread_struct */
250 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
251 buf[i] = task->thread.TS_FPR(i);
de79f7b9 252 buf[i] = task->thread.fp_state.fpscr;
6a274c08
MN
253 return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
254}
255
256unsigned long copy_fpr_from_user(struct task_struct *task,
257 void __user *from)
258{
de79f7b9 259 u64 buf[ELF_NFPREG];
6a274c08
MN
260 int i;
261
262 if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
263 return 1;
264 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
265 task->thread.TS_FPR(i) = buf[i];
de79f7b9 266 task->thread.fp_state.fpscr = buf[i];
6a274c08
MN
267
268 return 0;
269}
270
271unsigned long copy_vsx_to_user(void __user *to,
272 struct task_struct *task)
273{
de79f7b9 274 u64 buf[ELF_NVSRHALFREG];
6a274c08
MN
275 int i;
276
277 /* save FPR copy to local buffer then write to the thread_struct */
278 for (i = 0; i < ELF_NVSRHALFREG; i++)
de79f7b9 279 buf[i] = task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
6a274c08
MN
280 return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
281}
282
283unsigned long copy_vsx_from_user(struct task_struct *task,
284 void __user *from)
285{
de79f7b9 286 u64 buf[ELF_NVSRHALFREG];
6a274c08
MN
287 int i;
288
289 if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
290 return 1;
291 for (i = 0; i < ELF_NVSRHALFREG ; i++)
de79f7b9 292 task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
6a274c08
MN
293 return 0;
294}
2b0a576d
MN
295
296#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
000ec280 297unsigned long copy_ckfpr_to_user(void __user *to,
2b0a576d
MN
298 struct task_struct *task)
299{
de79f7b9 300 u64 buf[ELF_NFPREG];
2b0a576d
MN
301 int i;
302
303 /* save FPR copy to local buffer then write to the thread_struct */
304 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
000ec280
CB
305 buf[i] = task->thread.TS_CKFPR(i);
306 buf[i] = task->thread.ckfp_state.fpscr;
2b0a576d
MN
307 return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
308}
309
000ec280 310unsigned long copy_ckfpr_from_user(struct task_struct *task,
2b0a576d
MN
311 void __user *from)
312{
de79f7b9 313 u64 buf[ELF_NFPREG];
2b0a576d
MN
314 int i;
315
316 if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
317 return 1;
318 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
000ec280
CB
319 task->thread.TS_CKFPR(i) = buf[i];
320 task->thread.ckfp_state.fpscr = buf[i];
2b0a576d
MN
321
322 return 0;
323}
324
000ec280 325unsigned long copy_ckvsx_to_user(void __user *to,
2b0a576d
MN
326 struct task_struct *task)
327{
de79f7b9 328 u64 buf[ELF_NVSRHALFREG];
2b0a576d
MN
329 int i;
330
331 /* save FPR copy to local buffer then write to the thread_struct */
332 for (i = 0; i < ELF_NVSRHALFREG; i++)
000ec280 333 buf[i] = task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
2b0a576d
MN
334 return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
335}
336
000ec280 337unsigned long copy_ckvsx_from_user(struct task_struct *task,
2b0a576d
MN
338 void __user *from)
339{
de79f7b9 340 u64 buf[ELF_NVSRHALFREG];
2b0a576d
MN
341 int i;
342
343 if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
344 return 1;
345 for (i = 0; i < ELF_NVSRHALFREG ; i++)
000ec280 346 task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
2b0a576d
MN
347 return 0;
348}
349#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
6a274c08
MN
350#else
/*
 * Non-VSX variant: fp_state.fpr already has the wire layout, so copy
 * it straight out.  Returns bytes not copied (non-zero on fault).
 */
inline unsigned long copy_fpr_to_user(void __user *to,
				      struct task_struct *task)
{
	return __copy_to_user(to, task->thread.fp_state.fpr,
			      ELF_NFPREG * sizeof(double));
}
357
/*
 * Non-VSX variant: read the user FP area directly into fp_state.fpr.
 * Returns bytes not copied (non-zero on fault).
 */
inline unsigned long copy_fpr_from_user(struct task_struct *task,
					void __user *from)
{
	return __copy_from_user(task->thread.fp_state.fpr, from,
				ELF_NFPREG * sizeof(double));
}
2b0a576d
MN
364
365#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Non-VSX variant for the TM checkpointed FP state: copy ckfp_state.fpr
 * straight out to userspace.
 */
inline unsigned long copy_ckfpr_to_user(void __user *to,
					struct task_struct *task)
{
	return __copy_to_user(to, task->thread.ckfp_state.fpr,
			      ELF_NFPREG * sizeof(double));
}
372
/*
 * Non-VSX variant for the TM checkpointed FP state: read the user FP
 * area directly into ckfp_state.fpr.
 */
inline unsigned long copy_ckfpr_from_user(struct task_struct *task,
					  void __user *from)
{
	return __copy_from_user(task->thread.ckfp_state.fpr, from,
				ELF_NFPREG * sizeof(double));
}
379#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
6a274c08
MN
380#endif
381
1da177e4
LT
382/*
383 * Save the current user registers on the user stack.
81e7009e
SR
384 * We only save the altivec/spe registers if the process has used
385 * altivec/spe instructions at some point.
1da177e4 386 */
/*
 * Save the current user register state into the user-stack mcontext
 * @frame.  @msr is accumulated locally and written out last so its
 * VEC/VSX/SPE bits describe exactly which optional state sections in
 * the frame hold valid data.  @tm_frame, when non-NULL, gets a zero
 * MSR so sigreturn can tell no transaction was active.  @sigret
 * non-zero requests an on-stack sigreturn trampoline (no VDSO).
 * Returns 1 on any fault, 0 on success.
 */
static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
		struct mcontext __user *tm_frame, int sigret,
		int ctx_has_vsx_region)
{
	unsigned long msr = regs->msr;

	/* Make sure floating point registers are stored in regs */
	flush_fp_to_thread(current);

	/* save general registers */
	if (save_general_regs(regs, frame))
		return 1;

#ifdef CONFIG_ALTIVEC
	/* save altivec registers */
	if (current->thread.used_vr) {
		flush_altivec_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
				   ELF_NVRREG * sizeof(vector128)))
			return 1;
		/* set MSR_VEC in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		msr |= MSR_VEC;
	}
	/* else assert((regs->msr & MSR_VEC) == 0) */

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 * Note that the current VRSAVE value is in the SPR at this point.
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.vrsave = mfspr(SPRN_VRSAVE);
	if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
		return 1;
#endif /* CONFIG_ALTIVEC */
	if (copy_fpr_to_user(&frame->mc_fregs, current))
		return 1;

	/*
	 * Clear the MSR VSX bit to indicate there is no valid state attached
	 * to this context, except in the specific case below where we set it.
	 */
	msr &= ~MSR_VSX;
#ifdef CONFIG_VSX
	/*
	 * Copy VSR 0-31 upper half from thread_struct to local
	 * buffer, then write that to userspace.  Also set MSR_VSX in
	 * the saved MSR value to indicate that frame->mc_vregs
	 * contains valid data
	 */
	if (current->thread.used_vsr && ctx_has_vsx_region) {
		flush_vsx_to_thread(current);
		if (copy_vsx_to_user(&frame->mc_vsregs, current))
			return 1;
		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* save spe registers */
	if (current->thread.used_spe) {
		flush_spe_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
				   ELF_NEVRREG * sizeof(u32)))
			return 1;
		/* set MSR_SPE in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		msr |= MSR_SPE;
	}
	/* else assert((regs->msr & MSR_SPE) == 0) */

	/* We always copy to/from spefscr */
	if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	/* Write the accumulated MSR last: its bits advertise which of the
	 * optional sections above were actually filled in. */
	if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
		return 1;
	/* We need to write 0 the MSR top 32 bits in the tm frame so that we
	 * can check it on the restore to see if TM is active
	 */
	if (tm_frame && __put_user(0, &tm_frame->mc_gregs[PT_MSR]))
		return 1;

	if (sigret) {
		/* Set up the sigreturn trampoline: li 0,sigret; sc */
		if (__put_user(PPC_INST_ADDI + sigret, &frame->tramp[0])
		    || __put_user(PPC_INST_SC, &frame->tramp[1]))
			return 1;
		flush_icache_range((unsigned long) &frame->tramp[0],
				   (unsigned long) &frame->tramp[2]);
	}

	return 0;
}
483
2b0a576d
MN
484#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
485/*
486 * Save the current user registers on the user stack.
487 * We only save the altivec/spe registers if the process has used
488 * altivec/spe instructions at some point.
489 * We also save the transactional registers to a second ucontext in the
490 * frame.
491 *
492 * See save_user_regs() and signal_64.c:setup_tm_sigcontexts().
493 */
/*
 * TM variant of save_user_regs(): @frame receives the checkpointed
 * register state, @tm_frame the transactional state.  The top half of
 * the 64-bit MSR is stashed in @tm_frame's 32-bit MSR word so sigreturn
 * can recover the transaction type (T or S).  For each optional unit,
 * the live (transactional) copy only exists if the corresponding MSR
 * bit was on at signal time; otherwise the checkpointed copy is
 * duplicated into @tm_frame.  Returns 1 on any fault, 0 on success.
 */
static int save_tm_user_regs(struct pt_regs *regs,
		struct mcontext __user *frame,
		struct mcontext __user *tm_frame, int sigret)
{
	unsigned long msr = regs->msr;

	/* Caller should never get here with TM suspend disabled */
	WARN_ON(tm_suspend_disabled);

	/* Remove TM bits from thread's MSR. The MSR in the sigcontext
	 * just indicates to userland that we were doing a transaction, but we
	 * don't want to return in transactional state.  This also ensures
	 * that flush_fp_to_thread won't set TIF_RESTORE_TM again.
	 */
	regs->msr &= ~MSR_TS_MASK;

	/* Save both sets of general registers */
	if (save_general_regs(&current->thread.ckpt_regs, frame)
	    || save_general_regs(regs, tm_frame))
		return 1;

	/* Stash the top half of the 64bit MSR into the 32bit MSR word
	 * of the transactional mcontext.  This way we have a backward-compatible
	 * MSR in the 'normal' (checkpointed) mcontext and additionally one can
	 * also look at what type of transaction (T or S) was active at the
	 * time of the signal.
	 */
	if (__put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR]))
		return 1;

#ifdef CONFIG_ALTIVEC
	/* save altivec registers: checkpointed set in frame, live
	 * (transactional) set in tm_frame if VEC was on, else duplicate
	 * the checkpointed set */
	if (current->thread.used_vr) {
		if (__copy_to_user(&frame->mc_vregs, &current->thread.ckvr_state,
				   ELF_NVRREG * sizeof(vector128)))
			return 1;
		if (msr & MSR_VEC) {
			if (__copy_to_user(&tm_frame->mc_vregs,
					   &current->thread.vr_state,
					   ELF_NVRREG * sizeof(vector128)))
				return 1;
		} else {
			if (__copy_to_user(&tm_frame->mc_vregs,
					   &current->thread.ckvr_state,
					   ELF_NVRREG * sizeof(vector128)))
				return 1;
		}

		/* set MSR_VEC in the saved MSR value to indicate that
		 * frame->mc_vregs contains valid data
		 */
		msr |= MSR_VEC;
	}

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.ckvrsave = mfspr(SPRN_VRSAVE);
	if (__put_user(current->thread.ckvrsave,
		       (u32 __user *)&frame->mc_vregs[32]))
		return 1;
	if (msr & MSR_VEC) {
		if (__put_user(current->thread.vrsave,
			       (u32 __user *)&tm_frame->mc_vregs[32]))
			return 1;
	} else {
		if (__put_user(current->thread.ckvrsave,
			       (u32 __user *)&tm_frame->mc_vregs[32]))
			return 1;
	}
#endif /* CONFIG_ALTIVEC */

	/* FP state follows the same checkpointed/transactional pattern */
	if (copy_ckfpr_to_user(&frame->mc_fregs, current))
		return 1;
	if (msr & MSR_FP) {
		if (copy_fpr_to_user(&tm_frame->mc_fregs, current))
			return 1;
	} else {
		if (copy_ckfpr_to_user(&tm_frame->mc_fregs, current))
			return 1;
	}

#ifdef CONFIG_VSX
	/*
	 * Copy VSR 0-31 upper half from thread_struct to local
	 * buffer, then write that to userspace.  Also set MSR_VSX in
	 * the saved MSR value to indicate that frame->mc_vregs
	 * contains valid data
	 */
	if (current->thread.used_vsr) {
		if (copy_ckvsx_to_user(&frame->mc_vsregs, current))
			return 1;
		if (msr & MSR_VSX) {
			if (copy_vsx_to_user(&tm_frame->mc_vsregs,
						      current))
				return 1;
		} else {
			if (copy_ckvsx_to_user(&tm_frame->mc_vsregs, current))
				return 1;
		}

		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* SPE regs are not checkpointed with TM, so this section is
	 * simply the same as in save_user_regs().
	 */
	if (current->thread.used_spe) {
		flush_spe_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
				   ELF_NEVRREG * sizeof(u32)))
			return 1;
		/* set MSR_SPE in the saved MSR value to indicate that
		 * frame->mc_vregs contains valid data */
		msr |= MSR_SPE;
	}

	/* We always copy to/from spefscr */
	if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	/* Accumulated MSR written last — its bits describe the frame */
	if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
		return 1;
	if (sigret) {
		/* Set up the sigreturn trampoline: li 0,sigret; sc */
		if (__put_user(PPC_INST_ADDI + sigret, &frame->tramp[0])
		    || __put_user(PPC_INST_SC, &frame->tramp[1]))
			return 1;
		flush_icache_range((unsigned long) &frame->tramp[0],
				   (unsigned long) &frame->tramp[2]);
	}

	return 0;
}
632#endif
633
1da177e4
LT
634/*
635 * Restore the current user register values from the user stack,
636 * (except for MSR).
637 */
/*
 * Restore user register state from the user-stack mcontext @sr.
 * @sig non-zero means a signal-handler return (restore MSR_LE, clobber
 * r2); zero means a swapcontext-style restore that preserves r2 (TLS).
 * The saved MSR's VEC/VSX/SPE bits say which optional sections of the
 * frame hold valid data; units not present are zeroed if previously
 * used.  Returns 1 on any fault, 0 on success.
 */
static long restore_user_regs(struct pt_regs *regs,
			      struct mcontext __user *sr, int sig)
{
	long err;
	unsigned int save_r2 = 0;
	unsigned long msr;
#ifdef CONFIG_VSX
	int i;
#endif

	/*
	 * restore general registers but not including MSR or SOFTE. Also
	 * take care of keeping r2 (TLS) intact if not a signal
	 */
	if (!sig)
		save_r2 = (unsigned int)regs->gpr[2];
	err = restore_general_regs(regs, sr);
	/* clear the trap word so this doesn't look like a syscall restart */
	regs->trap = 0;
	err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
	if (!sig)
		regs->gpr[2] = (unsigned long) save_r2;
	if (err)
		return 1;

	/* if doing signal return, restore the previous little-endian mode */
	if (sig)
		regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);

#ifdef CONFIG_ALTIVEC
	/*
	 * Force the process to reload the altivec registers from
	 * current->thread when it next does altivec instructions
	 */
	regs->msr &= ~MSR_VEC;
	if (msr & MSR_VEC) {
		/* restore altivec registers from the stack */
		if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
				     sizeof(sr->mc_vregs)))
			return 1;
		current->thread.used_vr = true;
	} else if (current->thread.used_vr)
		memset(&current->thread.vr_state, 0,
		       ELF_NVRREG * sizeof(vector128));

	/* Always get VRSAVE back */
	if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
		return 1;
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, current->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
	if (copy_fpr_from_user(current, &sr->mc_fregs))
		return 1;

#ifdef CONFIG_VSX
	/*
	 * Force the process to reload the VSX registers from
	 * current->thread when it next does VSX instruction.
	 */
	regs->msr &= ~MSR_VSX;
	if (msr & MSR_VSX) {
		/*
		 * Restore altivec registers from the stack to a local
		 * buffer, then write this out to the thread_struct
		 */
		if (copy_vsx_from_user(current, &sr->mc_vsregs))
			return 1;
		current->thread.used_vsr = true;
	} else if (current->thread.used_vsr)
		for (i = 0; i < 32 ; i++)
			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
#endif /* CONFIG_VSX */
	/*
	 * force the process to reload the FP registers from
	 * current->thread when it next does FP instructions
	 */
	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);

#ifdef CONFIG_SPE
	/* force the process to reload the spe registers from
	   current->thread when it next does spe instructions */
	regs->msr &= ~MSR_SPE;
	if (msr & MSR_SPE) {
		/* restore spe registers from the stack */
		if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
				     ELF_NEVRREG * sizeof(u32)))
			return 1;
		current->thread.used_spe = true;
	} else if (current->thread.used_spe)
		memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));

	/* Always get SPEFSCR back */
	if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	return 0;
}
735
2b0a576d
MN
736#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
737/*
738 * Restore the current user register values from the user stack, except for
739 * MSR, and recheckpoint the original checkpointed register state for processes
740 * in transactions.
741 */
/*
 * TM variant of restore_user_regs(): @sr holds the checkpointed state,
 * @tm_sr the transactional state.  Both are pulled from userspace
 * first; only then is MSR[TS] set and the thread recheckpointed —
 * after that point no get_user()/put_user() may run (see CAUTION
 * below).  Returns 1 on any fault or invalid context, 0 on success.
 */
static long restore_tm_user_regs(struct pt_regs *regs,
				 struct mcontext __user *sr,
				 struct mcontext __user *tm_sr)
{
	long err;
	unsigned long msr, msr_hi;
#ifdef CONFIG_VSX
	int i;
#endif

	/* Reject TM contexts entirely when TM suspend is disabled */
	if (tm_suspend_disabled)
		return 1;
	/*
	 * restore general registers but not including MSR or SOFTE. Also
	 * take care of keeping r2 (TLS) intact if not a signal.
	 * See comment in signal_64.c:restore_tm_sigcontexts();
	 * TFHAR is restored from the checkpointed NIP; TEXASR and TFIAR
	 * were set by the signal delivery.
	 */
	err = restore_general_regs(regs, tm_sr);
	err |= restore_general_regs(&current->thread.ckpt_regs, sr);

	err |= __get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP]);

	err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
	if (err)
		return 1;

	/* Restore the previous little-endian mode */
	regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);

#ifdef CONFIG_ALTIVEC
	regs->msr &= ~MSR_VEC;
	if (msr & MSR_VEC) {
		/* restore altivec registers from the stack:
		 * checkpointed set from sr, transactional set from tm_sr */
		if (__copy_from_user(&current->thread.ckvr_state, &sr->mc_vregs,
				     sizeof(sr->mc_vregs)) ||
		    __copy_from_user(&current->thread.vr_state,
				     &tm_sr->mc_vregs,
				     sizeof(sr->mc_vregs)))
			return 1;
		current->thread.used_vr = true;
	} else if (current->thread.used_vr) {
		memset(&current->thread.vr_state, 0,
		       ELF_NVRREG * sizeof(vector128));
		memset(&current->thread.ckvr_state, 0,
		       ELF_NVRREG * sizeof(vector128));
	}

	/* Always get VRSAVE back */
	if (__get_user(current->thread.ckvrsave,
		       (u32 __user *)&sr->mc_vregs[32]) ||
	    __get_user(current->thread.vrsave,
		       (u32 __user *)&tm_sr->mc_vregs[32]))
		return 1;
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, current->thread.ckvrsave);
#endif /* CONFIG_ALTIVEC */

	/* force FP reload from current->thread on next FP instruction */
	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);

	if (copy_fpr_from_user(current, &sr->mc_fregs) ||
	    copy_ckfpr_from_user(current, &tm_sr->mc_fregs))
		return 1;

#ifdef CONFIG_VSX
	regs->msr &= ~MSR_VSX;
	if (msr & MSR_VSX) {
		/*
		 * Restore altivec registers from the stack to a local
		 * buffer, then write this out to the thread_struct
		 */
		if (copy_vsx_from_user(current, &tm_sr->mc_vsregs) ||
		    copy_ckvsx_from_user(current, &sr->mc_vsregs))
			return 1;
		current->thread.used_vsr = true;
	} else if (current->thread.used_vsr)
		for (i = 0; i < 32 ; i++) {
			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
			current->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
		}
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE
	/* SPE regs are not checkpointed with TM, so this section is
	 * simply the same as in restore_user_regs().
	 */
	regs->msr &= ~MSR_SPE;
	if (msr & MSR_SPE) {
		if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
				     ELF_NEVRREG * sizeof(u32)))
			return 1;
		current->thread.used_spe = true;
	} else if (current->thread.used_spe)
		memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));

	/* Always get SPEFSCR back */
	if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs
		       + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	/* Get the top half of the MSR from the user context */
	if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
		return 1;
	msr_hi <<= 32;
	/* If TM bits are set to the reserved value, it's an invalid context */
	if (MSR_TM_RESV(msr_hi))
		return 1;

	/*
	 * Disabling preemption, since it is unsafe to be preempted
	 * with MSR[TS] set without recheckpointing.
	 */
	preempt_disable();

	/*
	 * CAUTION:
	 * After regs->MSR[TS] being updated, make sure that get_user(),
	 * put_user() or similar functions are *not* called. These
	 * functions can generate page faults which will cause the process
	 * to be de-scheduled with MSR[TS] set but without calling
	 * tm_recheckpoint(). This can cause a bug.
	 *
	 * Pull in the MSR TM bits from the user context
	 */
	regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
	/* Now, recheckpoint.  This loads up all of the checkpointed (older)
	 * registers, including FP and V[S]Rs.  After recheckpointing, the
	 * transactional versions should be loaded.
	 */
	tm_enable();
	/* Make sure the transaction is marked as failed */
	current->thread.tm_texasr |= TEXASR_FS;
	/* This loads the checkpointed FP/VEC state, if used */
	tm_recheckpoint(&current->thread);

	/* This loads the speculative FP/VEC state, if used */
	msr_check_and_set(msr & (MSR_FP | MSR_VEC));
	if (msr & MSR_FP) {
		load_fp_state(&current->thread.fp_state);
		regs->msr |= (MSR_FP | current->thread.fpexc_mode);
	}
#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		load_vr_state(&current->thread.vr_state);
		regs->msr |= MSR_VEC;
	}
#endif

	preempt_enable();

	return 0;
}
896#endif
897
81e7009e 898#ifdef CONFIG_PPC64
1da177e4 899
81e7009e
SR
900#define copy_siginfo_to_user copy_siginfo_to_user32
901
81e7009e 902#endif /* CONFIG_PPC64 */
1da177e4 903
1da177e4
LT
/*
 * Set up a signal frame for a "real-time" signal handler
 * (one which gets siginfo).
 *
 * Lays out a struct rt_sigframe on the (possibly alternate) user stack,
 * fills in siginfo and the ucontext, saves the user register state
 * (transactional and checkpointed state too, if a transaction is active),
 * then redirects the task to the handler.
 *
 * Returns 0 on success, 1 if the user stack frame could not be written
 * (the caller then forces a SIGSEGV).
 */
int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
		       struct task_struct *tsk)
{
	struct rt_sigframe __user *rt_sf;
	struct mcontext __user *frame;
	struct mcontext __user *tm_frame = NULL;
	void __user *addr;	/* tracks the last user address touched, for the badframe report */
	unsigned long newsp = 0;
	int sigret;		/* syscall number to store in the frame trampoline, 0 if the vDSO is used */
	unsigned long tramp;	/* user address the handler returns to */
	struct pt_regs *regs = tsk->thread.regs;

	BUG_ON(tsk != current);

	/* Set up Signal Frame */
	/* Put a Real Time Context onto stack */
	rt_sf = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*rt_sf), 1);
	addr = rt_sf;
	if (unlikely(rt_sf == NULL))
		goto badframe;

	/* Put the siginfo & fill in most of the ucontext */
	if (copy_siginfo_to_user(&rt_sf->info, &ksig->info)
	    || __put_user(0, &rt_sf->uc.uc_flags)
	    || __save_altstack(&rt_sf->uc.uc_stack, regs->gpr[1])
	    || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext),
		    &rt_sf->uc.uc_regs)
	    || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset))
		goto badframe;

	/* Save user registers on the stack */
	frame = &rt_sf->uc.uc_mcontext;
	addr = frame;
	/*
	 * Prefer the vDSO sigreturn trampoline; fall back to the
	 * syscall-instruction trampoline stored in the frame itself
	 * (sigret != 0 tells save_*_user_regs to emit it).
	 */
	if (vdso32_rt_sigtramp && tsk->mm->context.vdso_base) {
		sigret = 0;
		tramp = tsk->mm->context.vdso_base + vdso32_rt_sigtramp;
	} else {
		sigret = __NR_rt_sigreturn;
		tramp = (unsigned long) frame->tramp;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_frame = &rt_sf->uc_transact.uc_mcontext;
	if (MSR_TM_ACTIVE(regs->msr)) {
		/*
		 * In a live transaction, link the transactional ucontext
		 * off uc.uc_link so sigreturn can find and restore it.
		 */
		if (__put_user((unsigned long)&rt_sf->uc_transact,
			       &rt_sf->uc.uc_link) ||
		    __put_user((unsigned long)tm_frame,
			       &rt_sf->uc_transact.uc_regs))
			goto badframe;
		if (save_tm_user_regs(regs, frame, tm_frame, sigret))
			goto badframe;
	}
	else
#endif
	{
		/* No transaction: uc_link stays NULL. */
		if (__put_user(0, &rt_sf->uc.uc_link))
			goto badframe;
		if (save_user_regs(regs, frame, tm_frame, sigret, 1))
			goto badframe;
	}
	regs->link = tramp;

	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16);
	addr = (void __user *)regs->gpr[1];
	/* Store the back-chain pointer so the handler's frame links to the old stack. */
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	/* Fill registers for signal handler */
	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->gpr[4] = (unsigned long) &rt_sf->info;
	regs->gpr[5] = (unsigned long) &rt_sf->uc;
	regs->gpr[6] = (unsigned long) rt_sf;
	regs->nip = (unsigned long) ksig->ka.sa.sa_handler;
	/* enter the signal handler in native-endian mode */
	regs->msr &= ~MSR_LE;
	regs->msr |= (MSR_KERNEL & MSR_LE);
	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in handle_rt_signal32: "
				   "%p nip %08lx lr %08lx\n",
				   tsk->comm, tsk->pid,
				   addr, regs->nip, regs->link);

	return 1;
}
1000
81e7009e 1001static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
1da177e4 1002{
1da177e4 1003 sigset_t set;
81e7009e
SR
1004 struct mcontext __user *mcp;
1005
1006 if (get_sigset_t(&set, &ucp->uc_sigmask))
1007 return -EFAULT;
1008#ifdef CONFIG_PPC64
1009 {
1010 u32 cmcp;
1da177e4 1011
81e7009e
SR
1012 if (__get_user(cmcp, &ucp->uc_regs))
1013 return -EFAULT;
1014 mcp = (struct mcontext __user *)(u64)cmcp;
7c85d1f9 1015 /* no need to check access_ok(mcp), since mcp < 4GB */
81e7009e
SR
1016 }
1017#else
1018 if (__get_user(mcp, &ucp->uc_regs))
1da177e4 1019 return -EFAULT;
7c85d1f9
PM
1020 if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp)))
1021 return -EFAULT;
81e7009e 1022#endif
17440f17 1023 set_current_blocked(&set);
81e7009e 1024 if (restore_user_regs(regs, mcp, sig))
1da177e4
LT
1025 return -EFAULT;
1026
1027 return 0;
1028}
1029
2b0a576d
MN
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * TM-aware variant of do_setcontext(): installs the signal mask from
 * @ucp and restores both the checkpointed (@ucp) and transactional
 * (@tm_ucp) register images.  Only reached from the 64-bit compat
 * path, so both uc_regs fields are 32-bit user pointers.
 *
 * Returns 0 on success, -EFAULT if any user access fails.
 */
static int do_setcontext_tm(struct ucontext __user *ucp,
			    struct ucontext __user *tm_ucp,
			    struct pt_regs *regs)
{
	struct mcontext __user *mctx;
	struct mcontext __user *tm_mctx;
	sigset_t blocked;
	u32 mctx_addr;
	u32 tm_mctx_addr;

	if (get_sigset_t(&blocked, &ucp->uc_sigmask))
		return -EFAULT;

	if (__get_user(mctx_addr, &ucp->uc_regs))
		return -EFAULT;
	if (__get_user(tm_mctx_addr, &tm_ucp->uc_regs))
		return -EFAULT;
	mctx = (struct mcontext __user *)(u64)mctx_addr;
	tm_mctx = (struct mcontext __user *)(u64)tm_mctx_addr;
	/* no need to check access_ok(mctx), since mctx < 4GB */

	set_current_blocked(&blocked);

	return restore_tm_user_regs(regs, mctx, tm_mctx) ? -EFAULT : 0;
}
#endif
1058
f3675644
AV
#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
		       struct ucontext __user *, new_ctx, int, ctx_size)
#else
SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
		       struct ucontext __user *, new_ctx, long, ctx_size)
#endif
{
	/*
	 * sys_swapcontext: save the current register state into *old_ctx
	 * (if non-NULL) and switch to the state in *new_ctx (if non-NULL).
	 * ctx_size tells us whether the user's structure has room for VSX
	 * state (newer, larger ucontext) or not.
	 */
	struct pt_regs *regs = current_pt_regs();
	int ctx_has_vsx_region = 0;

#ifdef CONFIG_PPC64
	unsigned long new_msr = 0;

	if (new_ctx) {
		struct mcontext __user *mcp;
		u32 cmcp;

		/*
		 * Get pointer to the real mcontext.  No need for
		 * access_ok since we are dealing with compat
		 * pointers.
		 */
		if (__get_user(cmcp, &new_ctx->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* Peek at the new MSR to learn whether VSX state is claimed. */
		if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR]))
			return -EFAULT;
	}
	/*
	 * Check that the context is not smaller than the original
	 * size (with VMX but without VSX)
	 */
	if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
		return -EINVAL;
	/*
	 * If the new context state sets the MSR VSX bits but
	 * it doesn't provide VSX state.
	 */
	if ((ctx_size < sizeof(struct ucontext)) &&
	    (new_msr & MSR_VSX))
		return -EINVAL;
	/* Does the context have enough room to store VSX data? */
	if (ctx_size >= sizeof(struct ucontext))
		ctx_has_vsx_region = 1;
#else
	/* Context size is for future use. Right now, we only make sure
	 * we are passed something we understand
	 */
	if (ctx_size < sizeof(struct ucontext))
		return -EINVAL;
#endif
	if (old_ctx != NULL) {
		struct mcontext __user *mctx;

		/*
		 * old_ctx might not be 16-byte aligned, in which
		 * case old_ctx->uc_mcontext won't be either.
		 * Because we have the old_ctx->uc_pad2 field
		 * before old_ctx->uc_mcontext, we need to round down
		 * from &old_ctx->uc_mcontext to a 16-byte boundary.
		 */
		mctx = (struct mcontext __user *)
			((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
		/* Save current regs, mask and the mcontext pointer into *old_ctx. */
		if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size)
		    || save_user_regs(regs, mctx, NULL, 0, ctx_has_vsx_region)
		    || put_sigset_t(&old_ctx->uc_sigmask, &current->blocked)
		    || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
			return -EFAULT;
	}
	if (new_ctx == NULL)
		return 0;
	/* Pre-fault the whole region so do_setcontext() is unlikely to fault mid-restore. */
	if (!access_ok(VERIFY_READ, new_ctx, ctx_size) ||
	    fault_in_pages_readable((u8 __user *)new_ctx, ctx_size))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(new_ctx, regs, 0))
		do_exit(SIGSEGV);

	/* Full register image was replaced; restore everything on syscall exit. */
	set_thread_flag(TIF_RESTOREALL);
	return 0;
}
1152
f3675644
AV
1153#ifdef CONFIG_PPC64
1154COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
1155#else
1156SYSCALL_DEFINE0(rt_sigreturn)
1157#endif
1da177e4 1158{
81e7009e 1159 struct rt_sigframe __user *rt_sf;
f3675644 1160 struct pt_regs *regs = current_pt_regs();
2b0a576d
MN
1161#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1162 struct ucontext __user *uc_transact;
1163 unsigned long msr_hi;
1164 unsigned long tmp;
1165 int tm_restore = 0;
1166#endif
1da177e4 1167 /* Always make any pending restarted system calls return -EINTR */
f56141e3 1168 current->restart_block.fn = do_no_restart_syscall;
1da177e4 1169
81e7009e
SR
1170 rt_sf = (struct rt_sigframe __user *)
1171 (regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
1da177e4
LT
1172 if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf)))
1173 goto bad;
78a3e888 1174
2b0a576d 1175#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
78a3e888
CB
1176 /*
1177 * If there is a transactional state then throw it away.
1178 * The purpose of a sigreturn is to destroy all traces of the
1179 * signal frame, this includes any transactional state created
1180 * within in. We only check for suspended as we can never be
1181 * active in the kernel, we are active, there is nothing better to
1182 * do than go ahead and Bad Thing later.
1183 * The cause is not important as there will never be a
1184 * recheckpoint so it's not user visible.
1185 */
1186 if (MSR_TM_SUSPENDED(mfmsr()))
1187 tm_reclaim_current(0);
1188
2b0a576d
MN
1189 if (__get_user(tmp, &rt_sf->uc.uc_link))
1190 goto bad;
1191 uc_transact = (struct ucontext __user *)(uintptr_t)tmp;
1192 if (uc_transact) {
1193 u32 cmcp;
1194 struct mcontext __user *mcp;
1195
1196 if (__get_user(cmcp, &uc_transact->uc_regs))
1197 return -EFAULT;
1198 mcp = (struct mcontext __user *)(u64)cmcp;
1199 /* The top 32 bits of the MSR are stashed in the transactional
1200 * ucontext. */
1201 if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
1202 goto bad;
1203
55e43418 1204 if (MSR_TM_ACTIVE(msr_hi<<32)) {
2b0a576d
MN
1205 /* We only recheckpoint on return if we're
1206 * transaction.
1207 */
1208 tm_restore = 1;
1209 if (do_setcontext_tm(&rt_sf->uc, uc_transact, regs))
1210 goto bad;
1211 }
1212 }
1213 if (!tm_restore)
1214 /* Fall through, for non-TM restore */
1215#endif
81e7009e 1216 if (do_setcontext(&rt_sf->uc, regs, 1))
1da177e4
LT
1217 goto bad;
1218
1219 /*
1220 * It's not clear whether or why it is desirable to save the
1221 * sigaltstack setting on signal delivery and restore it on
1222 * signal return. But other architectures do this and we have
1223 * always done it up until now so it is probably better not to
1224 * change it. -- paulus
81e7009e
SR
1225 */
1226#ifdef CONFIG_PPC64
7cce2465
AV
1227 if (compat_restore_altstack(&rt_sf->uc.uc_stack))
1228 goto bad;
81e7009e 1229#else
7cce2465
AV
1230 if (restore_altstack(&rt_sf->uc.uc_stack))
1231 goto bad;
81e7009e 1232#endif
401d1f02
DW
1233 set_thread_flag(TIF_RESTOREALL);
1234 return 0;
1da177e4
LT
1235
1236 bad:
76462232
CD
1237 if (show_unhandled_signals)
1238 printk_ratelimited(KERN_INFO
1239 "%s[%d]: bad frame in sys_rt_sigreturn: "
1240 "%p nip %08lx lr %08lx\n",
1241 current->comm, current->pid,
1242 rt_sf, regs->nip, regs->link);
d0c3d534 1243
1da177e4
LT
1244 force_sig(SIGSEGV, current);
1245 return 0;
1246}
1247
#ifdef CONFIG_PPC32
SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
		       int, ndbg, struct sig_dbg_op __user *, dbg)
{
	/*
	 * sys_debug_setcontext: like setcontext, but additionally applies a
	 * list of ndbg debug operations (single-step / branch-trace toggles)
	 * before installing the new context.  PPC32 only.
	 */
	struct pt_regs *regs = current_pt_regs();
	struct sig_dbg_op op;
	int i;
	/* New values are accumulated here and installed only after the loop succeeds. */
	unsigned long new_msr = regs->msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long new_dbcr0 = current->thread.debug.dbcr0;
#endif

	for (i=0; i<ndbg; i++) {
		if (copy_from_user(&op, dbg + i, sizeof(op)))
			return -EFAULT;
		switch (op.dbg_type) {
		case SIG_DBG_SINGLE_STEPPING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			/* BookE-style debug: step via MSR_DE + DBCR0 instruction-complete. */
			if (op.dbg_value) {
				new_msr |= MSR_DE;
				new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
			} else {
				new_dbcr0 &= ~DBCR0_IC;
				/* Only drop MSR_DE/IDM if no other debug event remains armed. */
				if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
						current->thread.debug.dbcr1)) {
					new_msr &= ~MSR_DE;
					new_dbcr0 &= ~DBCR0_IDM;
				}
			}
#else
			/* Classic debug: single-step is just the MSR_SE bit. */
			if (op.dbg_value)
				new_msr |= MSR_SE;
			else
				new_msr &= ~MSR_SE;
#endif
			break;
		case SIG_DBG_BRANCH_TRACING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			/* Branch tracing is not supported on BookE debug hardware. */
			return -EINVAL;
#else
			if (op.dbg_value)
				new_msr |= MSR_BE;
			else
				new_msr &= ~MSR_BE;
#endif
			break;

		default:
			return -EINVAL;
		}
	}

	/* We wait until here to actually install the values in the
	   registers so if we fail in the above loop, it will not
	   affect the contents of these registers.  After this point,
	   failure is a problem, anyway, and it's very unlikely unless
	   the user is really doing something wrong. */
	regs->msr = new_msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	current->thread.debug.dbcr0 = new_dbcr0;
#endif

	/* Pre-fault the context so do_setcontext() is unlikely to fault mid-restore. */
	if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx)) ||
	    fault_in_pages_readable((u8 __user *)ctx, sizeof(*ctx)))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(ctx, regs, 1)) {
		if (show_unhandled_signals)
			printk_ratelimited(KERN_INFO "%s[%d]: bad frame in "
					   "sys_debug_setcontext: %p nip %08lx "
					   "lr %08lx\n",
					   current->comm, current->pid,
					   ctx, regs->nip, regs->link);

		force_sig(SIGSEGV, current);
		goto out;
	}

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return.  But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it.  -- paulus
	 */
	restore_altstack(&ctx->uc_stack);

	set_thread_flag(TIF_RESTOREALL);
 out:
	return 0;
}
#endif
1da177e4
LT
1351
/*
 * OK, we're invoking a handler
 *
 * Non-RT (classic) signal delivery: build a struct sigframe on the user
 * stack, save the register state (including transactional state if a
 * transaction is active), and redirect the task to the handler.
 * Returns 0 on success, 1 if the user frame could not be written.
 */
int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
		struct task_struct *tsk)
{
	struct sigcontext __user *sc;
	struct sigframe __user *frame;
	struct mcontext __user *tm_mctx = NULL;
	unsigned long newsp = 0;
	int sigret;		/* trampoline syscall number, 0 when the vDSO trampoline is used */
	unsigned long tramp;	/* user return address for the handler */
	struct pt_regs *regs = tsk->thread.regs;

	BUG_ON(tsk != current);

	/* Set up Signal Frame */
	frame = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*frame), 1);
	if (unlikely(frame == NULL))
		goto badframe;
	sc = (struct sigcontext __user *) &frame->sctx;

#if _NSIG != 64
#error "Please adjust handle_signal()"
#endif
	/*
	 * Fill the sigcontext.  The upper half of the 64-bit mask lands in
	 * _unused[3] (PPC64); on PPC32 sig[1] goes there instead.
	 */
	if (__put_user(to_user_ptr(ksig->ka.sa.sa_handler), &sc->handler)
	    || __put_user(oldset->sig[0], &sc->oldmask)
#ifdef CONFIG_PPC64
	    || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
#else
	    || __put_user(oldset->sig[1], &sc->_unused[3])
#endif
	    || __put_user(to_user_ptr(&frame->mctx), &sc->regs)
	    || __put_user(ksig->sig, &sc->signal))
		goto badframe;

	/* Prefer the vDSO trampoline; otherwise emit one into the frame. */
	if (vdso32_sigtramp && tsk->mm->context.vdso_base) {
		sigret = 0;
		tramp = tsk->mm->context.vdso_base + vdso32_sigtramp;
	} else {
		sigret = __NR_sigreturn;
		tramp = (unsigned long) frame->mctx.tramp;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_mctx = &frame->mctx_transact;
	if (MSR_TM_ACTIVE(regs->msr)) {
		/* Active transaction: save both checkpointed and transactional state. */
		if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
				      sigret))
			goto badframe;
	}
	else
#endif
	{
		if (save_user_regs(regs, &frame->mctx, tm_mctx, sigret, 1))
			goto badframe;
	}

	regs->link = tramp;

	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
	/* Store the back-chain word so the handler's frame links to the old stack. */
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->gpr[4] = (unsigned long) sc;
	regs->nip = (unsigned long) (unsigned long)ksig->ka.sa.sa_handler;
	/* enter the signal handler in big-endian mode */
	regs->msr &= ~MSR_LE;
	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in handle_signal32: "
				   "%p nip %08lx lr %08lx\n",
				   tsk->comm, tsk->pid,
				   frame, regs->nip, regs->link);

	return 1;
}
1437
/*
 * Do a signal return; undo the signal stack.
 *
 * Classic (non-RT) counterpart of rt_sigreturn: restores the signal
 * mask from the sigcontext saved by handle_signal32(), then the full
 * register state (via the TM path if the stashed MSR shows an active
 * transaction).  A bad frame gets a SIGSEGV.
 */
#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE0(sigreturn)
#else
SYSCALL_DEFINE0(sigreturn)
#endif
{
	struct pt_regs *regs = current_pt_regs();
	struct sigframe __user *sf;
	struct sigcontext __user *sc;
	struct sigcontext sigctx;
	struct mcontext __user *sr;
	void __user *addr;	/* last user address touched, for the badframe report */
	sigset_t set;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct mcontext __user *mcp, *tm_mcp;
	unsigned long msr_hi;
#endif

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/* The frame sits just above the back-chain word stored at delivery time. */
	sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
	sc = &sf->sctx;
	addr = sc;
	if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
		goto badframe;

#ifdef CONFIG_PPC64
	/*
	 * Note that PPC32 puts the upper 32 bits of the sigmask in the
	 * unused part of the signal stackframe
	 */
	set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
#else
	set.sig[0] = sigctx.oldmask;
	set.sig[1] = sigctx._unused[3];
#endif
	set_current_blocked(&set);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	mcp = (struct mcontext __user *)&sf->mctx;
	tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
	/* The top half of the MSR was stashed in the transactional mcontext. */
	if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
		goto badframe;
	if (MSR_TM_ACTIVE(msr_hi<<32)) {
		/* Reject a TM frame on hardware without TM (user controls msr_hi). */
		if (!cpu_has_feature(CPU_FTR_TM))
			goto badframe;
		if (restore_tm_user_regs(regs, mcp, tm_mcp))
			goto badframe;
	} else
#endif
	{
		/* Non-TM path: follow the regs pointer saved in the sigcontext. */
		sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
		addr = sr;
		if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
		    || restore_user_regs(regs, sr, 1))
			goto badframe;
	}

	/* Full register image was replaced; restore everything on syscall exit. */
	set_thread_flag(TIF_RESTOREALL);
	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in sys_sigreturn: "
				   "%p nip %08lx lr %08lx\n",
				   current->comm, current->pid,
				   addr, regs->nip, regs->link);

	force_sig(SIGSEGV, current);
	return 0;
}