// SPDX-License-Identifier: GPL-2.0
/*
 * Kernel support for the ptrace() and syscall tracing interfaces.
 *
 * Copyright (C) 1999-2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2006 Intel Co
 *  2006-08-12	- IA64 Native Utrace implementation support added by
 *	Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * Derived from the x86 and Alpha versions.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/regset.h>
#include <linux/elf.h>
#include <linux/tracehook.h>

#include <asm/processor.h>
#include <asm/ptrace_offsets.h>
#include <asm/rse.h>
#include <linux/uaccess.h>
#include <asm/unwind.h>
#ifdef CONFIG_PERFMON
#include <asm/perfmon.h>
#endif

#include "entry.h"

39/*
40 * Bits in the PSR that we allow ptrace() to change:
41 * be, up, ac, mfl, mfh (the user mask; five bits total)
42 * db (debug breakpoint fault; one bit)
43 * id (instruction debug fault disable; one bit)
44 * dd (data debug fault disable; one bit)
45 * ri (restart instruction; two bits)
46 * is (instruction set; one bit)
47 */
48#define IPSR_MASK (IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS \
49 | IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI)
50
51#define MASK(nbits) ((1UL << (nbits)) - 1) /* mask with NBITS bits set */
52#define PFM_MASK MASK(38)
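/*
 * Worked example (editor's note): MASK(3) == 0x7 and MASK(38) == 0x3fffffffff;
 * PFM_MASK therefore selects the 38 architected CFM bits (sof, sol, sor,
 * rrb.gr, rrb.fr, rrb.pr) when frame markers are compared or spliced below.
 */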
53
54#define PTRACE_DEBUG 0
55
56#if PTRACE_DEBUG
57# define dprintk(format...) printk(format)
58# define inline
59#else
60# define dprintk(format...)
61#endif
62
63/* Return TRUE if PT was created due to kernel-entry via a system-call. */
64
65static inline int
66in_syscall (struct pt_regs *pt)
67{
68 return (long) pt->cr_ifs >= 0;
69}
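/*
 * Editor's note: bit 63 of cr.ifs is its "valid" bit, which is set for frames
 * created by an interruption but left clear on the syscall entry path (which
 * records only the frame marker), so a non-negative signed value identifies
 * a syscall entry.
 */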
70
71/*
72 * Collect the NaT bits for r1-r31 from scratch_unat and return a NaT
73 * bitset where bit i is set iff the NaT bit of register i is set.
74 */
75unsigned long
76ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat)
77{
78# define GET_BITS(first, last, unat) \
79 ({ \
80 unsigned long bit = ia64_unat_pos(&pt->r##first); \
81 unsigned long nbits = (last - first + 1); \
82 unsigned long mask = MASK(nbits) << first; \
83 unsigned long dist; \
84 if (bit < first) \
85 dist = 64 + bit - first; \
86 else \
87 dist = bit - first; \
88 ia64_rotr(unat, dist) & mask; \
89 })
90 unsigned long val;
91
92 /*
93 * Registers that are stored consecutively in struct pt_regs
94 * can be handled in parallel. If the register order in
 * struct pt_regs changes, this code MUST be updated.
96 */
97 val = GET_BITS( 1, 1, scratch_unat);
98 val |= GET_BITS( 2, 3, scratch_unat);
99 val |= GET_BITS(12, 13, scratch_unat);
100 val |= GET_BITS(14, 14, scratch_unat);
101 val |= GET_BITS(15, 15, scratch_unat);
102 val |= GET_BITS( 8, 11, scratch_unat);
103 val |= GET_BITS(16, 31, scratch_unat);
104 return val;
105
106# undef GET_BITS
107}
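/*
 * Editor's note: ia64_unat_pos() returns the ar.unat bit that a register's
 * spill address maps to (one bit per 8-byte slot, modulo 64), so registers
 * that sit next to each other in struct pt_regs occupy consecutive unat bits
 * and each GET_BITS/PUT_BITS invocation can rotate and mask a whole run of
 * registers in one go.
 */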
108
109/*
110 * Set the NaT bits for the scratch registers according to NAT and
111 * return the resulting unat (assuming the scratch registers are
112 * stored in PT).
113 */
114unsigned long
115ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat)
116{
117# define PUT_BITS(first, last, nat) \
118 ({ \
119 unsigned long bit = ia64_unat_pos(&pt->r##first); \
120 unsigned long nbits = (last - first + 1); \
121 unsigned long mask = MASK(nbits) << first; \
122 long dist; \
123 if (bit < first) \
124 dist = 64 + bit - first; \
125 else \
126 dist = bit - first; \
127 ia64_rotl(nat & mask, dist); \
128 })
129 unsigned long scratch_unat;
130
131 /*
132 * Registers that are stored consecutively in struct pt_regs
133 * can be handled in parallel. If the register order in
 * struct pt_regs changes, this code MUST be updated.
135 */
136 scratch_unat = PUT_BITS( 1, 1, nat);
137 scratch_unat |= PUT_BITS( 2, 3, nat);
138 scratch_unat |= PUT_BITS(12, 13, nat);
139 scratch_unat |= PUT_BITS(14, 14, nat);
140 scratch_unat |= PUT_BITS(15, 15, nat);
141 scratch_unat |= PUT_BITS( 8, 11, nat);
142 scratch_unat |= PUT_BITS(16, 31, nat);
143
144 return scratch_unat;
145
146# undef PUT_BITS
147}
148
149#define IA64_MLX_TEMPLATE 0x2
150#define IA64_MOVL_OPCODE 6
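/*
 * Editor's note: IA-64 instructions are packed into 16-byte bundles of three
 * slots, and psr.ri (0..2) selects the slot execution resumes at.  An MLX
 * bundle (template 0x2) uses slots 1 and 2 together for a single movl, which
 * is why the helpers below refuse to leave psr.ri pointing at slot 2 of such
 * a bundle.
 */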
151
152void
153ia64_increment_ip (struct pt_regs *regs)
154{
155 unsigned long w0, ri = ia64_psr(regs)->ri + 1;
156
157 if (ri > 2) {
158 ri = 0;
159 regs->cr_iip += 16;
160 } else if (ri == 2) {
161 get_user(w0, (char __user *) regs->cr_iip + 0);
162 if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
163 /*
164 * rfi'ing to slot 2 of an MLX bundle causes
165 * an illegal operation fault. We don't want
166 * that to happen...
167 */
168 ri = 0;
169 regs->cr_iip += 16;
170 }
171 }
172 ia64_psr(regs)->ri = ri;
173}
174
175void
176ia64_decrement_ip (struct pt_regs *regs)
177{
178 unsigned long w0, ri = ia64_psr(regs)->ri - 1;
179
180 if (ia64_psr(regs)->ri == 0) {
181 regs->cr_iip -= 16;
182 ri = 2;
183 get_user(w0, (char __user *) regs->cr_iip + 0);
184 if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
185 /*
186 * rfi'ing to slot 2 of an MLX bundle causes
187 * an illegal operation fault. We don't want
188 * that to happen...
189 */
190 ri = 1;
191 }
192 }
193 ia64_psr(regs)->ri = ri;
194}
195
196/*
 * This routine is used to read the rnat bits that are stored on the
 * kernel backing store.  Since, in general, the alignment of the user
 * and kernel backing stores differs, this is not completely trivial.  In
200 * essence, we need to construct the user RNAT based on up to two
201 * kernel RNAT values and/or the RNAT value saved in the child's
202 * pt_regs.
203 *
204 * user rbs
205 *
206 * +--------+ <-- lowest address
207 * | slot62 |
208 * +--------+
209 * | rnat | 0x....1f8
210 * +--------+
211 * | slot00 | \
212 * +--------+ |
213 * | slot01 | > child_regs->ar_rnat
214 * +--------+ |
215 * | slot02 | / kernel rbs
216 * +--------+ +--------+
217 * <- child_regs->ar_bspstore | slot61 | <-- krbs
218 * +- - - - + +--------+
219 * | slot62 |
220 * +- - - - + +--------+
221 * | rnat |
222 * +- - - - + +--------+
223 * vrnat | slot00 |
224 * +- - - - + +--------+
225 * = =
226 * +--------+
227 * | slot00 | \
228 * +--------+ |
229 * | slot01 | > child_stack->ar_rnat
230 * +--------+ |
231 * | slot02 | /
232 * +--------+
233 * <--- child_stack->ar_bspstore
234 *
235 * The way to think of this code is as follows: bit 0 in the user rnat
236 * corresponds to some bit N (0 <= N <= 62) in one of the kernel rnat
 * values.  The kernel rnat value holding this bit is stored in
 * variable rnat0.  rnat1 is loaded with the kernel rnat value that
 * forms the upper bits of the user rnat value.
240 *
241 * Boundary cases:
242 *
243 * o when reading the rnat "below" the first rnat slot on the kernel
244 * backing store, rnat0/rnat1 are set to 0 and the low order bits are
245 * merged in from pt->ar_rnat.
246 *
247 * o when reading the rnat "above" the last rnat slot on the kernel
248 * backing store, rnat0/rnat1 gets its value from sw->ar_rnat.
249 */
250static unsigned long
251get_rnat (struct task_struct *task, struct switch_stack *sw,
252 unsigned long *krbs, unsigned long *urnat_addr,
253 unsigned long *urbs_end)
254{
255 unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr;
256 unsigned long umask = 0, mask, m;
257 unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
258 long num_regs, nbits;
259 struct pt_regs *pt;
260
	pt = task_pt_regs(task);
262 kbsp = (unsigned long *) sw->ar_bspstore;
263 ubspstore = (unsigned long *) pt->ar_bspstore;
264
265 if (urbs_end < urnat_addr)
266 nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_end);
267 else
268 nbits = 63;
269 mask = MASK(nbits);
270 /*
271 * First, figure out which bit number slot 0 in user-land maps
272 * to in the kernel rnat. Do this by figuring out how many
273 * register slots we're beyond the user's backingstore and
274 * then computing the equivalent address in kernel space.
275 */
276 num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
277 slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
278 shift = ia64_rse_slot_num(slot0_kaddr);
279 rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
280 rnat0_kaddr = rnat1_kaddr - 64;
281
282 if (ubspstore + 63 > urnat_addr) {
283 /* some bits need to be merged in from pt->ar_rnat */
284 umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
285 urnat = (pt->ar_rnat & umask);
286 mask &= ~umask;
287 if (!mask)
288 return urnat;
289 }
290
291 m = mask << shift;
292 if (rnat0_kaddr >= kbsp)
293 rnat0 = sw->ar_rnat;
294 else if (rnat0_kaddr > krbs)
295 rnat0 = *rnat0_kaddr;
296 urnat |= (rnat0 & m) >> shift;
297
298 m = mask >> (63 - shift);
299 if (rnat1_kaddr >= kbsp)
300 rnat1 = sw->ar_rnat;
301 else if (rnat1_kaddr > krbs)
302 rnat1 = *rnat1_kaddr;
303 urnat |= (rnat1 & m) << (63 - shift);
304 return urnat;
305}
306
307/*
308 * The reverse of get_rnat.
309 */
310static void
311put_rnat (struct task_struct *task, struct switch_stack *sw,
312 unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat,
313 unsigned long *urbs_end)
314{
315 unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, umask = 0, mask, m;
316 unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
317 long num_regs, nbits;
318 struct pt_regs *pt;
319 unsigned long cfm, *urbs_kargs;
320
	pt = task_pt_regs(task);
322 kbsp = (unsigned long *) sw->ar_bspstore;
323 ubspstore = (unsigned long *) pt->ar_bspstore;
324
325 urbs_kargs = urbs_end;
326 if (in_syscall(pt)) {
327 /*
328 * If entered via syscall, don't allow user to set rnat bits
329 * for syscall args.
330 */
331 cfm = pt->cr_ifs;
332 urbs_kargs = ia64_rse_skip_regs(urbs_end, -(cfm & 0x7f));
333 }
334
335 if (urbs_kargs >= urnat_addr)
336 nbits = 63;
337 else {
338 if ((urnat_addr - 63) >= urbs_kargs)
339 return;
340 nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_kargs);
341 }
342 mask = MASK(nbits);
343
344 /*
345 * First, figure out which bit number slot 0 in user-land maps
346 * to in the kernel rnat. Do this by figuring out how many
347 * register slots we're beyond the user's backingstore and
348 * then computing the equivalent address in kernel space.
349 */
350 num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
351 slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
352 shift = ia64_rse_slot_num(slot0_kaddr);
353 rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
354 rnat0_kaddr = rnat1_kaddr - 64;
355
356 if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be placed in pt->ar_rnat: */
358 umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
359 pt->ar_rnat = (pt->ar_rnat & ~umask) | (urnat & umask);
360 mask &= ~umask;
361 if (!mask)
362 return;
363 }
364 /*
365 * Note: Section 11.1 of the EAS guarantees that bit 63 of an
 * rnat slot is ignored, so we don't have to clear it here.
367 */
368 rnat0 = (urnat << shift);
369 m = mask << shift;
370 if (rnat0_kaddr >= kbsp)
371 sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat0 & m);
372 else if (rnat0_kaddr > krbs)
373 *rnat0_kaddr = ((*rnat0_kaddr & ~m) | (rnat0 & m));
374
375 rnat1 = (urnat >> (63 - shift));
376 m = mask >> (63 - shift);
377 if (rnat1_kaddr >= kbsp)
378 sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat1 & m);
379 else if (rnat1_kaddr > krbs)
380 *rnat1_kaddr = ((*rnat1_kaddr & ~m) | (rnat1 & m));
381}
382
383static inline int
384on_kernel_rbs (unsigned long addr, unsigned long bspstore,
385 unsigned long urbs_end)
386{
387 unsigned long *rnat_addr = ia64_rse_rnat_addr((unsigned long *)
388 urbs_end);
389 return (addr >= bspstore && addr <= (unsigned long) rnat_addr);
390}
391
392/*
393 * Read a word from the user-level backing store of task CHILD. ADDR
394 * is the user-level address to read the word from, VAL a pointer to
395 * the return value, and USER_BSP gives the end of the user-level
396 * backing store (i.e., it's the address that would be in ar.bsp after
397 * the user executed a "cover" instruction).
398 *
399 * This routine takes care of accessing the kernel register backing
400 * store for those registers that got spilled there. It also takes
401 * care of calculating the appropriate RNaT collection words.
402 */
403long
404ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
405 unsigned long user_rbs_end, unsigned long addr, long *val)
406{
407 unsigned long *bspstore, *krbs, regnum, *laddr, *urbs_end, *rnat_addr;
408 struct pt_regs *child_regs;
409 size_t copied;
410 long ret;
411
412 urbs_end = (long *) user_rbs_end;
413 laddr = (unsigned long *) addr;
	child_regs = task_pt_regs(child);
415 bspstore = (unsigned long *) child_regs->ar_bspstore;
416 krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
417 if (on_kernel_rbs(addr, (unsigned long) bspstore,
418 (unsigned long) urbs_end))
419 {
420 /*
421 * Attempt to read the RBS in an area that's actually
422 * on the kernel RBS => read the corresponding bits in
423 * the kernel RBS.
424 */
425 rnat_addr = ia64_rse_rnat_addr(laddr);
426 ret = get_rnat(child, child_stack, krbs, rnat_addr, urbs_end);
427
428 if (laddr == rnat_addr) {
429 /* return NaT collection word itself */
430 *val = ret;
431 return 0;
432 }
433
434 if (((1UL << ia64_rse_slot_num(laddr)) & ret) != 0) {
435 /*
436 * It is implementation dependent whether the
437 * data portion of a NaT value gets saved on a
438 * st8.spill or RSE spill (e.g., see EAS 2.6,
439 * 4.4.4.6 Register Spill and Fill). To get
440 * consistent behavior across all possible
441 * IA-64 implementations, we return zero in
442 * this case.
443 */
444 *val = 0;
445 return 0;
446 }
447
448 if (laddr < urbs_end) {
449 /*
450 * The desired word is on the kernel RBS and
451 * is not a NaT.
452 */
453 regnum = ia64_rse_num_regs(bspstore, laddr);
454 *val = *ia64_rse_skip_regs(krbs, regnum);
455 return 0;
456 }
457 }
	copied = access_process_vm(child, addr, &ret, sizeof(ret), FOLL_FORCE);
459 if (copied != sizeof(ret))
460 return -EIO;
461 *val = ret;
462 return 0;
463}
464
465long
466ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
467 unsigned long user_rbs_end, unsigned long addr, long val)
468{
469 unsigned long *bspstore, *krbs, regnum, *laddr;
470 unsigned long *urbs_end = (long *) user_rbs_end;
471 struct pt_regs *child_regs;
472
473 laddr = (unsigned long *) addr;
	child_regs = task_pt_regs(child);
475 bspstore = (unsigned long *) child_regs->ar_bspstore;
476 krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
477 if (on_kernel_rbs(addr, (unsigned long) bspstore,
478 (unsigned long) urbs_end))
479 {
480 /*
481 * Attempt to write the RBS in an area that's actually
482 * on the kernel RBS => write the corresponding bits
483 * in the kernel RBS.
484 */
485 if (ia64_rse_is_rnat_slot(laddr))
486 put_rnat(child, child_stack, krbs, laddr, val,
487 urbs_end);
488 else {
489 if (laddr < urbs_end) {
490 regnum = ia64_rse_num_regs(bspstore, laddr);
491 *ia64_rse_skip_regs(krbs, regnum) = val;
492 }
493 }
	} else if (access_process_vm(child, addr, &val, sizeof(val),
			FOLL_FORCE | FOLL_WRITE)
		   != sizeof(val))
497 return -EIO;
498 return 0;
499}
500
501/*
502 * Calculate the address of the end of the user-level register backing
503 * store. This is the address that would have been stored in ar.bsp
504 * if the user had executed a "cover" instruction right before
505 * entering the kernel. If CFMP is not NULL, it is used to return the
506 * "current frame mask" that was active at the time the kernel was
507 * entered.
508 */
509unsigned long
510ia64_get_user_rbs_end (struct task_struct *child, struct pt_regs *pt,
511 unsigned long *cfmp)
512{
513 unsigned long *krbs, *bspstore, cfm = pt->cr_ifs;
514 long ndirty;
515
516 krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
517 bspstore = (unsigned long *) pt->ar_bspstore;
518 ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
519
520 if (in_syscall(pt))
521 ndirty += (cfm & 0x7f);
522 else
523 cfm &= ~(1UL << 63); /* clear valid bit */
524
525 if (cfmp)
526 *cfmp = cfm;
527 return (unsigned long) ia64_rse_skip_regs(bspstore, ndirty);
528}
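/*
 * Editor's note: pt->loadrs holds the ar.rsc.loadrs field, i.e. the size of
 * the dirty partition on the kernel RBS in bytes shifted left by 16, so
 * (pt->loadrs >> 19) above is the number of 8-byte slots (dirty registers
 * plus any intervening RNaT collection words) that ia64_rse_num_regs()
 * converts into a register count.
 */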
529
530/*
531 * Synchronize (i.e, write) the RSE backing store living in kernel
532 * space to the VM of the CHILD task. SW and PT are the pointers to
533 * the switch_stack and pt_regs structures, respectively.
534 * USER_RBS_END is the user-level address at which the backing store
535 * ends.
536 */
537long
538ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
539 unsigned long user_rbs_start, unsigned long user_rbs_end)
540{
541 unsigned long addr, val;
542 long ret;
543
544 /* now copy word for word from kernel rbs to user rbs: */
545 for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
546 ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
547 if (ret < 0)
548 return ret;
		if (access_process_vm(child, addr, &val, sizeof(val),
				FOLL_FORCE | FOLL_WRITE)
		    != sizeof(val))
552 return -EIO;
553 }
554 return 0;
555}
556
557static long
558ia64_sync_kernel_rbs (struct task_struct *child, struct switch_stack *sw,
559 unsigned long user_rbs_start, unsigned long user_rbs_end)
560{
561 unsigned long addr, val;
562 long ret;
563
564 /* now copy word for word from user rbs to kernel rbs: */
565 for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
		if (access_process_vm(child, addr, &val, sizeof(val),
				FOLL_FORCE)
		    != sizeof(val))
569 return -EIO;
570
571 ret = ia64_poke(child, sw, user_rbs_end, addr, val);
572 if (ret < 0)
573 return ret;
574 }
575 return 0;
576}
577
578typedef long (*syncfunc_t)(struct task_struct *, struct switch_stack *,
579 unsigned long, unsigned long);
580
581static void do_sync_rbs(struct unw_frame_info *info, void *arg)
582{
583 struct pt_regs *pt;
584 unsigned long urbs_end;
585 syncfunc_t fn = arg;
586
587 if (unw_unwind_to_user(info) < 0)
588 return;
589 pt = task_pt_regs(info->task);
590 urbs_end = ia64_get_user_rbs_end(info->task, pt, NULL);
591
592 fn(info->task, info->sw, pt->ar_bspstore, urbs_end);
593}
594
/*
 * When a thread is stopped (ptraced), a debugger might change the thread's
 * user stack (by writing memory directly), and we must prevent the RSE state
 * stored in the kernel from overriding the user stack (the user-space RSE is
 * newer than the kernel's in that case).  To work around the issue, we copy
 * the kernel RSE to the user RSE before the task stops, so the user RSE holds
 * up-to-date data.  We then copy the user RSE back to the kernel after the
 * task is resumed from the traced stop, and the kernel uses the newer RSE to
 * return to user mode.  TIF_RESTORE_RSE is the flag that indicates we need to
 * synchronize the user RSE to the kernel.
 */
605void ia64_ptrace_stop(void)
606{
607 if (test_and_set_tsk_thread_flag(current, TIF_RESTORE_RSE))
608 return;
	set_notify_resume(current);
610 unw_init_running(do_sync_rbs, ia64_sync_user_rbs);
611}
612
613/*
614 * This is called to read back the register backing store.
615 */
616void ia64_sync_krbs(void)
617{
618 clear_tsk_thread_flag(current, TIF_RESTORE_RSE);
619
620 unw_init_running(do_sync_rbs, ia64_sync_kernel_rbs);
621}
622
623/*
624 * After PTRACE_ATTACH, a thread's register backing store area in user
625 * space is assumed to contain correct data whenever the thread is
626 * stopped. arch_ptrace_stop takes care of this on tracing stops.
627 * But if the child was already stopped for job control when we attach
628 * to it, then it might not ever get into ptrace_stop by the time we
629 * want to examine the user memory containing the RBS.
630 */
631void
632ptrace_attach_sync_user_rbs (struct task_struct *child)
633{
634 int stopped = 0;
635 struct unw_frame_info info;
636
637 /*
638 * If the child is in TASK_STOPPED, we need to change that to
639 * TASK_TRACED momentarily while we operate on it. This ensures
640 * that the child won't be woken up and return to user mode while
641 * we are doing the sync. (It can only be woken up for SIGKILL.)
642 */
643
644 read_lock(&tasklist_lock);
	if (child->sighand) {
646 spin_lock_irq(&child->sighand->siglock);
647 if (child->state == TASK_STOPPED &&
648 !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
			set_notify_resume(child);
650
651 child->state = TASK_TRACED;
652 stopped = 1;
653 }
654 spin_unlock_irq(&child->sighand->siglock);
655 }
656 read_unlock(&tasklist_lock);
657
658 if (!stopped)
659 return;
660
661 unw_init_from_blocked_task(&info, child);
662 do_sync_rbs(&info, ia64_sync_user_rbs);
663
664 /*
665 * Now move the child back into TASK_STOPPED if it should be in a
666 * job control stop, so that SIGCONT can be used to wake it up.
667 */
668 read_lock(&tasklist_lock);
	if (child->sighand) {
670 spin_lock_irq(&child->sighand->siglock);
671 if (child->state == TASK_TRACED &&
672 (child->signal->flags & SIGNAL_STOP_STOPPED)) {
673 child->state = TASK_STOPPED;
674 }
675 spin_unlock_irq(&child->sighand->siglock);
676 }
677 read_unlock(&tasklist_lock);
678}
679
680/*
681 * Write f32-f127 back to task->thread.fph if it has been modified.
682 */
683inline void
684ia64_flush_fph (struct task_struct *task)
685{
	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));

688 /*
689 * Prevent migrating this task while
690 * we're fiddling with the FPU state
691 */
692 preempt_disable();
693 if (ia64_is_local_fpu_owner(task) && psr->mfh) {
694 psr->mfh = 0;
695 task->thread.flags |= IA64_THREAD_FPH_VALID;
696 ia64_save_fpu(&task->thread.fph[0]);
697 }
	preempt_enable();
699}
700
701/*
702 * Sync the fph state of the task so that it can be manipulated
703 * through thread.fph. If necessary, f32-f127 are written back to
704 * thread.fph or, if the fph state hasn't been used before, thread.fph
705 * is cleared to zeroes. Also, access to f32-f127 is disabled to
706 * ensure that the task picks up the state from thread.fph when it
707 * executes again.
708 */
709void
710ia64_sync_fph (struct task_struct *task)
711{
	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
713
714 ia64_flush_fph(task);
715 if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
716 task->thread.flags |= IA64_THREAD_FPH_VALID;
717 memset(&task->thread.fph, 0, sizeof(task->thread.fph));
718 }
719 ia64_drop_fpu(task);
720 psr->dfh = 1;
721}
722
723/*
724 * Change the machine-state of CHILD such that it will return via the normal
725 * kernel exit-path, rather than the syscall-exit path.
726 */
727static void
728convert_to_non_syscall (struct task_struct *child, struct pt_regs *pt,
729 unsigned long cfm)
730{
731 struct unw_frame_info info, prev_info;
	unsigned long ip, sp, pr;
733
734 unw_init_from_blocked_task(&info, child);
735 while (1) {
736 prev_info = info;
737 if (unw_unwind(&info) < 0)
738 return;
739
740 unw_get_sp(&info, &sp);
741 if ((long)((unsigned long)child + IA64_STK_OFFSET - sp)
742 < IA64_PT_REGS_SIZE) {
743 dprintk("ptrace.%s: ran off the top of the kernel "
				"stack\n", __func__);
745 return;
746 }
747 if (unw_get_pr (&prev_info, &pr) < 0) {
748 unw_get_rp(&prev_info, &ip);
749 dprintk("ptrace.%s: failed to read "
750 "predicate register (ip=0x%lx)\n",
				__func__, ip);
			return;
753 }
754 if (unw_is_intr_frame(&info)
755 && (pr & (1UL << PRED_USER_STACK)))
756 break;
757 }
758
759 /*
760 * Note: at the time of this call, the target task is blocked
 * in notify_resume_user() and by clearing PRED_LEAVE_SYSCALL
762 * (aka, "pLvSys") we redirect execution from
763 * .work_pending_syscall_end to .work_processed_kernel.
764 */
	unw_get_pr(&prev_info, &pr);
	pr &= ~((1UL << PRED_SYSCALL) | (1UL << PRED_LEAVE_SYSCALL));
767 pr |= (1UL << PRED_NON_SYSCALL);
768 unw_set_pr(&prev_info, pr);
769
770 pt->cr_ifs = (1UL << 63) | cfm;
771 /*
772 * Clear the memory that is NOT written on syscall-entry to
773 * ensure we do not leak kernel-state to user when execution
774 * resumes.
775 */
776 pt->r2 = 0;
777 pt->r3 = 0;
778 pt->r14 = 0;
779 memset(&pt->r16, 0, 16*8); /* clear r16-r31 */
780 memset(&pt->f6, 0, 6*16); /* clear f6-f11 */
781 pt->b7 = 0;
782 pt->ar_ccv = 0;
783 pt->ar_csd = 0;
784 pt->ar_ssd = 0;
785}
786
787static int
788access_nat_bits (struct task_struct *child, struct pt_regs *pt,
789 struct unw_frame_info *info,
790 unsigned long *data, int write_access)
791{
792 unsigned long regnum, nat_bits, scratch_unat, dummy = 0;
793 char nat = 0;
794
795 if (write_access) {
796 nat_bits = *data;
797 scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits);
798 if (unw_set_ar(info, UNW_AR_UNAT, scratch_unat) < 0) {
799 dprintk("ptrace: failed to set ar.unat\n");
800 return -1;
801 }
802 for (regnum = 4; regnum <= 7; ++regnum) {
803 unw_get_gr(info, regnum, &dummy, &nat);
804 unw_set_gr(info, regnum, dummy,
805 (nat_bits >> regnum) & 1);
806 }
807 } else {
808 if (unw_get_ar(info, UNW_AR_UNAT, &scratch_unat) < 0) {
809 dprintk("ptrace: failed to read ar.unat\n");
810 return -1;
811 }
812 nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat);
813 for (regnum = 4; regnum <= 7; ++regnum) {
814 unw_get_gr(info, regnum, &dummy, &nat);
815 nat_bits |= (nat != 0) << regnum;
816 }
817 *data = nat_bits;
818 }
819 return 0;
820}
821
822static int
823access_uarea (struct task_struct *child, unsigned long addr,
	      unsigned long *data, int write_access);
825
826static long
827ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
828{
829 unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val;
830 struct unw_frame_info info;
831 struct ia64_fpreg fpval;
832 struct switch_stack *sw;
833 struct pt_regs *pt;
834 long ret, retval = 0;
835 char nat = 0;
836 int i;
837
	if (!access_ok(ppr, sizeof(struct pt_all_user_regs)))
839 return -EIO;
840
	pt = task_pt_regs(child);
842 sw = (struct switch_stack *) (child->thread.ksp + 16);
843 unw_init_from_blocked_task(&info, child);
844 if (unw_unwind_to_user(&info) < 0) {
845 return -EIO;
846 }
847
848 if (((unsigned long) ppr & 0x7) != 0) {
849 dprintk("ptrace:unaligned register address %p\n", ppr);
850 return -EIO;
851 }
852
853 if (access_uarea(child, PT_CR_IPSR, &psr, 0) < 0
854 || access_uarea(child, PT_AR_EC, &ec, 0) < 0
855 || access_uarea(child, PT_AR_LC, &lc, 0) < 0
856 || access_uarea(child, PT_AR_RNAT, &rnat, 0) < 0
857 || access_uarea(child, PT_AR_BSP, &bsp, 0) < 0
858 || access_uarea(child, PT_CFM, &cfm, 0)
859 || access_uarea(child, PT_NAT_BITS, &nat_bits, 0))
860 return -EIO;
861
862 /* control regs */
863
864 retval |= __put_user(pt->cr_iip, &ppr->cr_iip);
865 retval |= __put_user(psr, &ppr->cr_ipsr);
866
867 /* app regs */
868
869 retval |= __put_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
870 retval |= __put_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
871 retval |= __put_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
872 retval |= __put_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
873 retval |= __put_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
874 retval |= __put_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);
875
876 retval |= __put_user(ec, &ppr->ar[PT_AUR_EC]);
877 retval |= __put_user(lc, &ppr->ar[PT_AUR_LC]);
878 retval |= __put_user(rnat, &ppr->ar[PT_AUR_RNAT]);
879 retval |= __put_user(bsp, &ppr->ar[PT_AUR_BSP]);
880 retval |= __put_user(cfm, &ppr->cfm);
881
882 /* gr1-gr3 */
883
884 retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long));
885 retval |= __copy_to_user(&ppr->gr[2], &pt->r2, sizeof(long) *2);
886
887 /* gr4-gr7 */
888
889 for (i = 4; i < 8; i++) {
890 if (unw_access_gr(&info, i, &val, &nat, 0) < 0)
891 return -EIO;
892 retval |= __put_user(val, &ppr->gr[i]);
893 }
894
895 /* gr8-gr11 */
896
897 retval |= __copy_to_user(&ppr->gr[8], &pt->r8, sizeof(long) * 4);
898
899 /* gr12-gr15 */
900
901 retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 2);
902 retval |= __copy_to_user(&ppr->gr[14], &pt->r14, sizeof(long));
903 retval |= __copy_to_user(&ppr->gr[15], &pt->r15, sizeof(long));
904
905 /* gr16-gr31 */
906
907 retval |= __copy_to_user(&ppr->gr[16], &pt->r16, sizeof(long) * 16);
908
909 /* b0 */
910
911 retval |= __put_user(pt->b0, &ppr->br[0]);
912
913 /* b1-b5 */
914
915 for (i = 1; i < 6; i++) {
916 if (unw_access_br(&info, i, &val, 0) < 0)
917 return -EIO;
918 __put_user(val, &ppr->br[i]);
919 }
920
921 /* b6-b7 */
922
923 retval |= __put_user(pt->b6, &ppr->br[6]);
924 retval |= __put_user(pt->b7, &ppr->br[7]);
925
926 /* fr2-fr5 */
927
928 for (i = 2; i < 6; i++) {
929 if (unw_get_fr(&info, i, &fpval) < 0)
930 return -EIO;
931 retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
932 }
933
934 /* fr6-fr11 */
935
936 retval |= __copy_to_user(&ppr->fr[6], &pt->f6,
937 sizeof(struct ia64_fpreg) * 6);
938
939 /* fp scratch regs(12-15) */
940
941 retval |= __copy_to_user(&ppr->fr[12], &sw->f12,
942 sizeof(struct ia64_fpreg) * 4);
943
944 /* fr16-fr31 */
945
946 for (i = 16; i < 32; i++) {
947 if (unw_get_fr(&info, i, &fpval) < 0)
948 return -EIO;
949 retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
950 }
951
952 /* fph */
953
954 ia64_flush_fph(child);
955 retval |= __copy_to_user(&ppr->fr[32], &child->thread.fph,
956 sizeof(ppr->fr[32]) * 96);
957
958 /* preds */
959
960 retval |= __put_user(pt->pr, &ppr->pr);
961
962 /* nat bits */
963
964 retval |= __put_user(nat_bits, &ppr->nat);
965
966 ret = retval ? -EIO : 0;
967 return ret;
968}
969
970static long
971ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
972{
	unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
974 struct unw_frame_info info;
975 struct switch_stack *sw;
976 struct ia64_fpreg fpval;
977 struct pt_regs *pt;
978 long ret, retval = 0;
979 int i;
980
981 memset(&fpval, 0, sizeof(fpval));
982
	if (!access_ok(ppr, sizeof(struct pt_all_user_regs)))
984 return -EIO;
985
	pt = task_pt_regs(child);
987 sw = (struct switch_stack *) (child->thread.ksp + 16);
988 unw_init_from_blocked_task(&info, child);
989 if (unw_unwind_to_user(&info) < 0) {
990 return -EIO;
991 }
992
993 if (((unsigned long) ppr & 0x7) != 0) {
994 dprintk("ptrace:unaligned register address %p\n", ppr);
995 return -EIO;
996 }
997
998 /* control regs */
999
1000 retval |= __get_user(pt->cr_iip, &ppr->cr_iip);
1001 retval |= __get_user(psr, &ppr->cr_ipsr);
1002
1003 /* app regs */
1004
1005 retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
	retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
1007 retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
1008 retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
1009 retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
1010 retval |= __get_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);
1011
1012 retval |= __get_user(ec, &ppr->ar[PT_AUR_EC]);
1013 retval |= __get_user(lc, &ppr->ar[PT_AUR_LC]);
1014 retval |= __get_user(rnat, &ppr->ar[PT_AUR_RNAT]);
1015 retval |= __get_user(bsp, &ppr->ar[PT_AUR_BSP]);
1016 retval |= __get_user(cfm, &ppr->cfm);
1017
1018 /* gr1-gr3 */
1019
1020 retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long));
1021 retval |= __copy_from_user(&pt->r2, &ppr->gr[2], sizeof(long) * 2);
1022
1023 /* gr4-gr7 */
1024
1025 for (i = 4; i < 8; i++) {
1026 retval |= __get_user(val, &ppr->gr[i]);
1027 /* NaT bit will be set via PT_NAT_BITS: */
1028 if (unw_set_gr(&info, i, val, 0) < 0)
1029 return -EIO;
1030 }
1031
1032 /* gr8-gr11 */
1033
1034 retval |= __copy_from_user(&pt->r8, &ppr->gr[8], sizeof(long) * 4);
1035
1036 /* gr12-gr15 */
1037
1038 retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 2);
1039 retval |= __copy_from_user(&pt->r14, &ppr->gr[14], sizeof(long));
1040 retval |= __copy_from_user(&pt->r15, &ppr->gr[15], sizeof(long));
1041
1042 /* gr16-gr31 */
1043
1044 retval |= __copy_from_user(&pt->r16, &ppr->gr[16], sizeof(long) * 16);
1045
1046 /* b0 */
1047
1048 retval |= __get_user(pt->b0, &ppr->br[0]);
1049
1050 /* b1-b5 */
1051
1052 for (i = 1; i < 6; i++) {
1053 retval |= __get_user(val, &ppr->br[i]);
1054 unw_set_br(&info, i, val);
1055 }
1056
1057 /* b6-b7 */
1058
1059 retval |= __get_user(pt->b6, &ppr->br[6]);
1060 retval |= __get_user(pt->b7, &ppr->br[7]);
1061
1062 /* fr2-fr5 */
1063
1064 for (i = 2; i < 6; i++) {
1065 retval |= __copy_from_user(&fpval, &ppr->fr[i], sizeof(fpval));
1066 if (unw_set_fr(&info, i, fpval) < 0)
1067 return -EIO;
1068 }
1069
1070 /* fr6-fr11 */
1071
1072 retval |= __copy_from_user(&pt->f6, &ppr->fr[6],
1073 sizeof(ppr->fr[6]) * 6);
1074
1075 /* fp scratch regs(12-15) */
1076
1077 retval |= __copy_from_user(&sw->f12, &ppr->fr[12],
1078 sizeof(ppr->fr[12]) * 4);
1079
1080 /* fr16-fr31 */
1081
1082 for (i = 16; i < 32; i++) {
1083 retval |= __copy_from_user(&fpval, &ppr->fr[i],
1084 sizeof(fpval));
1085 if (unw_set_fr(&info, i, fpval) < 0)
1086 return -EIO;
1087 }
1088
1089 /* fph */
1090
1091 ia64_sync_fph(child);
1092 retval |= __copy_from_user(&child->thread.fph, &ppr->fr[32],
1093 sizeof(ppr->fr[32]) * 96);
1094
1095 /* preds */
1096
1097 retval |= __get_user(pt->pr, &ppr->pr);
1098
1099 /* nat bits */
1100
1101 retval |= __get_user(nat_bits, &ppr->nat);
1102
1103 retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
	retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
1105 retval |= access_uarea(child, PT_AR_EC, &ec, 1);
1106 retval |= access_uarea(child, PT_AR_LC, &lc, 1);
1107 retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
1108 retval |= access_uarea(child, PT_AR_BSP, &bsp, 1);
1109 retval |= access_uarea(child, PT_CFM, &cfm, 1);
1110 retval |= access_uarea(child, PT_NAT_BITS, &nat_bits, 1);
1111
1112 ret = retval ? -EIO : 0;
1113 return ret;
1114}
1115
1116void
1117user_enable_single_step (struct task_struct *child)
1118{
1119 struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
1120
1121 set_tsk_thread_flag(child, TIF_SINGLESTEP);
1122 child_psr->ss = 1;
1123}
1124
1125void
1126user_enable_block_step (struct task_struct *child)
1127{
1128 struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
1129
1130 set_tsk_thread_flag(child, TIF_SINGLESTEP);
1131 child_psr->tb = 1;
1132}
1133
1134void
1135user_disable_single_step (struct task_struct *child)
1136{
1137 struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
1138
1139 /* make sure the single step/taken-branch trap bits are not set: */
1140 clear_tsk_thread_flag(child, TIF_SINGLESTEP);
1141 child_psr->ss = 0;
1142 child_psr->tb = 0;
1143}
1144
1145/*
1146 * Called by kernel/ptrace.c when detaching..
1147 *
1148 * Make sure the single step bit is not set.
1149 */
1150void
1151ptrace_disable (struct task_struct *child)
1152{
	user_disable_single_step(child);
1154}
1155
long
arch_ptrace (struct task_struct *child, long request,
	     unsigned long addr, unsigned long data)
{
	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		/* read word at location addr */
		if (ptrace_access_vm(child, addr, &data, sizeof(data),
				FOLL_FORCE)
		    != sizeof(data))
			return -EIO;
		/* ensure return value is not mistaken for error code */
		force_successful_syscall_return();
		return data;

	/* PTRACE_POKETEXT and PTRACE_POKEDATA are handled
	 * by the generic ptrace_request().
	 */

	case PTRACE_PEEKUSR:
		/* read the word at addr in the USER area */
		if (access_uarea(child, addr, &data, 0) < 0)
			return -EIO;
		/* ensure return value is not mistaken for error code */
		force_successful_syscall_return();
		return data;

	case PTRACE_POKEUSR:
		/* write the word at addr in the USER area */
		if (access_uarea(child, addr, &data, 1) < 0)
			return -EIO;
		return 0;

	case PTRACE_OLD_GETSIGINFO:
		/* for backwards-compatibility */
		return ptrace_request(child, PTRACE_GETSIGINFO, addr, data);

	case PTRACE_OLD_SETSIGINFO:
		/* for backwards-compatibility */
		return ptrace_request(child, PTRACE_SETSIGINFO, addr, data);
1197
1198 case PTRACE_GETREGS:
1199 return ptrace_getregs(child,
1200 (struct pt_all_user_regs __user *) data);
1201
1202 case PTRACE_SETREGS:
1203 return ptrace_setregs(child,
1204 (struct pt_all_user_regs __user *) data);
1205
1206 default:
1207 return ptrace_request(child, request, addr, data);
	}
}
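/*
 * Illustrative user-space sketch (editor's note, not kernel code): a tracer
 * can reach the access_uarea() path above with PTRACE_PEEKUSER and an offset
 * from <asm/ptrace_offsets.h>, e.g. to read a stopped child's instruction
 * pointer:
 *
 *	errno = 0;
 *	long iip = ptrace(PTRACE_PEEKUSER, pid, PT_CR_IIP, 0);
 *	if (iip == -1 && errno != 0)
 *		perror("PTRACE_PEEKUSER");
 *
 * PT_CR_IIP is routed through access_uarea() to ELF_CR_IIP_OFFSET and ends
 * up reading pt->cr_iip.
 */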
1210
1211
1212/* "asmlinkage" so the input arguments are preserved... */
1213
asmlinkage long
1215syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
1216 long arg4, long arg5, long arg6, long arg7,
1217 struct pt_regs regs)
1218{
	if (test_thread_flag(TIF_SYSCALL_TRACE))
		if (tracehook_report_syscall_entry(&regs))
			return -ENOSYS;

1223 /* copy user rbs to kernel rbs */
1224 if (test_thread_flag(TIF_RESTORE_RSE))
1225 ia64_sync_krbs();
1226
	audit_syscall_entry(regs.r15, arg0, arg1, arg2, arg3);

	return 0;
1231}
1232
1233/* "asmlinkage" so the input arguments are preserved... */
1234
1235asmlinkage void
1236syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
1237 long arg4, long arg5, long arg6, long arg7,
1238 struct pt_regs regs)
1239{
1240 int step;
1241
	audit_syscall_exit(&regs);

1244 step = test_thread_flag(TIF_SINGLESTEP);
1245 if (step || test_thread_flag(TIF_SYSCALL_TRACE))
1246 tracehook_report_syscall_exit(&regs, step);
1247
1248 /* copy user rbs to kernel rbs */
1249 if (test_thread_flag(TIF_RESTORE_RSE))
1250 ia64_sync_krbs();
}
1252
1253/* Utrace implementation starts here */
1254struct regset_get {
1255 void *kbuf;
1256 void __user *ubuf;
1257};
1258
1259struct regset_set {
1260 const void *kbuf;
1261 const void __user *ubuf;
1262};
1263
1264struct regset_getset {
1265 struct task_struct *target;
1266 const struct user_regset *regset;
1267 union {
1268 struct regset_get get;
1269 struct regset_set set;
1270 } u;
1271 unsigned int pos;
1272 unsigned int count;
1273 int ret;
1274};
1275
static const ptrdiff_t pt_offsets[32] =
1277{
1278#define R(n) offsetof(struct pt_regs, r##n)
1279 [0] = -1, R(1), R(2), R(3),
1280 [4] = -1, [5] = -1, [6] = -1, [7] = -1,
1281 R(8), R(9), R(10), R(11), R(12), R(13), R(14), R(15),
1282 R(16), R(17), R(18), R(19), R(20), R(21), R(22), R(23),
1283 R(24), R(25), R(26), R(27), R(28), R(29), R(30), R(31),
1284#undef R
1285};
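/*
 * Editor's note: a -1 entry marks a register that is not saved in struct
 * pt_regs (r0 is architecturally zero, r4-r7 are preserved registers), so
 * access_elf_gpreg() below falls back to the unwinder for those instead of
 * a direct load/store at a pt_regs offset.
 */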
1286
1287static int
1288access_elf_gpreg(struct task_struct *target, struct unw_frame_info *info,
1289 unsigned long addr, unsigned long *data, int write_access)
1290{
1291 struct pt_regs *pt = task_pt_regs(target);
1292 unsigned reg = addr / sizeof(unsigned long);
1293 ptrdiff_t d = pt_offsets[reg];
1294
1295 if (d >= 0) {
1296 unsigned long *ptr = (void *)pt + d;
1297 if (write_access)
1298 *ptr = *data;
1299 else
1300 *data = *ptr;
1301 return 0;
1302 } else {
1303 char nat = 0;
1304 if (write_access) {
1305 /* read NaT bit first: */
1306 unsigned long dummy;
			int ret = unw_get_gr(info, reg, &dummy, &nat);
1308 if (ret < 0)
1309 return ret;
1310 }
		return unw_access_gr(info, reg, data, &nat, write_access);
	}
1313}
1314
1315static int
1316access_elf_breg(struct task_struct *target, struct unw_frame_info *info,
1317 unsigned long addr, unsigned long *data, int write_access)
1318{
1319 struct pt_regs *pt;
1320 unsigned long *ptr = NULL;
1321
1322 pt = task_pt_regs(target);
1323 switch (addr) {
1324 case ELF_BR_OFFSET(0):
1325 ptr = &pt->b0;
1326 break;
1327 case ELF_BR_OFFSET(1) ... ELF_BR_OFFSET(5):
1328 return unw_access_br(info, (addr - ELF_BR_OFFSET(0))/8,
1329 data, write_access);
1330 case ELF_BR_OFFSET(6):
1331 ptr = &pt->b6;
1332 break;
1333 case ELF_BR_OFFSET(7):
1334 ptr = &pt->b7;
1335 }
1336 if (write_access)
1337 *ptr = *data;
1338 else
1339 *data = *ptr;
1340 return 0;
1341}
1342
1343static int
1344access_elf_areg(struct task_struct *target, struct unw_frame_info *info,
1345 unsigned long addr, unsigned long *data, int write_access)
1346{
1347 struct pt_regs *pt;
1348 unsigned long cfm, urbs_end;
1349 unsigned long *ptr = NULL;
1350
1351 pt = task_pt_regs(target);
1352 if (addr >= ELF_AR_RSC_OFFSET && addr <= ELF_AR_SSD_OFFSET) {
1353 switch (addr) {
1354 case ELF_AR_RSC_OFFSET:
1355 /* force PL3 */
1356 if (write_access)
1357 pt->ar_rsc = *data | (3 << 2);
1358 else
1359 *data = pt->ar_rsc;
1360 return 0;
1361 case ELF_AR_BSP_OFFSET:
1362 /*
1363 * By convention, we use PT_AR_BSP to refer to
1364 * the end of the user-level backing store.
1365 * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof)
1366 * to get the real value of ar.bsp at the time
1367 * the kernel was entered.
1368 *
1369 * Furthermore, when changing the contents of
1370 * PT_AR_BSP (or PT_CFM) while the task is
1371 * blocked in a system call, convert the state
1372 * so that the non-system-call exit
1373 * path is used. This ensures that the proper
1374 * state will be picked up when resuming
1375 * execution. However, it *also* means that
1376 * once we write PT_AR_BSP/PT_CFM, it won't be
1377 * possible to modify the syscall arguments of
1378 * the pending system call any longer. This
1379 * shouldn't be an issue because modifying
1380 * PT_AR_BSP/PT_CFM generally implies that
1381 * we're either abandoning the pending system
 * call or that we defer its re-execution
1383 * (e.g., due to GDB doing an inferior
1384 * function call).
1385 */
1386 urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
1387 if (write_access) {
1388 if (*data != urbs_end) {
1389 if (in_syscall(pt))
1390 convert_to_non_syscall(target,
1391 pt,
1392 cfm);
1393 /*
1394 * Simulate user-level write
1395 * of ar.bsp:
1396 */
1397 pt->loadrs = 0;
1398 pt->ar_bspstore = *data;
1399 }
1400 } else
1401 *data = urbs_end;
1402 return 0;
1403 case ELF_AR_BSPSTORE_OFFSET:
1404 ptr = &pt->ar_bspstore;
1405 break;
1406 case ELF_AR_RNAT_OFFSET:
1407 ptr = &pt->ar_rnat;
1408 break;
1409 case ELF_AR_CCV_OFFSET:
1410 ptr = &pt->ar_ccv;
1411 break;
1412 case ELF_AR_UNAT_OFFSET:
1413 ptr = &pt->ar_unat;
1414 break;
1415 case ELF_AR_FPSR_OFFSET:
1416 ptr = &pt->ar_fpsr;
1417 break;
1418 case ELF_AR_PFS_OFFSET:
1419 ptr = &pt->ar_pfs;
1420 break;
1421 case ELF_AR_LC_OFFSET:
1422 return unw_access_ar(info, UNW_AR_LC, data,
1423 write_access);
1424 case ELF_AR_EC_OFFSET:
1425 return unw_access_ar(info, UNW_AR_EC, data,
1426 write_access);
1427 case ELF_AR_CSD_OFFSET:
1428 ptr = &pt->ar_csd;
1429 break;
1430 case ELF_AR_SSD_OFFSET:
1431 ptr = &pt->ar_ssd;
1432 }
1433 } else if (addr >= ELF_CR_IIP_OFFSET && addr <= ELF_CR_IPSR_OFFSET) {
1434 switch (addr) {
1435 case ELF_CR_IIP_OFFSET:
1436 ptr = &pt->cr_iip;
1437 break;
1438 case ELF_CFM_OFFSET:
1439 urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
1440 if (write_access) {
1441 if (((cfm ^ *data) & PFM_MASK) != 0) {
1442 if (in_syscall(pt))
1443 convert_to_non_syscall(target,
1444 pt,
1445 cfm);
1446 pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK)
1447 | (*data & PFM_MASK));
1448 }
1449 } else
1450 *data = cfm;
1451 return 0;
1452 case ELF_CR_IPSR_OFFSET:
1453 if (write_access) {
1454 unsigned long tmp = *data;
1455 /* psr.ri==3 is a reserved value: SDM 2:25 */
1456 if ((tmp & IA64_PSR_RI) == IA64_PSR_RI)
1457 tmp &= ~IA64_PSR_RI;
1458 pt->cr_ipsr = ((tmp & IPSR_MASK)
1459 | (pt->cr_ipsr & ~IPSR_MASK));
1460 } else
1461 *data = (pt->cr_ipsr & IPSR_MASK);
1462 return 0;
1463 }
1464 } else if (addr == ELF_NAT_OFFSET)
1465 return access_nat_bits(target, pt, info,
1466 data, write_access);
1467 else if (addr == ELF_PR_OFFSET)
1468 ptr = &pt->pr;
1469 else
1470 return -1;
1471
1472 if (write_access)
1473 *ptr = *data;
1474 else
1475 *data = *ptr;
1476
1477 return 0;
1478}
1479
1480static int
1481access_elf_reg(struct task_struct *target, struct unw_frame_info *info,
1482 unsigned long addr, unsigned long *data, int write_access)
1483{
	if (addr >= ELF_GR_OFFSET(1) && addr <= ELF_GR_OFFSET(31))
1485 return access_elf_gpreg(target, info, addr, data, write_access);
1486 else if (addr >= ELF_BR_OFFSET(0) && addr <= ELF_BR_OFFSET(7))
1487 return access_elf_breg(target, info, addr, data, write_access);
1488 else
1489 return access_elf_areg(target, info, addr, data, write_access);
1490}
1491
1492void do_gpregs_get(struct unw_frame_info *info, void *arg)
1493{
	struct regset_getset *dst = arg;
1495
1496 if (unw_unwind_to_user(info) < 0)
1497 return;
1498
1499 /*
1500 * coredump format:
1501 * r0-r31
1502 * NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
1503 * predicate registers (p0-p63)
1504 * b0-b7
1505 * ip cfm user-mask
1506 * ar.rsc ar.bsp ar.bspstore ar.rnat
1507 * ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
1508 */
1509
1510
1511 /* Skip r0 */
1512 if (dst->count > 0 && dst->pos < ELF_GR_OFFSET(1)) {
1513 dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
1514 &dst->u.get.kbuf,
1515 &dst->u.get.ubuf,
1516 0, ELF_GR_OFFSET(1));
		if (dst->ret)
1518 return;
1519 }
1520
1521 while (dst->count && dst->pos < ELF_AR_END_OFFSET) {
1522 unsigned int n, from, to;
1523 elf_greg_t tmp[16];
1524
1525 from = dst->pos;
1526 to = from + min(dst->count, (unsigned)sizeof(tmp));
1527 if (to > ELF_AR_END_OFFSET)
1528 to = ELF_AR_END_OFFSET;
1529 for (n = 0; from < to; from += sizeof(elf_greg_t), n++) {
1530 if (access_elf_reg(dst->target, info, from,
1531 &tmp[n], 0) < 0) {
1532 dst->ret = -EIO;
1533 return;
1534 }
		}
1536 dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1537 &dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
1538 dst->pos, to);
1539 if (dst->ret)
1540 return;
1541 }
1542}
1543
1544void do_gpregs_set(struct unw_frame_info *info, void *arg)
1545{
	struct regset_getset *dst = arg;
1547
1548 if (unw_unwind_to_user(info) < 0)
1549 return;
1550
1551 if (!dst->count)
1552 return;
	/* Skip r0 */
	if (dst->pos < ELF_GR_OFFSET(1)) {
1555 dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
1556 &dst->u.set.kbuf,
1557 &dst->u.set.ubuf,
1558 0, ELF_GR_OFFSET(1));
1559 if (dst->ret)
1560 return;
1561 }
1562
1563 while (dst->count && dst->pos < ELF_AR_END_OFFSET) {
1564 unsigned int n, from, to;
1565 elf_greg_t tmp[16];
1567 from = dst->pos;
1568 to = from + sizeof(tmp);
1569 if (to > ELF_AR_END_OFFSET)
1570 to = ELF_AR_END_OFFSET;
1571 /* get up to 16 values */
1572 dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1573 &dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				from, to);
1575 if (dst->ret)
1576 return;
1577 /* now copy them into registers */
1578 for (n = 0; from < dst->pos; from += sizeof(elf_greg_t), n++)
1579 if (access_elf_reg(dst->target, info, from,
1580 &tmp[n], 1) < 0) {
1581 dst->ret = -EIO;
1582 return;
1583 }
1584 }
1585}
1586
1587#define ELF_FP_OFFSET(i) (i * sizeof(elf_fpreg_t))
1588
1589void do_fpregs_get(struct unw_frame_info *info, void *arg)
1590{
1591 struct regset_getset *dst = arg;
1592 struct task_struct *task = dst->target;
1593 elf_fpreg_t tmp[30];
1594 int index, min_copy, i;
1595
1596 if (unw_unwind_to_user(info) < 0)
1597 return;
1598
1599 /* Skip pos 0 and 1 */
1600 if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
1601 dst->ret = user_regset_copyout_zero(&dst->pos, &dst->count,
1602 &dst->u.get.kbuf,
1603 &dst->u.get.ubuf,
1604 0, ELF_FP_OFFSET(2));
1605 if (dst->count == 0 || dst->ret)
1606 return;
1607 }
1608
1609 /* fr2-fr31 */
1610 if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
1611 index = (dst->pos - ELF_FP_OFFSET(2)) / sizeof(elf_fpreg_t);
1612
1613 min_copy = min(((unsigned int)ELF_FP_OFFSET(32)),
1614 dst->pos + dst->count);
1615 for (i = dst->pos; i < min_copy; i += sizeof(elf_fpreg_t),
1616 index++)
1617 if (unw_get_fr(info, i / sizeof(elf_fpreg_t),
1618 &tmp[index])) {
1619 dst->ret = -EIO;
1620 return;
1621 }
1622 dst->ret = user_regset_copyout(&dst->pos, &dst->count,
1623 &dst->u.get.kbuf, &dst->u.get.ubuf, tmp,
1624 ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
1625 if (dst->count == 0 || dst->ret)
1626 return;
1627 }
1628
1629 /* fph */
1630 if (dst->count > 0) {
1631 ia64_flush_fph(dst->target);
1632 if (task->thread.flags & IA64_THREAD_FPH_VALID)
1633 dst->ret = user_regset_copyout(
1634 &dst->pos, &dst->count,
1635 &dst->u.get.kbuf, &dst->u.get.ubuf,
1636 &dst->target->thread.fph,
1637 ELF_FP_OFFSET(32), -1);
1638 else
1639 /* Zero fill instead. */
1640 dst->ret = user_regset_copyout_zero(
1641 &dst->pos, &dst->count,
1642 &dst->u.get.kbuf, &dst->u.get.ubuf,
1643 ELF_FP_OFFSET(32), -1);
1644 }
1645}
1646
1647void do_fpregs_set(struct unw_frame_info *info, void *arg)
1648{
1649 struct regset_getset *dst = arg;
1650 elf_fpreg_t fpreg, tmp[30];
1651 int index, start, end;
1652
1653 if (unw_unwind_to_user(info) < 0)
1654 return;
1655
1656 /* Skip pos 0 and 1 */
1657 if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
1658 dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
1659 &dst->u.set.kbuf,
1660 &dst->u.set.ubuf,
1661 0, ELF_FP_OFFSET(2));
1662 if (dst->count == 0 || dst->ret)
1663 return;
1664 }
1665
1666 /* fr2-fr31 */
1667 if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
1668 start = dst->pos;
1669 end = min(((unsigned int)ELF_FP_OFFSET(32)),
1670 dst->pos + dst->count);
1671 dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1672 &dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
1673 ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
1674 if (dst->ret)
1675 return;
1676
1677 if (start & 0xF) { /* only write high part */
1678 if (unw_get_fr(info, start / sizeof(elf_fpreg_t),
1679 &fpreg)) {
1680 dst->ret = -EIO;
1681 return;
1682 }
1683 tmp[start / sizeof(elf_fpreg_t) - 2].u.bits[0]
1684 = fpreg.u.bits[0];
1685 start &= ~0xFUL;
1686 }
1687 if (end & 0xF) { /* only write low part */
1688 if (unw_get_fr(info, end / sizeof(elf_fpreg_t),
1689 &fpreg)) {
1690 dst->ret = -EIO;
1691 return;
1692 }
1693 tmp[end / sizeof(elf_fpreg_t) - 2].u.bits[1]
1694 = fpreg.u.bits[1];
1695 end = (end + 0xF) & ~0xFUL;
1696 }
1697
1698 for ( ; start < end ; start += sizeof(elf_fpreg_t)) {
1699 index = start / sizeof(elf_fpreg_t);
1700 if (unw_set_fr(info, index, tmp[index - 2])) {
1701 dst->ret = -EIO;
1702 return;
1703 }
1704 }
1705 if (dst->ret || dst->count == 0)
1706 return;
1707 }
1708
1709 /* fph */
1710 if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(128)) {
1711 ia64_sync_fph(dst->target);
1712 dst->ret = user_regset_copyin(&dst->pos, &dst->count,
1713 &dst->u.set.kbuf,
1714 &dst->u.set.ubuf,
1715 &dst->target->thread.fph,
1716 ELF_FP_OFFSET(32), -1);
1717 }
1718}
1719
1720static int
1721do_regset_call(void (*call)(struct unw_frame_info *, void *),
1722 struct task_struct *target,
1723 const struct user_regset *regset,
1724 unsigned int pos, unsigned int count,
1725 const void *kbuf, const void __user *ubuf)
1726{
1727 struct regset_getset info = { .target = target, .regset = regset,
1728 .pos = pos, .count = count,
1729 .u.set = { .kbuf = kbuf, .ubuf = ubuf },
1730 .ret = 0 };
1731
1732 if (target == current)
1733 unw_init_running(call, &info);
1734 else {
1735 struct unw_frame_info ufi;
1736 memset(&ufi, 0, sizeof(ufi));
1737 unw_init_from_blocked_task(&ufi, target);
1738 (*call)(&ufi, &info);
1739 }
1740
1741 return info.ret;
1742}
1743
1744static int
1745gpregs_get(struct task_struct *target,
1746 const struct user_regset *regset,
1747 unsigned int pos, unsigned int count,
1748 void *kbuf, void __user *ubuf)
1749{
1750 return do_regset_call(do_gpregs_get, target, regset, pos, count,
1751 kbuf, ubuf);
1752}
1753
1754static int gpregs_set(struct task_struct *target,
1755 const struct user_regset *regset,
1756 unsigned int pos, unsigned int count,
1757 const void *kbuf, const void __user *ubuf)
1758{
1759 return do_regset_call(do_gpregs_set, target, regset, pos, count,
1760 kbuf, ubuf);
1761}
1762
1763static void do_gpregs_writeback(struct unw_frame_info *info, void *arg)
1764{
1765 do_sync_rbs(info, ia64_sync_user_rbs);
1766}
1767
1768/*
1769 * This is called to write back the register backing store.
1770 * ptrace does this before it stops, so that a tracer reading the user
1771 * memory after the thread stops will get the current register data.
1772 */
1773static int
1774gpregs_writeback(struct task_struct *target,
1775 const struct user_regset *regset,
1776 int now)
1777{
1778 if (test_and_set_tsk_thread_flag(target, TIF_RESTORE_RSE))
1779 return 0;
	set_notify_resume(target);
1781 return do_regset_call(do_gpregs_writeback, target, regset, 0, 0,
1782 NULL, NULL);
1783}
1784
1785static int
1786fpregs_active(struct task_struct *target, const struct user_regset *regset)
1787{
1788 return (target->thread.flags & IA64_THREAD_FPH_VALID) ? 128 : 32;
1789}
1790
1791static int fpregs_get(struct task_struct *target,
1792 const struct user_regset *regset,
1793 unsigned int pos, unsigned int count,
1794 void *kbuf, void __user *ubuf)
1795{
1796 return do_regset_call(do_fpregs_get, target, regset, pos, count,
1797 kbuf, ubuf);
1798}
1799
1800static int fpregs_set(struct task_struct *target,
1801 const struct user_regset *regset,
1802 unsigned int pos, unsigned int count,
1803 const void *kbuf, const void __user *ubuf)
1804{
1805 return do_regset_call(do_fpregs_set, target, regset, pos, count,
1806 kbuf, ubuf);
1807}
1808
1809static int
1810access_uarea(struct task_struct *child, unsigned long addr,
1811 unsigned long *data, int write_access)
1812{
1813 unsigned int pos = -1; /* an invalid value */
1814 unsigned long *ptr, regnum;
1815
1816 if ((addr & 0x7) != 0) {
1817 dprintk("ptrace: unaligned register address 0x%lx\n", addr);
1818 return -1;
1819 }
1820 if ((addr >= PT_NAT_BITS + 8 && addr < PT_F2) ||
1821 (addr >= PT_R7 + 8 && addr < PT_B1) ||
1822 (addr >= PT_AR_LC + 8 && addr < PT_CR_IPSR) ||
1823 (addr >= PT_AR_SSD + 8 && addr < PT_DBR)) {
1824 dprintk("ptrace: rejecting access to register "
1825 "address 0x%lx\n", addr);
1826 return -1;
1827 }
1828
1829 switch (addr) {
1830 case PT_F32 ... (PT_F127 + 15):
1831 pos = addr - PT_F32 + ELF_FP_OFFSET(32);
1832 break;
1833 case PT_F2 ... (PT_F5 + 15):
1834 pos = addr - PT_F2 + ELF_FP_OFFSET(2);
1835 break;
1836 case PT_F10 ... (PT_F31 + 15):
1837 pos = addr - PT_F10 + ELF_FP_OFFSET(10);
1838 break;
1839 case PT_F6 ... (PT_F9 + 15):
1840 pos = addr - PT_F6 + ELF_FP_OFFSET(6);
1841 break;
1842 }
1843
1844 if (pos != -1) {
1845 unsigned reg = pos / sizeof(elf_fpreg_t);
1846 int which_half = (pos / sizeof(unsigned long)) & 1;
1847
1848 if (reg < 32) { /* fr2-fr31 */
1849 struct unw_frame_info info;
1850 elf_fpreg_t fpreg;
1851
1852 memset(&info, 0, sizeof(info));
1853 unw_init_from_blocked_task(&info, child);
1854 if (unw_unwind_to_user(&info) < 0)
1855 return 0;
1856
1857 if (unw_get_fr(&info, reg, &fpreg))
1858 return -1;
1859 if (write_access) {
1860 fpreg.u.bits[which_half] = *data;
1861 if (unw_set_fr(&info, reg, fpreg))
1862 return -1;
1863 } else {
1864 *data = fpreg.u.bits[which_half];
1865 }
1866 } else { /* fph */
1867 elf_fpreg_t *p = &child->thread.fph[reg - 32];
1868 unsigned long *bits = &p->u.bits[which_half];
1869
1870 ia64_sync_fph(child);
1871 if (write_access)
1872 *bits = *data;
1873 else if (child->thread.flags & IA64_THREAD_FPH_VALID)
1874 *data = *bits;
1875 else
1876 *data = 0;
1877 }
1878 return 0;
1879 }
1880
1881 switch (addr) {
1882 case PT_NAT_BITS:
1883 pos = ELF_NAT_OFFSET;
1884 break;
1885 case PT_R4 ... PT_R7:
1886 pos = addr - PT_R4 + ELF_GR_OFFSET(4);
1887 break;
1888 case PT_B1 ... PT_B5:
1889 pos = addr - PT_B1 + ELF_BR_OFFSET(1);
1890 break;
1891 case PT_AR_EC:
1892 pos = ELF_AR_EC_OFFSET;
1893 break;
1894 case PT_AR_LC:
1895 pos = ELF_AR_LC_OFFSET;
1896 break;
1897 case PT_CR_IPSR:
1898 pos = ELF_CR_IPSR_OFFSET;
1899 break;
1900 case PT_CR_IIP:
1901 pos = ELF_CR_IIP_OFFSET;
1902 break;
1903 case PT_CFM:
1904 pos = ELF_CFM_OFFSET;
1905 break;
1906 case PT_AR_UNAT:
1907 pos = ELF_AR_UNAT_OFFSET;
1908 break;
1909 case PT_AR_PFS:
1910 pos = ELF_AR_PFS_OFFSET;
1911 break;
1912 case PT_AR_RSC:
1913 pos = ELF_AR_RSC_OFFSET;
1914 break;
1915 case PT_AR_RNAT:
1916 pos = ELF_AR_RNAT_OFFSET;
1917 break;
1918 case PT_AR_BSPSTORE:
1919 pos = ELF_AR_BSPSTORE_OFFSET;
1920 break;
1921 case PT_PR:
1922 pos = ELF_PR_OFFSET;
1923 break;
1924 case PT_B6:
1925 pos = ELF_BR_OFFSET(6);
1926 break;
1927 case PT_AR_BSP:
1928 pos = ELF_AR_BSP_OFFSET;
1929 break;
1930 case PT_R1 ... PT_R3:
1931 pos = addr - PT_R1 + ELF_GR_OFFSET(1);
1932 break;
1933 case PT_R12 ... PT_R15:
1934 pos = addr - PT_R12 + ELF_GR_OFFSET(12);
1935 break;
1936 case PT_R8 ... PT_R11:
1937 pos = addr - PT_R8 + ELF_GR_OFFSET(8);
1938 break;
1939 case PT_R16 ... PT_R31:
1940 pos = addr - PT_R16 + ELF_GR_OFFSET(16);
1941 break;
1942 case PT_AR_CCV:
1943 pos = ELF_AR_CCV_OFFSET;
1944 break;
1945 case PT_AR_FPSR:
1946 pos = ELF_AR_FPSR_OFFSET;
1947 break;
1948 case PT_B0:
1949 pos = ELF_BR_OFFSET(0);
1950 break;
1951 case PT_B7:
1952 pos = ELF_BR_OFFSET(7);
1953 break;
1954 case PT_AR_CSD:
1955 pos = ELF_AR_CSD_OFFSET;
1956 break;
1957 case PT_AR_SSD:
1958 pos = ELF_AR_SSD_OFFSET;
1959 break;
1960 }
1961
1962 if (pos != -1) {
1963 struct unw_frame_info info;
1964
1965 memset(&info, 0, sizeof(info));
1966 unw_init_from_blocked_task(&info, child);
1967 if (unw_unwind_to_user(&info) < 0)
1968 return 0;
1969
1970 return access_elf_reg(child, &info, pos, data, write_access);
1971 }
1972
1973 /* access debug registers */
1974 if (addr >= PT_IBR) {
1975 regnum = (addr - PT_IBR) >> 3;
1976 ptr = &child->thread.ibr[0];
1977 } else {
1978 regnum = (addr - PT_DBR) >> 3;
1979 ptr = &child->thread.dbr[0];
1980 }
1981
1982 if (regnum >= 8) {
1983 dprintk("ptrace: rejecting access to register "
1984 "address 0x%lx\n", addr);
1985 return -1;
1986 }
1987#ifdef CONFIG_PERFMON
1988 /*
1989 * Check if debug registers are used by perfmon. This
1990 * test must be done once we know that we can do the
1991 * operation, i.e. the arguments are all valid, but
1992 * before we start modifying the state.
1993 *
1994 * Perfmon needs to keep a count of how many processes
1995 * are trying to modify the debug registers for system
1996 * wide monitoring sessions.
1997 *
1998 * We also include read access here, because they may
1999 * cause the PMU-installed debug register state
2000 * (dbr[], ibr[]) to be reset. The two arrays are also
2001 * used by perfmon, but we do not use
2002 * IA64_THREAD_DBG_VALID. The registers are restored
2003 * by the PMU context switch code.
2004 */
2005 if (pfm_use_debug_registers(child))
2006 return -1;
2007#endif
2008
2009 if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
2010 child->thread.flags |= IA64_THREAD_DBG_VALID;
2011 memset(child->thread.dbr, 0,
2012 sizeof(child->thread.dbr));
2013 memset(child->thread.ibr, 0,
2014 sizeof(child->thread.ibr));
2015 }
2016
2017 ptr += regnum;
2018
2019 if ((regnum & 1) && write_access) {
2020 /* don't let the user set kernel-level breakpoints: */
2021 *ptr = *data & ~(7UL << 56);
2022 return 0;
2023 }
2024 if (write_access)
2025 *ptr = *data;
2026 else
2027 *data = *ptr;
2028 return 0;
2029}
2030
2031static const struct user_regset native_regsets[] = {
2032 {
2033 .core_note_type = NT_PRSTATUS,
2034 .n = ELF_NGREG,
2035 .size = sizeof(elf_greg_t), .align = sizeof(elf_greg_t),
2036 .get = gpregs_get, .set = gpregs_set,
2037 .writeback = gpregs_writeback
2038 },
2039 {
2040 .core_note_type = NT_PRFPREG,
2041 .n = ELF_NFPREG,
2042 .size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t),
2043 .get = fpregs_get, .set = fpregs_set, .active = fpregs_active
2044 },
2045};
2046
2047static const struct user_regset_view user_ia64_view = {
2048 .name = "ia64",
2049 .e_machine = EM_IA_64,
2050 .regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
2051};
2052
2053const struct user_regset_view *task_user_regset_view(struct task_struct *tsk)
2054{
2055 return &user_ia64_view;
2056}
2057
2058struct syscall_get_set_args {
2059 unsigned int i;
2060 unsigned int n;
2061 unsigned long *args;
2062 struct pt_regs *regs;
2063 int rw;
2064};
2065
2066static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data)
2067{
2068 struct syscall_get_set_args *args = data;
2069 struct pt_regs *pt = args->regs;
2070 unsigned long *krbs, cfm, ndirty;
2071 int i, count;
2072
2073 if (unw_unwind_to_user(info) < 0)
2074 return;
2075
2076 cfm = pt->cr_ifs;
2077 krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8;
2078 ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
2079
2080 count = 0;
2081 if (in_syscall(pt))
2082 count = min_t(int, args->n, cfm & 0x7f);
2083
2084 for (i = 0; i < count; i++) {
2085 if (args->rw)
2086 *ia64_rse_skip_regs(krbs, ndirty + i + args->i) =
2087 args->args[i];
2088 else
2089 args->args[i] = *ia64_rse_skip_regs(krbs,
2090 ndirty + i + args->i);
2091 }
2092
2093 if (!args->rw) {
2094 while (i < args->n) {
2095 args->args[i] = 0;
2096 i++;
2097 }
2098 }
2099}
2100
2101void ia64_syscall_get_set_arguments(struct task_struct *task,
	struct pt_regs *regs, unsigned long *args, int rw)
2103{
2104 struct syscall_get_set_args data = {
2105 .i = 0,
2106 .n = 6,
2107 .args = args,
2108 .regs = regs,
2109 .rw = rw,
2110 };
2111
2112 if (task == current)
2113 unw_init_running(syscall_get_set_args_cb, &data);
2114 else {
2115 struct unw_frame_info ufi;
2116 memset(&ufi, 0, sizeof(ufi));
2117 unw_init_from_blocked_task(&ufi, task);
2118 syscall_get_set_args_cb(&ufi, &data);
2119 }
2120}