// SPDX-License-Identifier: GPL-2.0
/*
 * Kernel support for the ptrace() and syscall tracing interfaces.
 *
 * Copyright (C) 1999-2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2006 Intel Co
 *  2006-08-12	- IA64 Native Utrace implementation support added by
 *		  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * Derived from the x86 and Alpha versions.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/regset.h>
#include <linux/elf.h>
#include <linux/resume_user_mode.h>

#include <asm/processor.h>
#include <asm/ptrace_offsets.h>
#include <asm/rse.h>
#include <linux/uaccess.h>
#include <asm/unwind.h>

#include "entry.h"

/*
 * Bits in the PSR that we allow ptrace() to change:
 *	be, up, ac, mfl, mfh (the user mask; five bits total)
 *	db (debug breakpoint fault; one bit)
 *	id (instruction debug fault disable; one bit)
 *	dd (data debug fault disable; one bit)
 *	ri (restart instruction; two bits)
 *	is (instruction set; one bit)
 */
#define IPSR_MASK (IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS \
		   | IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI)

#define MASK(nbits)	((1UL << (nbits)) - 1)	/* mask with NBITS bits set */
#define PFM_MASK	MASK(38)
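/*
 * (For reference: the CFM image kept in cr.ifs is 38 bits wide -- sof,
 * sol and sor plus the three rrb fields -- which is what the 38-bit
 * mask above selects.)
 */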

#define PTRACE_DEBUG 0

#if PTRACE_DEBUG
# define dprintk(format...) printk(format)
# define inline
#else
# define dprintk(format...)
#endif

/* Return TRUE if PT was created due to kernel-entry via a system-call. */
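/*
 * (A syscall entry leaves the valid bit -- bit 63 -- of the saved
 * cr.ifs clear, while interruption frames have it set; hence the sign
 * test below.)
 */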
static inline int
in_syscall (struct pt_regs *pt)
{
	return (long) pt->cr_ifs >= 0;
}

/*
 * Collect the NaT bits for r1-r31 from scratch_unat and return a NaT
 * bitset where bit i is set iff the NaT bit of register i is set.
 */
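/*
 * (Background for GET_BITS(): st8.spill records a register's NaT bit
 * in ar.unat at a position derived from its spill address, which
 * ia64_unat_pos() computes from the register's slot in pt_regs.
 * Rotating the unat image right by the distance between that position
 * and the register number lines the NaT bits up with the register
 * numbers.)
 */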
unsigned long
ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat)
{
#	define GET_BITS(first, last, unat)				\
	({								\
		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
		unsigned long nbits = (last - first + 1);		\
		unsigned long mask = MASK(nbits) << first;		\
		unsigned long dist;					\
		if (bit < first)					\
			dist = 64 + bit - first;			\
		else							\
			dist = bit - first;				\
		ia64_rotr(unat, dist) & mask;				\
	})
	unsigned long val;

	/*
	 * Registers that are stored consecutively in struct pt_regs
	 * can be handled in parallel.  If the register order in
	 * struct pt_regs changes, this code MUST be updated.
	 */
	val  = GET_BITS( 1,  1, scratch_unat);
	val |= GET_BITS( 2,  3, scratch_unat);
	val |= GET_BITS(12, 13, scratch_unat);
	val |= GET_BITS(14, 14, scratch_unat);
	val |= GET_BITS(15, 15, scratch_unat);
	val |= GET_BITS( 8, 11, scratch_unat);
	val |= GET_BITS(16, 31, scratch_unat);
	return val;

#	undef GET_BITS
}

/*
 * Set the NaT bits for the scratch registers according to NAT and
 * return the resulting unat (assuming the scratch registers are
 * stored in PT).
 */
unsigned long
ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat)
{
#	define PUT_BITS(first, last, nat)				\
	({								\
		unsigned long bit = ia64_unat_pos(&pt->r##first);	\
		unsigned long nbits = (last - first + 1);		\
		unsigned long mask = MASK(nbits) << first;		\
		long dist;						\
		if (bit < first)					\
			dist = 64 + bit - first;			\
		else							\
			dist = bit - first;				\
		ia64_rotl(nat & mask, dist);				\
	})
	unsigned long scratch_unat;

	/*
	 * Registers that are stored consecutively in struct pt_regs
	 * can be handled in parallel.  If the register order in
	 * struct pt_regs changes, this code MUST be updated.
	 */
	scratch_unat  = PUT_BITS( 1,  1, nat);
	scratch_unat |= PUT_BITS( 2,  3, nat);
	scratch_unat |= PUT_BITS(12, 13, nat);
	scratch_unat |= PUT_BITS(14, 14, nat);
	scratch_unat |= PUT_BITS(15, 15, nat);
	scratch_unat |= PUT_BITS( 8, 11, nat);
	scratch_unat |= PUT_BITS(16, 31, nat);

	return scratch_unat;

#	undef PUT_BITS
}

#define IA64_MLX_TEMPLATE	0x2
#define IA64_MOVL_OPCODE	6

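/*
 * (An IA-64 bundle keeps its 5-bit template in the low bits of its
 * first word; bit 0 merely selects the trailing-stop variant, so the
 * "(w0 >> 1) & 0xf" test below compares template pairs -- MLX being
 * the 0x4/0x5 pair, i.e. 0x2 once shifted.)
 */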
void
ia64_increment_ip (struct pt_regs *regs)
{
	unsigned long w0, ri = ia64_psr(regs)->ri + 1;

	if (ri > 2) {
		ri = 0;
		regs->cr_iip += 16;
	} else if (ri == 2) {
		get_user(w0, (char __user *) regs->cr_iip + 0);
		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
			/*
			 * rfi'ing to slot 2 of an MLX bundle causes
			 * an illegal operation fault.  We don't want
			 * that to happen...
			 */
			ri = 0;
			regs->cr_iip += 16;
		}
	}
	ia64_psr(regs)->ri = ri;
}

void
ia64_decrement_ip (struct pt_regs *regs)
{
	unsigned long w0, ri = ia64_psr(regs)->ri - 1;

	if (ia64_psr(regs)->ri == 0) {
		regs->cr_iip -= 16;
		ri = 2;
		get_user(w0, (char __user *) regs->cr_iip + 0);
		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
			/*
			 * rfi'ing to slot 2 of an MLX bundle causes
			 * an illegal operation fault.  We don't want
			 * that to happen...
			 */
			ri = 1;
		}
	}
	ia64_psr(regs)->ri = ri;
}

/*
 * This routine is used to read the rnat bits that are stored on the
 * kernel backing store.  Since, in general, the alignment of the user
 * and kernel are different, this is not completely trivial.  In
 * essence, we need to construct the user RNAT based on up to two
 * kernel RNAT values and/or the RNAT value saved in the child's
 * pt_regs.
 *
 *	user rbs
 *
 * +--------+ <-- lowest address
 * | slot62 |
 * +--------+
 * |  rnat  |	0x....1f8
 * +--------+
 * | slot00 |	\
 * +--------+	|
 * | slot01 |	> child_regs->ar_rnat
 * +--------+	|
 * | slot02 |	/				kernel rbs
 * +--------+				+--------+
 *	    <- child_regs->ar_bspstore	| slot61 | <-- krbs
 * +- - - - +				+--------+
 *					| slot62 |
 * +- - - - +				+--------+
 *					|  rnat	 |
 * +- - - - +				+--------+
 *   vrnat				| slot00 |
 * +- - - - +				+--------+
 *					=	 =
 *					+--------+
 *					| slot00 | \
 *					+--------+ |
 *					| slot01 | > child_stack->ar_rnat
 *					+--------+ |
 *					| slot02 | /
 *					+--------+
 *						<--- child_stack->ar_bspstore
 *
 * The way to think of this code is as follows: bit 0 in the user rnat
 * corresponds to some bit N (0 <= N <= 62) in one of the kernel rnat
 * values.  The kernel rnat value holding this bit is stored in
 * variable rnat0.  rnat1 is loaded with the kernel rnat value that
 * forms the upper bits of the user rnat value.
 *
 * Boundary cases:
 *
 * o when reading the rnat "below" the first rnat slot on the kernel
 *   backing store, rnat0/rnat1 are set to 0 and the low order bits are
 *   merged in from pt->ar_rnat.
 *
 * o when reading the rnat "above" the last rnat slot on the kernel
 *   backing store, rnat0/rnat1 gets its value from sw->ar_rnat.
 */
static unsigned long
get_rnat (struct task_struct *task, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr,
	  unsigned long *urbs_end)
{
	unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr;
	unsigned long umask = 0, mask, m;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
	long num_regs, nbits;
	struct pt_regs *pt;

	pt = task_pt_regs(task);
	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;

	if (urbs_end < urnat_addr)
		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_end);
	else
		nbits = 63;
	mask = MASK(nbits);
	/*
	 * First, figure out which bit number slot 0 in user-land maps
	 * to in the kernel rnat.  Do this by figuring out how many
	 * register slots we're beyond the user's backingstore and
	 * then computing the equivalent address in kernel space.
	 */
	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
	shift = ia64_rse_slot_num(slot0_kaddr);
	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
	rnat0_kaddr = rnat1_kaddr - 64;

	if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be merged in from pt->ar_rnat */
		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
		urnat = (pt->ar_rnat & umask);
		mask &= ~umask;
		if (!mask)
			return urnat;
	}

	m = mask << shift;
	if (rnat0_kaddr >= kbsp)
		rnat0 = sw->ar_rnat;
	else if (rnat0_kaddr > krbs)
		rnat0 = *rnat0_kaddr;
	urnat |= (rnat0 & m) >> shift;

	m = mask >> (63 - shift);
	if (rnat1_kaddr >= kbsp)
		rnat1 = sw->ar_rnat;
	else if (rnat1_kaddr > krbs)
		rnat1 = *rnat1_kaddr;
	urnat |= (rnat1 & m) << (63 - shift);
	return urnat;
}

/*
 * The reverse of get_rnat.
 */
static void
put_rnat (struct task_struct *task, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat,
	  unsigned long *urbs_end)
{
	unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, umask = 0, mask, m;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
	long num_regs, nbits;
	struct pt_regs *pt;
	unsigned long cfm, *urbs_kargs;

	pt = task_pt_regs(task);
	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;

	urbs_kargs = urbs_end;
	if (in_syscall(pt)) {
		/*
		 * If entered via syscall, don't allow user to set rnat bits
		 * for syscall args.
		 */
		cfm = pt->cr_ifs;
		urbs_kargs = ia64_rse_skip_regs(urbs_end, -(cfm & 0x7f));
	}

	if (urbs_kargs >= urnat_addr)
		nbits = 63;
	else {
		if ((urnat_addr - 63) >= urbs_kargs)
			return;
		nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_kargs);
	}
	mask = MASK(nbits);

	/*
	 * First, figure out which bit number slot 0 in user-land maps
	 * to in the kernel rnat.  Do this by figuring out how many
	 * register slots we're beyond the user's backingstore and
	 * then computing the equivalent address in kernel space.
	 */
	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
	shift = ia64_rse_slot_num(slot0_kaddr);
	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
	rnat0_kaddr = rnat1_kaddr - 64;

	if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be placed in pt->ar_rnat: */
		umask = MASK(ia64_rse_slot_num(ubspstore)) & mask;
		pt->ar_rnat = (pt->ar_rnat & ~umask) | (urnat & umask);
		mask &= ~umask;
		if (!mask)
			return;
	}
	/*
	 * Note: Section 11.1 of the EAS guarantees that bit 63 of an
	 * rnat slot is ignored, so we don't have to clear it here.
	 */
	rnat0 = (urnat << shift);
	m = mask << shift;
	if (rnat0_kaddr >= kbsp)
		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat0 & m);
	else if (rnat0_kaddr > krbs)
		*rnat0_kaddr = ((*rnat0_kaddr & ~m) | (rnat0 & m));

	rnat1 = (urnat >> (63 - shift));
	m = mask >> (63 - shift);
	if (rnat1_kaddr >= kbsp)
		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat1 & m);
	else if (rnat1_kaddr > krbs)
		*rnat1_kaddr = ((*rnat1_kaddr & ~m) | (rnat1 & m));
}

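/*
 * (ADDR lives on the kernel copy of the backing store iff it falls
 * between the user bspstore value snapshotted at kernel entry and the
 * RNaT collection slot belonging to urbs_end; anything older was
 * already written out to user memory.)
 */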
static inline int
on_kernel_rbs (unsigned long addr, unsigned long bspstore,
	       unsigned long urbs_end)
{
	unsigned long *rnat_addr = ia64_rse_rnat_addr((unsigned long *)
						      urbs_end);
	return (addr >= bspstore && addr <= (unsigned long) rnat_addr);
}

/*
 * Read a word from the user-level backing store of task CHILD.  ADDR
 * is the user-level address to read the word from, VAL a pointer to
 * the return value, and USER_BSP gives the end of the user-level
 * backing store (i.e., it's the address that would be in ar.bsp after
 * the user executed a "cover" instruction).
 *
 * This routine takes care of accessing the kernel register backing
 * store for those registers that got spilled there.  It also takes
 * care of calculating the appropriate RNaT collection words.
 */
long
ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
	   unsigned long user_rbs_end, unsigned long addr, long *val)
{
	unsigned long *bspstore, *krbs, regnum, *laddr, *urbs_end, *rnat_addr;
	struct pt_regs *child_regs;
	size_t copied;
	long ret;

	urbs_end = (long *) user_rbs_end;
	laddr = (unsigned long *) addr;
	child_regs = task_pt_regs(child);
	bspstore = (unsigned long *) child_regs->ar_bspstore;
	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	if (on_kernel_rbs(addr, (unsigned long) bspstore,
			  (unsigned long) urbs_end))
	{
		/*
		 * Attempt to read the RBS in an area that's actually
		 * on the kernel RBS => read the corresponding bits in
		 * the kernel RBS.
		 */
		rnat_addr = ia64_rse_rnat_addr(laddr);
		ret = get_rnat(child, child_stack, krbs, rnat_addr, urbs_end);

		if (laddr == rnat_addr) {
			/* return NaT collection word itself */
			*val = ret;
			return 0;
		}

		if (((1UL << ia64_rse_slot_num(laddr)) & ret) != 0) {
			/*
			 * It is implementation dependent whether the
			 * data portion of a NaT value gets saved on a
			 * st8.spill or RSE spill (e.g., see EAS 2.6,
			 * 4.4.4.6 Register Spill and Fill).  To get
			 * consistent behavior across all possible
			 * IA-64 implementations, we return zero in
			 * this case.
			 */
			*val = 0;
			return 0;
		}

		if (laddr < urbs_end) {
			/*
			 * The desired word is on the kernel RBS and
			 * is not a NaT.
			 */
			regnum = ia64_rse_num_regs(bspstore, laddr);
			*val = *ia64_rse_skip_regs(krbs, regnum);
			return 0;
		}
	}
	copied = access_process_vm(child, addr, &ret, sizeof(ret), FOLL_FORCE);
	if (copied != sizeof(ret))
		return -EIO;
	*val = ret;
	return 0;
}

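/*
 * (Counterpart of ia64_peek(): write VAL to the user-level backing
 * store of CHILD at ADDR.  Words still living on the kernel RBS are
 * patched there -- including the matching RNaT collection bits --
 * while everything else goes through ordinary process-VM access.)
 */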
long
ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
	   unsigned long user_rbs_end, unsigned long addr, long val)
{
	unsigned long *bspstore, *krbs, regnum, *laddr;
	unsigned long *urbs_end = (long *) user_rbs_end;
	struct pt_regs *child_regs;

	laddr = (unsigned long *) addr;
	child_regs = task_pt_regs(child);
	bspstore = (unsigned long *) child_regs->ar_bspstore;
	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	if (on_kernel_rbs(addr, (unsigned long) bspstore,
			  (unsigned long) urbs_end))
	{
		/*
		 * Attempt to write the RBS in an area that's actually
		 * on the kernel RBS => write the corresponding bits
		 * in the kernel RBS.
		 */
		if (ia64_rse_is_rnat_slot(laddr))
			put_rnat(child, child_stack, krbs, laddr, val,
				 urbs_end);
		else {
			if (laddr < urbs_end) {
				regnum = ia64_rse_num_regs(bspstore, laddr);
				*ia64_rse_skip_regs(krbs, regnum) = val;
			}
		}
	} else if (access_process_vm(child, addr, &val, sizeof(val),
				FOLL_FORCE | FOLL_WRITE)
		   != sizeof(val))
		return -EIO;
	return 0;
}

/*
 * Calculate the address of the end of the user-level register backing
 * store.  This is the address that would have been stored in ar.bsp
 * if the user had executed a "cover" instruction right before
 * entering the kernel.  If CFMP is not NULL, it is used to return the
 * "current frame marker" that was active at the time the kernel was
 * entered.
 */
unsigned long
ia64_get_user_rbs_end (struct task_struct *child, struct pt_regs *pt,
		       unsigned long *cfmp)
{
	unsigned long *krbs, *bspstore, cfm = pt->cr_ifs;
	long ndirty;

	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	bspstore = (unsigned long *) pt->ar_bspstore;
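	/*
	 * (pt->loadrs carries the ar.rsc image with the loadrs byte
	 * count in bits 16 and up, so ">> 19" turns it directly into
	 * a count of 8-byte register slots.)
	 */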
	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));

	if (in_syscall(pt))
		ndirty += (cfm & 0x7f);
	else
		cfm &= ~(1UL << 63);	/* clear valid bit */

	if (cfmp)
		*cfmp = cfm;
	return (unsigned long) ia64_rse_skip_regs(bspstore, ndirty);
}

/*
 * Synchronize (i.e., write) the RSE backing store living in kernel
 * space to the VM of the CHILD task.  SW and PT are the pointers to
 * the switch_stack and pt_regs structures, respectively.
 * USER_RBS_END is the user-level address at which the backing store
 * ends.
 */
long
ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
		    unsigned long user_rbs_start, unsigned long user_rbs_end)
{
	unsigned long addr, val;
	long ret;

	/* now copy word for word from kernel rbs to user rbs: */
	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
		ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
		if (ret < 0)
			return ret;
		if (access_process_vm(child, addr, &val, sizeof(val),
				FOLL_FORCE | FOLL_WRITE)
		    != sizeof(val))
			return -EIO;
	}
	return 0;
}

static long
ia64_sync_kernel_rbs (struct task_struct *child, struct switch_stack *sw,
		unsigned long user_rbs_start, unsigned long user_rbs_end)
{
	unsigned long addr, val;
	long ret;

	/* now copy word for word from user rbs to kernel rbs: */
	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
		if (access_process_vm(child, addr, &val, sizeof(val),
				FOLL_FORCE)
				!= sizeof(val))
			return -EIO;

		ret = ia64_poke(child, sw, user_rbs_end, addr, val);
		if (ret < 0)
			return ret;
	}
	return 0;
}

typedef long (*syncfunc_t)(struct task_struct *, struct switch_stack *,
			   unsigned long, unsigned long);

static void do_sync_rbs(struct unw_frame_info *info, void *arg)
{
	struct pt_regs *pt;
	unsigned long urbs_end;
	syncfunc_t fn = arg;

	if (unw_unwind_to_user(info) < 0)
		return;
	pt = task_pt_regs(info->task);
	urbs_end = ia64_get_user_rbs_end(info->task, pt, NULL);

	fn(info->task, info->sw, pt->ar_bspstore, urbs_end);
}

/*
 * When a thread is stopped (ptraced), a debugger might change the
 * thread's user stack (by modifying memory directly), in which case
 * the user-space RSE state is newer than the copy saved in the
 * kernel, and we must avoid letting the kernel copy override it.  To
 * work around this, we copy the kernel RSE to the user RSE before the
 * task stops, so the user RSE holds up-to-date data, and copy the
 * user RSE back to the kernel after the task resumes from the traced
 * stop, so the kernel returns to user mode with the newer state.
 * TIF_RESTORE_RSE is the flag indicating that we need to synchronize
 * the user RSE back to the kernel.
 */
void ia64_ptrace_stop(void)
{
	if (test_and_set_tsk_thread_flag(current, TIF_RESTORE_RSE))
		return;
	set_notify_resume(current);
	unw_init_running(do_sync_rbs, ia64_sync_user_rbs);
}

/*
 * This is called to read back the register backing store.
 */
void ia64_sync_krbs(void)
{
	clear_tsk_thread_flag(current, TIF_RESTORE_RSE);

	unw_init_running(do_sync_rbs, ia64_sync_kernel_rbs);
}

/*
 * After PTRACE_ATTACH, a thread's register backing store area in user
 * space is assumed to contain correct data whenever the thread is
 * stopped.  arch_ptrace_stop takes care of this on tracing stops.
 * But if the child was already stopped for job control when we attach
 * to it, then it might not ever get into ptrace_stop by the time we
 * want to examine the user memory containing the RBS.
 */
void
ptrace_attach_sync_user_rbs (struct task_struct *child)
{
	int stopped = 0;
	struct unw_frame_info info;

	/*
	 * If the child is in TASK_STOPPED, we need to change that to
	 * TASK_TRACED momentarily while we operate on it.  This ensures
	 * that the child won't be woken up and return to user mode while
	 * we are doing the sync.  (It can only be woken up for SIGKILL.)
	 */

	read_lock(&tasklist_lock);
	if (child->sighand) {
		spin_lock_irq(&child->sighand->siglock);
		if (READ_ONCE(child->__state) == TASK_STOPPED &&
		    !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) {
			set_notify_resume(child);

			WRITE_ONCE(child->__state, TASK_TRACED);
			stopped = 1;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);

	if (!stopped)
		return;

	unw_init_from_blocked_task(&info, child);
	do_sync_rbs(&info, ia64_sync_user_rbs);

	/*
	 * Now move the child back into TASK_STOPPED if it should be in a
	 * job control stop, so that SIGCONT can be used to wake it up.
	 */
	read_lock(&tasklist_lock);
	if (child->sighand) {
		spin_lock_irq(&child->sighand->siglock);
		if (READ_ONCE(child->__state) == TASK_TRACED &&
		    (child->signal->flags & SIGNAL_STOP_STOPPED)) {
			WRITE_ONCE(child->__state, TASK_STOPPED);
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);
}

/*
 * Write f32-f127 back to task->thread.fph if it has been modified.
 */
inline void
ia64_flush_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));

	/*
	 * Prevent migrating this task while
	 * we're fiddling with the FPU state
	 */
	preempt_disable();
	if (ia64_is_local_fpu_owner(task) && psr->mfh) {
		psr->mfh = 0;
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		ia64_save_fpu(&task->thread.fph[0]);
	}
	preempt_enable();
}

/*
 * Sync the fph state of the task so that it can be manipulated
 * through thread.fph.  If necessary, f32-f127 are written back to
 * thread.fph or, if the fph state hasn't been used before, thread.fph
 * is cleared to zeroes.  Also, access to f32-f127 is disabled to
 * ensure that the task picks up the state from thread.fph when it
 * executes again.
 */
void
ia64_sync_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(task_pt_regs(task));

	ia64_flush_fph(task);
	if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		memset(&task->thread.fph, 0, sizeof(task->thread.fph));
	}
	ia64_drop_fpu(task);
	psr->dfh = 1;
}

/*
 * Change the machine-state of CHILD such that it will return via the normal
 * kernel exit-path, rather than the syscall-exit path.
 */
static void
convert_to_non_syscall (struct task_struct *child, struct pt_regs *pt,
			unsigned long cfm)
{
	struct unw_frame_info info, prev_info;
	unsigned long ip, sp, pr;

	unw_init_from_blocked_task(&info, child);
	while (1) {
		prev_info = info;
		if (unw_unwind(&info) < 0)
			return;

		unw_get_sp(&info, &sp);
		if ((long)((unsigned long)child + IA64_STK_OFFSET - sp)
		    < IA64_PT_REGS_SIZE) {
			dprintk("ptrace.%s: ran off the top of the kernel "
				"stack\n", __func__);
			return;
		}
		if (unw_get_pr (&prev_info, &pr) < 0) {
			unw_get_rp(&prev_info, &ip);
			dprintk("ptrace.%s: failed to read "
				"predicate register (ip=0x%lx)\n",
				__func__, ip);
			return;
		}
		if (unw_is_intr_frame(&info)
		    && (pr & (1UL << PRED_USER_STACK)))
			break;
	}

	/*
	 * Note: at the time of this call, the target task is blocked
	 * in notify_resume_user() and by clearing PRED_LEAVE_SYSCALL
	 * (aka, "pLvSys") we redirect execution from
	 * .work_pending_syscall_end to .work_processed_kernel.
	 */
	unw_get_pr(&prev_info, &pr);
	pr &= ~((1UL << PRED_SYSCALL) | (1UL << PRED_LEAVE_SYSCALL));
	pr |=  (1UL << PRED_NON_SYSCALL);
	unw_set_pr(&prev_info, pr);

	pt->cr_ifs = (1UL << 63) | cfm;
	/*
	 * Clear the memory that is NOT written on syscall-entry to
	 * ensure we do not leak kernel-state to user when execution
	 * resumes.
	 */
	pt->r2 = 0;
	pt->r3 = 0;
	pt->r14 = 0;
	memset(&pt->r16, 0, 16*8);	/* clear r16-r31 */
	memset(&pt->f6, 0, 6*16);	/* clear f6-f11 */
	pt->b7 = 0;
	pt->ar_ccv = 0;
	pt->ar_csd = 0;
	pt->ar_ssd = 0;
}

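/*
 * (r4-r7 are preserved registers whose NaT bits live in the unwind
 * state rather than in the scratch unat word, hence the extra
 * unw_get_gr()/unw_set_gr() loop below on top of the
 * ia64_{get,put}_scratch_nat_bits() conversion for the scratch regs.)
 */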
static int
access_nat_bits (struct task_struct *child, struct pt_regs *pt,
		 struct unw_frame_info *info,
		 unsigned long *data, int write_access)
{
	unsigned long regnum, nat_bits, scratch_unat, dummy = 0;
	char nat = 0;

	if (write_access) {
		nat_bits = *data;
		scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits);
		if (unw_set_ar(info, UNW_AR_UNAT, scratch_unat) < 0) {
			dprintk("ptrace: failed to set ar.unat\n");
			return -1;
		}
		for (regnum = 4; regnum <= 7; ++regnum) {
			unw_get_gr(info, regnum, &dummy, &nat);
			unw_set_gr(info, regnum, dummy,
				   (nat_bits >> regnum) & 1);
		}
	} else {
		if (unw_get_ar(info, UNW_AR_UNAT, &scratch_unat) < 0) {
			dprintk("ptrace: failed to read ar.unat\n");
			return -1;
		}
		nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat);
		for (regnum = 4; regnum <= 7; ++regnum) {
			unw_get_gr(info, regnum, &dummy, &nat);
			nat_bits |= (nat != 0) << regnum;
		}
		*data = nat_bits;
	}
	return 0;
}

static int
access_elf_reg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access);

static long
ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
{
	unsigned long psr, ec, lc, rnat, bsp, cfm, nat_bits, val;
	struct unw_frame_info info;
	struct ia64_fpreg fpval;
	struct switch_stack *sw;
	struct pt_regs *pt;
	long ret, retval = 0;
	char nat = 0;
	int i;

	if (!access_ok(ppr, sizeof(struct pt_all_user_regs)))
		return -EIO;

	pt = task_pt_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);
	unw_init_from_blocked_task(&info, child);
	if (unw_unwind_to_user(&info) < 0) {
		return -EIO;
	}

	if (((unsigned long) ppr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address %p\n", ppr);
		return -EIO;
	}

	if (access_elf_reg(child, &info, ELF_CR_IPSR_OFFSET, &psr, 0) < 0 ||
	    access_elf_reg(child, &info, ELF_AR_EC_OFFSET, &ec, 0) < 0 ||
	    access_elf_reg(child, &info, ELF_AR_LC_OFFSET, &lc, 0) < 0 ||
	    access_elf_reg(child, &info, ELF_AR_RNAT_OFFSET, &rnat, 0) < 0 ||
	    access_elf_reg(child, &info, ELF_AR_BSP_OFFSET, &bsp, 0) < 0 ||
	    access_elf_reg(child, &info, ELF_CFM_OFFSET, &cfm, 0) < 0 ||
	    access_elf_reg(child, &info, ELF_NAT_OFFSET, &nat_bits, 0) < 0)
		return -EIO;

	/* control regs */

	retval |= __put_user(pt->cr_iip, &ppr->cr_iip);
	retval |= __put_user(psr, &ppr->cr_ipsr);

	/* app regs */

	retval |= __put_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
	retval |= __put_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
	retval |= __put_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
	retval |= __put_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
	retval |= __put_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
	retval |= __put_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);

	retval |= __put_user(ec, &ppr->ar[PT_AUR_EC]);
	retval |= __put_user(lc, &ppr->ar[PT_AUR_LC]);
	retval |= __put_user(rnat, &ppr->ar[PT_AUR_RNAT]);
	retval |= __put_user(bsp, &ppr->ar[PT_AUR_BSP]);
	retval |= __put_user(cfm, &ppr->cfm);

	/* gr1-gr3 */

	retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long));
	retval |= __copy_to_user(&ppr->gr[2], &pt->r2, sizeof(long) * 2);

	/* gr4-gr7 */

	for (i = 4; i < 8; i++) {
		if (unw_access_gr(&info, i, &val, &nat, 0) < 0)
			return -EIO;
		retval |= __put_user(val, &ppr->gr[i]);
	}

	/* gr8-gr11 */

	retval |= __copy_to_user(&ppr->gr[8], &pt->r8, sizeof(long) * 4);

	/* gr12-gr15 */

	retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 2);
	retval |= __copy_to_user(&ppr->gr[14], &pt->r14, sizeof(long));
	retval |= __copy_to_user(&ppr->gr[15], &pt->r15, sizeof(long));

	/* gr16-gr31 */

	retval |= __copy_to_user(&ppr->gr[16], &pt->r16, sizeof(long) * 16);

	/* b0 */

	retval |= __put_user(pt->b0, &ppr->br[0]);

	/* b1-b5 */

	for (i = 1; i < 6; i++) {
		if (unw_access_br(&info, i, &val, 0) < 0)
			return -EIO;
		retval |= __put_user(val, &ppr->br[i]);
	}

	/* b6-b7 */

	retval |= __put_user(pt->b6, &ppr->br[6]);
	retval |= __put_user(pt->b7, &ppr->br[7]);

	/* fr2-fr5 */

	for (i = 2; i < 6; i++) {
		if (unw_get_fr(&info, i, &fpval) < 0)
			return -EIO;
		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
	}

	/* fr6-fr11 */

	retval |= __copy_to_user(&ppr->fr[6], &pt->f6,
				 sizeof(struct ia64_fpreg) * 6);

	/* fp scratch regs(12-15) */

	retval |= __copy_to_user(&ppr->fr[12], &sw->f12,
				 sizeof(struct ia64_fpreg) * 4);

	/* fr16-fr31 */

	for (i = 16; i < 32; i++) {
		if (unw_get_fr(&info, i, &fpval) < 0)
			return -EIO;
		retval |= __copy_to_user(&ppr->fr[i], &fpval, sizeof (fpval));
	}

	/* fph */

	ia64_flush_fph(child);
	retval |= __copy_to_user(&ppr->fr[32], &child->thread.fph,
				 sizeof(ppr->fr[32]) * 96);

	/* preds */

	retval |= __put_user(pt->pr, &ppr->pr);

	/* nat bits */

	retval |= __put_user(nat_bits, &ppr->nat);

	ret = retval ? -EIO : 0;
	return ret;
}

static long
ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
{
	unsigned long psr, rsc, ec, lc, rnat, bsp, cfm, nat_bits, val = 0;
	struct unw_frame_info info;
	struct switch_stack *sw;
	struct ia64_fpreg fpval;
	struct pt_regs *pt;
	long retval = 0;
	int i;

	memset(&fpval, 0, sizeof(fpval));

	if (!access_ok(ppr, sizeof(struct pt_all_user_regs)))
		return -EIO;

	pt = task_pt_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);
	unw_init_from_blocked_task(&info, child);
	if (unw_unwind_to_user(&info) < 0) {
		return -EIO;
	}

	if (((unsigned long) ppr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address %p\n", ppr);
		return -EIO;
	}

	/* control regs */

	retval |= __get_user(pt->cr_iip, &ppr->cr_iip);
	retval |= __get_user(psr, &ppr->cr_ipsr);

	/* app regs */

	retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
	retval |= __get_user(rsc, &ppr->ar[PT_AUR_RSC]);
	retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
	retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
	retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
	retval |= __get_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);

	retval |= __get_user(ec, &ppr->ar[PT_AUR_EC]);
	retval |= __get_user(lc, &ppr->ar[PT_AUR_LC]);
	retval |= __get_user(rnat, &ppr->ar[PT_AUR_RNAT]);
	retval |= __get_user(bsp, &ppr->ar[PT_AUR_BSP]);
	retval |= __get_user(cfm, &ppr->cfm);

	/* gr1-gr3 */

	retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long));
	retval |= __copy_from_user(&pt->r2, &ppr->gr[2], sizeof(long) * 2);

	/* gr4-gr7 */

	for (i = 4; i < 8; i++) {
		retval |= __get_user(val, &ppr->gr[i]);
		/* NaT bit will be set via PT_NAT_BITS: */
		if (unw_set_gr(&info, i, val, 0) < 0)
			return -EIO;
	}

	/* gr8-gr11 */

	retval |= __copy_from_user(&pt->r8, &ppr->gr[8], sizeof(long) * 4);

	/* gr12-gr15 */

	retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 2);
	retval |= __copy_from_user(&pt->r14, &ppr->gr[14], sizeof(long));
	retval |= __copy_from_user(&pt->r15, &ppr->gr[15], sizeof(long));

	/* gr16-gr31 */

	retval |= __copy_from_user(&pt->r16, &ppr->gr[16], sizeof(long) * 16);

	/* b0 */

	retval |= __get_user(pt->b0, &ppr->br[0]);

	/* b1-b5 */

	for (i = 1; i < 6; i++) {
		retval |= __get_user(val, &ppr->br[i]);
		unw_set_br(&info, i, val);
	}

	/* b6-b7 */

	retval |= __get_user(pt->b6, &ppr->br[6]);
	retval |= __get_user(pt->b7, &ppr->br[7]);

	/* fr2-fr5 */

	for (i = 2; i < 6; i++) {
		retval |= __copy_from_user(&fpval, &ppr->fr[i], sizeof(fpval));
		if (unw_set_fr(&info, i, fpval) < 0)
			return -EIO;
	}

	/* fr6-fr11 */

	retval |= __copy_from_user(&pt->f6, &ppr->fr[6],
				   sizeof(ppr->fr[6]) * 6);

	/* fp scratch regs(12-15) */

	retval |= __copy_from_user(&sw->f12, &ppr->fr[12],
				   sizeof(ppr->fr[12]) * 4);

	/* fr16-fr31 */

	for (i = 16; i < 32; i++) {
		retval |= __copy_from_user(&fpval, &ppr->fr[i],
					   sizeof(fpval));
		if (unw_set_fr(&info, i, fpval) < 0)
			return -EIO;
	}

	/* fph */

	ia64_sync_fph(child);
	retval |= __copy_from_user(&child->thread.fph, &ppr->fr[32],
				   sizeof(ppr->fr[32]) * 96);

	/* preds */

	retval |= __get_user(pt->pr, &ppr->pr);

	/* nat bits */

	retval |= __get_user(nat_bits, &ppr->nat);

	retval |= access_elf_reg(child, &info, ELF_CR_IPSR_OFFSET, &psr, 1);
	retval |= access_elf_reg(child, &info, ELF_AR_RSC_OFFSET, &rsc, 1);
	retval |= access_elf_reg(child, &info, ELF_AR_EC_OFFSET, &ec, 1);
	retval |= access_elf_reg(child, &info, ELF_AR_LC_OFFSET, &lc, 1);
	retval |= access_elf_reg(child, &info, ELF_AR_RNAT_OFFSET, &rnat, 1);
	retval |= access_elf_reg(child, &info, ELF_AR_BSP_OFFSET, &bsp, 1);
	retval |= access_elf_reg(child, &info, ELF_CFM_OFFSET, &cfm, 1);
	retval |= access_elf_reg(child, &info, ELF_NAT_OFFSET, &nat_bits, 1);

	return retval ? -EIO : 0;
}

void
user_enable_single_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->ss = 1;
}

void
user_enable_block_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	set_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->tb = 1;
}

void
user_disable_single_step (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));

	/* make sure the single step/taken-branch trap bits are not set: */
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
	child_psr->ss = 0;
	child_psr->tb = 0;
}

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure the single step bit is not set.
 */
void
ptrace_disable (struct task_struct *child)
{
	user_disable_single_step(child);
}

static int
access_uarea (struct task_struct *child, unsigned long addr,
	      unsigned long *data, int write_access);

long
arch_ptrace (struct task_struct *child, long request,
	     unsigned long addr, unsigned long data)
{
	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		/* read word at location addr */
		if (ptrace_access_vm(child, addr, &data, sizeof(data),
				FOLL_FORCE)
		    != sizeof(data))
			return -EIO;
		/* ensure return value is not mistaken for error code */
		force_successful_syscall_return();
		return data;

	/* PTRACE_POKETEXT and PTRACE_POKEDATA are handled
	 * by the generic ptrace_request().
	 */

	case PTRACE_PEEKUSR:
		/* read the word at addr in the USER area */
		if (access_uarea(child, addr, &data, 0) < 0)
			return -EIO;
		/* ensure return value is not mistaken for error code */
		force_successful_syscall_return();
		return data;

	case PTRACE_POKEUSR:
		/* write the word at addr in the USER area */
		if (access_uarea(child, addr, &data, 1) < 0)
			return -EIO;
		return 0;

	case PTRACE_OLD_GETSIGINFO:
		/* for backwards-compatibility */
		return ptrace_request(child, PTRACE_GETSIGINFO, addr, data);

	case PTRACE_OLD_SETSIGINFO:
		/* for backwards-compatibility */
		return ptrace_request(child, PTRACE_SETSIGINFO, addr, data);

	case PTRACE_GETREGS:
		return ptrace_getregs(child,
				      (struct pt_all_user_regs __user *) data);

	case PTRACE_SETREGS:
		return ptrace_setregs(child,
				      (struct pt_all_user_regs __user *) data);

	default:
		return ptrace_request(child, request, addr, data);
	}
}


/* "asmlinkage" so the input arguments are preserved... */

asmlinkage long
syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
		     long arg4, long arg5, long arg6, long arg7,
		     struct pt_regs regs)
{
	if (test_thread_flag(TIF_SYSCALL_TRACE))
		if (ptrace_report_syscall_entry(&regs))
			return -ENOSYS;

	/* copy user rbs to kernel rbs */
	if (test_thread_flag(TIF_RESTORE_RSE))
		ia64_sync_krbs();

	audit_syscall_entry(regs.r15, arg0, arg1, arg2, arg3);

	return 0;
}

/* "asmlinkage" so the input arguments are preserved... */

asmlinkage void
syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
		     long arg4, long arg5, long arg6, long arg7,
		     struct pt_regs regs)
{
	int step;

	audit_syscall_exit(&regs);

	step = test_thread_flag(TIF_SINGLESTEP);
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		ptrace_report_syscall_exit(&regs, step);

	/* copy user rbs to kernel rbs */
	if (test_thread_flag(TIF_RESTORE_RSE))
		ia64_sync_krbs();
}

/* Utrace implementation starts here */
struct regset_get {
	void *kbuf;
	void __user *ubuf;
};

struct regset_set {
	const void *kbuf;
	const void __user *ubuf;
};

struct regset_getset {
	struct task_struct *target;
	const struct user_regset *regset;
	union {
		struct regset_get get;
		struct regset_set set;
	} u;
	unsigned int pos;
	unsigned int count;
	int ret;
};

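/*
 * (Offsets of r1-r31 within pt_regs, indexed by register number.  The
 * -1 entries mark registers with no pt_regs slot: r0 is hardwired to
 * zero and r4-r7 are preserved registers, so access_elf_gpreg() falls
 * back to the unwinder for those.)
 */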
static const ptrdiff_t pt_offsets[32] =
{
#define R(n) offsetof(struct pt_regs, r##n)
	[0] = -1, R(1), R(2), R(3),
	[4] = -1, [5] = -1, [6] = -1, [7] = -1,
	R(8), R(9), R(10), R(11), R(12), R(13), R(14), R(15),
	R(16), R(17), R(18), R(19), R(20), R(21), R(22), R(23),
	R(24), R(25), R(26), R(27), R(28), R(29), R(30), R(31),
#undef R
};

static int
access_elf_gpreg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	struct pt_regs *pt = task_pt_regs(target);
	unsigned reg = addr / sizeof(unsigned long);
	ptrdiff_t d = pt_offsets[reg];

	if (d >= 0) {
		unsigned long *ptr = (void *)pt + d;
		if (write_access)
			*ptr = *data;
		else
			*data = *ptr;
		return 0;
	} else {
		char nat = 0;
		if (write_access) {
			/* read NaT bit first: */
			unsigned long dummy;
			int ret = unw_get_gr(info, reg, &dummy, &nat);
			if (ret < 0)
				return ret;
		}
		return unw_access_gr(info, reg, data, &nat, write_access);
	}
}

static int
access_elf_breg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	struct pt_regs *pt;
	unsigned long *ptr = NULL;

	pt = task_pt_regs(target);
	switch (addr) {
	case ELF_BR_OFFSET(0):
		ptr = &pt->b0;
		break;
	case ELF_BR_OFFSET(1) ... ELF_BR_OFFSET(5):
		return unw_access_br(info, (addr - ELF_BR_OFFSET(0))/8,
				     data, write_access);
	case ELF_BR_OFFSET(6):
		ptr = &pt->b6;
		break;
	case ELF_BR_OFFSET(7):
		ptr = &pt->b7;
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}

static int
access_elf_areg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	struct pt_regs *pt;
	unsigned long cfm, urbs_end;
	unsigned long *ptr = NULL;

	pt = task_pt_regs(target);
	if (addr >= ELF_AR_RSC_OFFSET && addr <= ELF_AR_SSD_OFFSET) {
		switch (addr) {
		case ELF_AR_RSC_OFFSET:
			/* force PL3 */
			if (write_access)
				pt->ar_rsc = *data | (3 << 2);
			else
				*data = pt->ar_rsc;
			return 0;
		case ELF_AR_BSP_OFFSET:
			/*
			 * By convention, we use PT_AR_BSP to refer to
			 * the end of the user-level backing store.
			 * Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof)
			 * to get the real value of ar.bsp at the time
			 * the kernel was entered.
			 *
			 * Furthermore, when changing the contents of
			 * PT_AR_BSP (or PT_CFM) while the task is
			 * blocked in a system call, convert the state
			 * so that the non-system-call exit
			 * path is used.  This ensures that the proper
			 * state will be picked up when resuming
			 * execution.  However, it *also* means that
			 * once we write PT_AR_BSP/PT_CFM, it won't be
			 * possible to modify the syscall arguments of
			 * the pending system call any longer.  This
			 * shouldn't be an issue because modifying
			 * PT_AR_BSP/PT_CFM generally implies that
			 * we're either abandoning the pending system
			 * call or that we defer its re-execution
			 * (e.g., due to GDB doing an inferior
			 * function call).
			 */
			urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
			if (write_access) {
				if (*data != urbs_end) {
					if (in_syscall(pt))
						convert_to_non_syscall(target,
								       pt,
								       cfm);
					/*
					 * Simulate user-level write
					 * of ar.bsp:
					 */
					pt->loadrs = 0;
					pt->ar_bspstore = *data;
				}
			} else
				*data = urbs_end;
			return 0;
		case ELF_AR_BSPSTORE_OFFSET:
			ptr = &pt->ar_bspstore;
			break;
		case ELF_AR_RNAT_OFFSET:
			ptr = &pt->ar_rnat;
			break;
		case ELF_AR_CCV_OFFSET:
			ptr = &pt->ar_ccv;
			break;
		case ELF_AR_UNAT_OFFSET:
			ptr = &pt->ar_unat;
			break;
		case ELF_AR_FPSR_OFFSET:
			ptr = &pt->ar_fpsr;
			break;
		case ELF_AR_PFS_OFFSET:
			ptr = &pt->ar_pfs;
			break;
		case ELF_AR_LC_OFFSET:
			return unw_access_ar(info, UNW_AR_LC, data,
					     write_access);
		case ELF_AR_EC_OFFSET:
			return unw_access_ar(info, UNW_AR_EC, data,
					     write_access);
		case ELF_AR_CSD_OFFSET:
			ptr = &pt->ar_csd;
			break;
		case ELF_AR_SSD_OFFSET:
			ptr = &pt->ar_ssd;
		}
	} else if (addr >= ELF_CR_IIP_OFFSET && addr <= ELF_CR_IPSR_OFFSET) {
		switch (addr) {
		case ELF_CR_IIP_OFFSET:
			ptr = &pt->cr_iip;
			break;
		case ELF_CFM_OFFSET:
			urbs_end = ia64_get_user_rbs_end(target, pt, &cfm);
			if (write_access) {
				if (((cfm ^ *data) & PFM_MASK) != 0) {
					if (in_syscall(pt))
						convert_to_non_syscall(target,
								       pt,
								       cfm);
					pt->cr_ifs = ((pt->cr_ifs & ~PFM_MASK)
						      | (*data & PFM_MASK));
				}
			} else
				*data = cfm;
			return 0;
		case ELF_CR_IPSR_OFFSET:
			if (write_access) {
				unsigned long tmp = *data;
				/* psr.ri==3 is a reserved value: SDM 2:25 */
				if ((tmp & IA64_PSR_RI) == IA64_PSR_RI)
					tmp &= ~IA64_PSR_RI;
				pt->cr_ipsr = ((tmp & IPSR_MASK)
					       | (pt->cr_ipsr & ~IPSR_MASK));
			} else
				*data = (pt->cr_ipsr & IPSR_MASK);
			return 0;
		}
	} else if (addr == ELF_NAT_OFFSET)
		return access_nat_bits(target, pt, info,
				       data, write_access);
	else if (addr == ELF_PR_OFFSET)
		ptr = &pt->pr;
	else
		return -1;

	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;

	return 0;
}

static int
access_elf_reg(struct task_struct *target, struct unw_frame_info *info,
		unsigned long addr, unsigned long *data, int write_access)
{
	if (addr >= ELF_GR_OFFSET(1) && addr <= ELF_GR_OFFSET(31))
		return access_elf_gpreg(target, info, addr, data, write_access);
	else if (addr >= ELF_BR_OFFSET(0) && addr <= ELF_BR_OFFSET(7))
		return access_elf_breg(target, info, addr, data, write_access);
	else
		return access_elf_areg(target, info, addr, data, write_access);
}

struct regset_membuf {
	struct membuf to;
	int ret;
};

static void do_gpregs_get(struct unw_frame_info *info, void *arg)
{
	struct regset_membuf *dst = arg;
	struct membuf to = dst->to;
	unsigned int n;
	elf_greg_t reg;

	if (unw_unwind_to_user(info) < 0)
		return;

	/*
	 * coredump format:
	 *      r0-r31
	 *      NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
	 *      predicate registers (p0-p63)
	 *      b0-b7
	 *      ip cfm user-mask
	 *      ar.rsc ar.bsp ar.bspstore ar.rnat
	 *      ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
	 */

	/* Skip r0 */
	membuf_zero(&to, 8);
	for (n = 8; to.left && n < ELF_AR_END_OFFSET; n += 8) {
		if (access_elf_reg(info->task, info, n, &reg, 0) < 0) {
			dst->ret = -EIO;
			return;
		}
		membuf_store(&to, reg);
	}
}

static void do_gpregs_set(struct unw_frame_info *info, void *arg)
{
	struct regset_getset *dst = arg;

	if (unw_unwind_to_user(info) < 0)
		return;

	if (!dst->count)
		return;
	/* Skip r0 */
	if (dst->pos < ELF_GR_OFFSET(1)) {
		dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
						     &dst->u.set.kbuf,
						     &dst->u.set.ubuf,
						     0, ELF_GR_OFFSET(1));
		if (dst->ret)
			return;
	}

	while (dst->count && dst->pos < ELF_AR_END_OFFSET) {
		unsigned int n, from, to;
		elf_greg_t tmp[16];

		from = dst->pos;
		to = from + sizeof(tmp);
		if (to > ELF_AR_END_OFFSET)
			to = ELF_AR_END_OFFSET;
		/* get up to 16 values */
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				from, to);
		if (dst->ret)
			return;
		/* now copy them into registers */
		for (n = 0; from < dst->pos; from += sizeof(elf_greg_t), n++)
			if (access_elf_reg(dst->target, info, from,
						&tmp[n], 1) < 0) {
				dst->ret = -EIO;
				return;
			}
	}
}

#define ELF_FP_OFFSET(i)	(i * sizeof(elf_fpreg_t))

static void do_fpregs_get(struct unw_frame_info *info, void *arg)
{
	struct task_struct *task = info->task;
	struct regset_membuf *dst = arg;
	struct membuf to = dst->to;
	elf_fpreg_t reg;
	unsigned int n;

	if (unw_unwind_to_user(info) < 0)
		return;

	/* Skip pos 0 and 1 */
	membuf_zero(&to, 2 * sizeof(elf_fpreg_t));

	/* fr2-fr31 */
	for (n = 2; to.left && n < 32; n++) {
		if (unw_get_fr(info, n, &reg)) {
			dst->ret = -EIO;
			return;
		}
		membuf_write(&to, &reg, sizeof(reg));
	}

	/* fph */
	if (!to.left)
		return;

	ia64_flush_fph(task);
	if (task->thread.flags & IA64_THREAD_FPH_VALID)
		membuf_write(&to, &task->thread.fph, 96 * sizeof(reg));
	else
		membuf_zero(&to, 96 * sizeof(reg));
}

static void do_fpregs_set(struct unw_frame_info *info, void *arg)
{
	struct regset_getset *dst = arg;
	elf_fpreg_t fpreg, tmp[30];
	int index, start, end;

	if (unw_unwind_to_user(info) < 0)
		return;

	/* Skip pos 0 and 1 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(2)) {
		dst->ret = user_regset_copyin_ignore(&dst->pos, &dst->count,
						     &dst->u.set.kbuf,
						     &dst->u.set.ubuf,
						     0, ELF_FP_OFFSET(2));
		if (dst->count == 0 || dst->ret)
			return;
	}

	/* fr2-fr31 */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(32)) {
		start = dst->pos;
		end = min(((unsigned int)ELF_FP_OFFSET(32)),
			 dst->pos + dst->count);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
				&dst->u.set.kbuf, &dst->u.set.ubuf, tmp,
				ELF_FP_OFFSET(2), ELF_FP_OFFSET(32));
		if (dst->ret)
			return;

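		/*
		 * (An elf_fpreg_t is 16 bytes.  When the user buffer
		 * starts or ends in the middle of a register, the
		 * untouched half must keep its current value, so that
		 * register is fetched with unw_get_fr() and the
		 * missing half merged into tmp[] before writing back.)
		 */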
		if (start & 0xF) { /* only write high part */
			if (unw_get_fr(info, start / sizeof(elf_fpreg_t),
					 &fpreg)) {
				dst->ret = -EIO;
				return;
			}
			tmp[start / sizeof(elf_fpreg_t) - 2].u.bits[0]
				= fpreg.u.bits[0];
			start &= ~0xFUL;
		}
		if (end & 0xF) { /* only write low part */
			if (unw_get_fr(info, end / sizeof(elf_fpreg_t),
					&fpreg)) {
				dst->ret = -EIO;
				return;
			}
			tmp[end / sizeof(elf_fpreg_t) - 2].u.bits[1]
				= fpreg.u.bits[1];
			end = (end + 0xF) & ~0xFUL;
		}

		for ( ; start < end ; start += sizeof(elf_fpreg_t)) {
			index = start / sizeof(elf_fpreg_t);
			if (unw_set_fr(info, index, tmp[index - 2])) {
				dst->ret = -EIO;
				return;
			}
		}
		if (dst->ret || dst->count == 0)
			return;
	}

	/* fph */
	if (dst->count > 0 && dst->pos < ELF_FP_OFFSET(128)) {
		ia64_sync_fph(dst->target);
		dst->ret = user_regset_copyin(&dst->pos, &dst->count,
						&dst->u.set.kbuf,
						&dst->u.set.ubuf,
						&dst->target->thread.fph,
						ELF_FP_OFFSET(32), -1);
	}
}

static void
unwind_and_call(void (*call)(struct unw_frame_info *, void *),
	       struct task_struct *target, void *data)
{
	if (target == current)
		unw_init_running(call, data);
	else {
		struct unw_frame_info info;
		memset(&info, 0, sizeof(info));
		unw_init_from_blocked_task(&info, target);
		(*call)(&info, data);
	}
}

static int
do_regset_call(void (*call)(struct unw_frame_info *, void *),
	       struct task_struct *target,
	       const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       const void *kbuf, const void __user *ubuf)
{
	struct regset_getset info = { .target = target, .regset = regset,
				      .pos = pos, .count = count,
				      .u.set = { .kbuf = kbuf, .ubuf = ubuf },
				      .ret = 0 };
	unwind_and_call(call, target, &info);
	return info.ret;
}

static int
gpregs_get(struct task_struct *target,
	   const struct user_regset *regset,
	   struct membuf to)
{
	struct regset_membuf info = {.to = to};
	unwind_and_call(do_gpregs_get, target, &info);
	return info.ret;
}

static int gpregs_set(struct task_struct *target,
		const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	return do_regset_call(do_gpregs_set, target, regset, pos, count,
		kbuf, ubuf);
}

static void do_gpregs_writeback(struct unw_frame_info *info, void *arg)
{
	do_sync_rbs(info, ia64_sync_user_rbs);
}

/*
 * This is called to write back the register backing store.
 * ptrace does this before it stops, so that a tracer reading the user
 * memory after the thread stops will get the current register data.
 */
static int
gpregs_writeback(struct task_struct *target,
		 const struct user_regset *regset,
		 int now)
{
	if (test_and_set_tsk_thread_flag(target, TIF_RESTORE_RSE))
		return 0;
	set_notify_resume(target);
	return do_regset_call(do_gpregs_writeback, target, regset, 0, 0,
		NULL, NULL);
}

static int
fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	return (target->thread.flags & IA64_THREAD_FPH_VALID) ? 128 : 32;
}

static int fpregs_get(struct task_struct *target,
		const struct user_regset *regset,
		struct membuf to)
{
	struct regset_membuf info = {.to = to};
	unwind_and_call(do_fpregs_get, target, &info);
	return info.ret;
}

static int fpregs_set(struct task_struct *target,
		const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	return do_regset_call(do_fpregs_set, target, regset, pos, count,
		kbuf, ubuf);
}

static int
access_uarea(struct task_struct *child, unsigned long addr,
	     unsigned long *data, int write_access)
{
	unsigned int pos = -1; /* an invalid value */
	unsigned long *ptr, regnum;

	if ((addr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address 0x%lx\n", addr);
		return -1;
	}
	if ((addr >= PT_NAT_BITS + 8 && addr < PT_F2) ||
		(addr >= PT_R7 + 8 && addr < PT_B1) ||
		(addr >= PT_AR_LC + 8 && addr < PT_CR_IPSR) ||
		(addr >= PT_AR_SSD + 8 && addr < PT_DBR)) {
		dprintk("ptrace: rejecting access to register "
			"address 0x%lx\n", addr);
		return -1;
	}

	switch (addr) {
	case PT_F32 ... (PT_F127 + 15):
		pos = addr - PT_F32 + ELF_FP_OFFSET(32);
		break;
	case PT_F2 ... (PT_F5 + 15):
		pos = addr - PT_F2 + ELF_FP_OFFSET(2);
		break;
	case PT_F10 ... (PT_F31 + 15):
		pos = addr - PT_F10 + ELF_FP_OFFSET(10);
		break;
	case PT_F6 ... (PT_F9 + 15):
		pos = addr - PT_F6 + ELF_FP_OFFSET(6);
		break;
	}

	if (pos != -1) {
		unsigned reg = pos / sizeof(elf_fpreg_t);
		int which_half = (pos / sizeof(unsigned long)) & 1;

		if (reg < 32) { /* fr2-fr31 */
			struct unw_frame_info info;
			elf_fpreg_t fpreg;

			memset(&info, 0, sizeof(info));
			unw_init_from_blocked_task(&info, child);
			if (unw_unwind_to_user(&info) < 0)
				return 0;

			if (unw_get_fr(&info, reg, &fpreg))
				return -1;
			if (write_access) {
				fpreg.u.bits[which_half] = *data;
				if (unw_set_fr(&info, reg, fpreg))
					return -1;
			} else {
				*data = fpreg.u.bits[which_half];
			}
		} else { /* fph */
			elf_fpreg_t *p = &child->thread.fph[reg - 32];
			unsigned long *bits = &p->u.bits[which_half];

			ia64_sync_fph(child);
			if (write_access)
				*bits = *data;
			else if (child->thread.flags & IA64_THREAD_FPH_VALID)
				*data = *bits;
			else
				*data = 0;
		}
		return 0;
	}

	switch (addr) {
	case PT_NAT_BITS:
		pos = ELF_NAT_OFFSET;
		break;
	case PT_R4 ... PT_R7:
		pos = addr - PT_R4 + ELF_GR_OFFSET(4);
		break;
	case PT_B1 ... PT_B5:
		pos = addr - PT_B1 + ELF_BR_OFFSET(1);
		break;
	case PT_AR_EC:
		pos = ELF_AR_EC_OFFSET;
		break;
	case PT_AR_LC:
		pos = ELF_AR_LC_OFFSET;
		break;
	case PT_CR_IPSR:
		pos = ELF_CR_IPSR_OFFSET;
		break;
	case PT_CR_IIP:
		pos = ELF_CR_IIP_OFFSET;
		break;
	case PT_CFM:
		pos = ELF_CFM_OFFSET;
		break;
	case PT_AR_UNAT:
		pos = ELF_AR_UNAT_OFFSET;
		break;
	case PT_AR_PFS:
		pos = ELF_AR_PFS_OFFSET;
		break;
	case PT_AR_RSC:
		pos = ELF_AR_RSC_OFFSET;
		break;
	case PT_AR_RNAT:
		pos = ELF_AR_RNAT_OFFSET;
		break;
	case PT_AR_BSPSTORE:
		pos = ELF_AR_BSPSTORE_OFFSET;
		break;
	case PT_PR:
		pos = ELF_PR_OFFSET;
		break;
	case PT_B6:
		pos = ELF_BR_OFFSET(6);
		break;
	case PT_AR_BSP:
		pos = ELF_AR_BSP_OFFSET;
		break;
	case PT_R1 ... PT_R3:
		pos = addr - PT_R1 + ELF_GR_OFFSET(1);
		break;
	case PT_R12 ... PT_R15:
		pos = addr - PT_R12 + ELF_GR_OFFSET(12);
		break;
	case PT_R8 ... PT_R11:
		pos = addr - PT_R8 + ELF_GR_OFFSET(8);
		break;
	case PT_R16 ... PT_R31:
		pos = addr - PT_R16 + ELF_GR_OFFSET(16);
		break;
	case PT_AR_CCV:
		pos = ELF_AR_CCV_OFFSET;
		break;
	case PT_AR_FPSR:
		pos = ELF_AR_FPSR_OFFSET;
		break;
	case PT_B0:
		pos = ELF_BR_OFFSET(0);
		break;
	case PT_B7:
		pos = ELF_BR_OFFSET(7);
		break;
	case PT_AR_CSD:
		pos = ELF_AR_CSD_OFFSET;
		break;
	case PT_AR_SSD:
		pos = ELF_AR_SSD_OFFSET;
		break;
	}

	if (pos != -1) {
		struct unw_frame_info info;

		memset(&info, 0, sizeof(info));
		unw_init_from_blocked_task(&info, child);
		if (unw_unwind_to_user(&info) < 0)
			return 0;

		return access_elf_reg(child, &info, pos, data, write_access);
	}

	/* access debug registers */
	if (addr >= PT_IBR) {
		regnum = (addr - PT_IBR) >> 3;
		ptr = &child->thread.ibr[0];
	} else {
		regnum = (addr - PT_DBR) >> 3;
		ptr = &child->thread.dbr[0];
	}

	if (regnum >= 8) {
		dprintk("ptrace: rejecting access to register "
			"address 0x%lx\n", addr);
		return -1;
	}

	if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
		child->thread.flags |= IA64_THREAD_DBG_VALID;
		memset(child->thread.dbr, 0,
		       sizeof(child->thread.dbr));
		memset(child->thread.ibr, 0,
		       sizeof(child->thread.ibr));
	}

	ptr += regnum;

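	/*
	 * (In the odd-numbered ibr/dbr mask registers, bits 56-58
	 * appear to be the plm0-plm2 enables that would arm the
	 * breakpoint at privilege levels 0-2; masking them off below
	 * leaves only the user-level plm3 enable settable.)
	 */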
	if ((regnum & 1) && write_access) {
		/* don't let the user set kernel-level breakpoints: */
		*ptr = *data & ~(7UL << 56);
		return 0;
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}

static const struct user_regset native_regsets[] = {
	{
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(elf_greg_t), .align = sizeof(elf_greg_t),
		.regset_get = gpregs_get, .set = gpregs_set,
		.writeback = gpregs_writeback
	},
	{
		.core_note_type = NT_PRFPREG,
		.n = ELF_NFPREG,
		.size = sizeof(elf_fpreg_t), .align = sizeof(elf_fpreg_t),
		.regset_get = fpregs_get, .set = fpregs_set, .active = fpregs_active
	},
};

static const struct user_regset_view user_ia64_view = {
	.name = "ia64",
	.e_machine = EM_IA_64,
	.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
};

const struct user_regset_view *task_user_regset_view(struct task_struct *tsk)
{
	return &user_ia64_view;
}

struct syscall_get_args {
	unsigned int i;
	unsigned int n;
	unsigned long *args;
	struct pt_regs *regs;
};

static void syscall_get_args_cb(struct unw_frame_info *info, void *data)
{
	struct syscall_get_args *args = data;
	struct pt_regs *pt = args->regs;
	unsigned long *krbs, cfm, ndirty, nlocals, nouts;
	int i, count;

	if (unw_unwind_to_user(info) < 0)
		return;

	/*
	 * We get here via a few paths:
	 * - break instruction: cfm is shared with caller.
	 *   syscall args are in out= regs, locals are non-empty.
	 * - epc instruction: cfm is set by br.call,
	 *   locals don't exist.
	 *
	 * For both cases the arguments are reachable in cfm.sof - cfm.sol.
	 * CFM: [ ... | sor: 17..14 | sol : 13..7 | sof : 6..0 ]
	 */
	cfm = pt->cr_ifs;
	nlocals = (cfm >> 7) & 0x7f;	/* aka sol */
	nouts = (cfm & 0x7f) - nlocals;	/* aka sof - sol */
	krbs = (unsigned long *)info->task + IA64_RBS_OFFSET/8;
	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));

	count = 0;
	if (in_syscall(pt))
		count = min_t(int, args->n, nouts);

	/* Iterate over outs. */
	for (i = 0; i < count; i++) {
		int j = ndirty + nlocals + i + args->i;
		args->args[i] = *ia64_rse_skip_regs(krbs, j);
	}

	while (i < args->n) {
		args->args[i] = 0;
		i++;
	}
}

void syscall_get_arguments(struct task_struct *task,
	struct pt_regs *regs, unsigned long *args)
{
	struct syscall_get_args data = {
		.i = 0,
		.n = 6,
		.args = args,
		.regs = regs,
	};

	if (task == current)
		unw_init_running(syscall_get_args_cb, &data);
	else {
		struct unw_frame_info ufi;
		memset(&ufi, 0, sizeof(ufi));
		unw_init_from_blocked_task(&ufi, task);
		syscall_get_args_cb(&ufi, &data);
	}
}