1 // SPDX-License-Identifier: GPL-2.0-only
3 * Based on arch/arm/kernel/ptrace.c
6 * edited by Linus Torvalds
7 * ARM modifications Copyright (C) 2000 Russell King
8 * Copyright (C) 2012 ARM Ltd.
11 #include <linux/audit.h>
12 #include <linux/compat.h>
13 #include <linux/kernel.h>
14 #include <linux/sched/signal.h>
15 #include <linux/sched/task_stack.h>
17 #include <linux/nospec.h>
18 #include <linux/smp.h>
19 #include <linux/ptrace.h>
20 #include <linux/user.h>
21 #include <linux/seccomp.h>
22 #include <linux/security.h>
23 #include <linux/init.h>
24 #include <linux/signal.h>
25 #include <linux/string.h>
26 #include <linux/uaccess.h>
27 #include <linux/perf_event.h>
28 #include <linux/hw_breakpoint.h>
29 #include <linux/regset.h>
30 #include <linux/tracehook.h>
31 #include <linux/elf.h>
33 #include <asm/compat.h>
34 #include <asm/cpufeature.h>
35 #include <asm/debug-monitors.h>
36 #include <asm/fpsimd.h>
38 #include <asm/pointer_auth.h>
39 #include <asm/stacktrace.h>
40 #include <asm/syscall.h>
41 #include <asm/traps.h>
42 #include <asm/system_misc.h>
44 #define CREATE_TRACE_POINTS
45 #include <trace/events/syscalls.h>
/*
 * Maps a register name (e.g. "x0", "lr", "pstate") to its byte offset in
 * struct pt_regs, for kprobes/uprobes fetch arguments.
 */
struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
#define GPR_OFFSET_NAME(r) \
	{.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])}
57 static const struct pt_regs_offset regoffset_table
[] = {
89 {.name
= "lr", .offset
= offsetof(struct pt_regs
, regs
[30])},
92 REG_OFFSET_NAME(pstate
),
97 * regs_query_register_offset() - query register offset from its name
98 * @name: the name of a register
100 * regs_query_register_offset() returns the offset of a register in struct
101 * pt_regs from its name. If the name is invalid, this returns -EINVAL;
103 int regs_query_register_offset(const char *name
)
105 const struct pt_regs_offset
*roff
;
107 for (roff
= regoffset_table
; roff
->name
!= NULL
; roff
++)
108 if (!strcmp(roff
->name
, name
))
114 * regs_within_kernel_stack() - check the address in the stack
115 * @regs: pt_regs which contains kernel stack pointer.
116 * @addr: address which is checked.
118 * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
119 * If @addr is within the kernel stack, it returns true. If not, returns false.
121 static bool regs_within_kernel_stack(struct pt_regs
*regs
, unsigned long addr
)
123 return ((addr
& ~(THREAD_SIZE
- 1)) ==
124 (kernel_stack_pointer(regs
) & ~(THREAD_SIZE
- 1))) ||
125 on_irq_stack(addr
, sizeof(unsigned long), NULL
);
/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	else
		return 0;
}
/*
 * TODO: does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */

/*
 * Called by kernel/ptrace.c when detaching..
 */
void ptrace_disable(struct task_struct *child)
{
	/*
	 * This would be better off in core code, but PTRACE_DETACH has
	 * grown its fair share of arch-specific worts and changing it
	 * is likely to cause regressions on obscure architectures.
	 */
	user_disable_single_step(child);
}
166 #ifdef CONFIG_HAVE_HW_BREAKPOINT
168 * Handle hitting a HW-breakpoint.
170 static void ptrace_hbptriggered(struct perf_event
*bp
,
171 struct perf_sample_data
*data
,
172 struct pt_regs
*regs
)
174 struct arch_hw_breakpoint
*bkpt
= counter_arch_bp(bp
);
175 const char *desc
= "Hardware breakpoint trap (ptrace)";
178 if (is_compat_task()) {
182 for (i
= 0; i
< ARM_MAX_BRP
; ++i
) {
183 if (current
->thread
.debug
.hbp_break
[i
] == bp
) {
184 si_errno
= (i
<< 1) + 1;
189 for (i
= 0; i
< ARM_MAX_WRP
; ++i
) {
190 if (current
->thread
.debug
.hbp_watch
[i
] == bp
) {
191 si_errno
= -((i
<< 1) + 1);
195 arm64_force_sig_ptrace_errno_trap(si_errno
, bkpt
->trigger
,
200 arm64_force_sig_fault(SIGTRAP
, TRAP_HWBKPT
, bkpt
->trigger
, desc
);
204 * Unregister breakpoints from this task and reset the pointers in
207 void flush_ptrace_hw_breakpoint(struct task_struct
*tsk
)
210 struct thread_struct
*t
= &tsk
->thread
;
212 for (i
= 0; i
< ARM_MAX_BRP
; i
++) {
213 if (t
->debug
.hbp_break
[i
]) {
214 unregister_hw_breakpoint(t
->debug
.hbp_break
[i
]);
215 t
->debug
.hbp_break
[i
] = NULL
;
219 for (i
= 0; i
< ARM_MAX_WRP
; i
++) {
220 if (t
->debug
.hbp_watch
[i
]) {
221 unregister_hw_breakpoint(t
->debug
.hbp_watch
[i
]);
222 t
->debug
.hbp_watch
[i
] = NULL
;
227 void ptrace_hw_copy_thread(struct task_struct
*tsk
)
229 memset(&tsk
->thread
.debug
, 0, sizeof(struct debug_info
));
232 static struct perf_event
*ptrace_hbp_get_event(unsigned int note_type
,
233 struct task_struct
*tsk
,
236 struct perf_event
*bp
= ERR_PTR(-EINVAL
);
239 case NT_ARM_HW_BREAK
:
240 if (idx
>= ARM_MAX_BRP
)
242 idx
= array_index_nospec(idx
, ARM_MAX_BRP
);
243 bp
= tsk
->thread
.debug
.hbp_break
[idx
];
245 case NT_ARM_HW_WATCH
:
246 if (idx
>= ARM_MAX_WRP
)
248 idx
= array_index_nospec(idx
, ARM_MAX_WRP
);
249 bp
= tsk
->thread
.debug
.hbp_watch
[idx
];
257 static int ptrace_hbp_set_event(unsigned int note_type
,
258 struct task_struct
*tsk
,
260 struct perf_event
*bp
)
265 case NT_ARM_HW_BREAK
:
266 if (idx
>= ARM_MAX_BRP
)
268 idx
= array_index_nospec(idx
, ARM_MAX_BRP
);
269 tsk
->thread
.debug
.hbp_break
[idx
] = bp
;
272 case NT_ARM_HW_WATCH
:
273 if (idx
>= ARM_MAX_WRP
)
275 idx
= array_index_nospec(idx
, ARM_MAX_WRP
);
276 tsk
->thread
.debug
.hbp_watch
[idx
] = bp
;
285 static struct perf_event
*ptrace_hbp_create(unsigned int note_type
,
286 struct task_struct
*tsk
,
289 struct perf_event
*bp
;
290 struct perf_event_attr attr
;
294 case NT_ARM_HW_BREAK
:
295 type
= HW_BREAKPOINT_X
;
297 case NT_ARM_HW_WATCH
:
298 type
= HW_BREAKPOINT_RW
;
301 return ERR_PTR(-EINVAL
);
304 ptrace_breakpoint_init(&attr
);
307 * Initialise fields to sane defaults
308 * (i.e. values that will pass validation).
311 attr
.bp_len
= HW_BREAKPOINT_LEN_4
;
315 bp
= register_user_hw_breakpoint(&attr
, ptrace_hbptriggered
, NULL
, tsk
);
319 err
= ptrace_hbp_set_event(note_type
, tsk
, idx
, bp
);
326 static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type
,
327 struct arch_hw_breakpoint_ctrl ctrl
,
328 struct perf_event_attr
*attr
)
330 int err
, len
, type
, offset
, disabled
= !ctrl
.enabled
;
332 attr
->disabled
= disabled
;
336 err
= arch_bp_generic_fields(ctrl
, &len
, &type
, &offset
);
341 case NT_ARM_HW_BREAK
:
342 if ((type
& HW_BREAKPOINT_X
) != type
)
345 case NT_ARM_HW_WATCH
:
346 if ((type
& HW_BREAKPOINT_RW
) != type
)
354 attr
->bp_type
= type
;
355 attr
->bp_addr
+= offset
;
360 static int ptrace_hbp_get_resource_info(unsigned int note_type
, u32
*info
)
366 case NT_ARM_HW_BREAK
:
367 num
= hw_breakpoint_slots(TYPE_INST
);
369 case NT_ARM_HW_WATCH
:
370 num
= hw_breakpoint_slots(TYPE_DATA
);
376 reg
|= debug_monitors_arch();
384 static int ptrace_hbp_get_ctrl(unsigned int note_type
,
385 struct task_struct
*tsk
,
389 struct perf_event
*bp
= ptrace_hbp_get_event(note_type
, tsk
, idx
);
394 *ctrl
= bp
? encode_ctrl_reg(counter_arch_bp(bp
)->ctrl
) : 0;
398 static int ptrace_hbp_get_addr(unsigned int note_type
,
399 struct task_struct
*tsk
,
403 struct perf_event
*bp
= ptrace_hbp_get_event(note_type
, tsk
, idx
);
408 *addr
= bp
? counter_arch_bp(bp
)->address
: 0;
/* Fetch slot @idx, creating the backing event on first use. */
static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);

	if (!bp)
		bp = ptrace_hbp_create(note_type, tsk, idx);

	return bp;
}
424 static int ptrace_hbp_set_ctrl(unsigned int note_type
,
425 struct task_struct
*tsk
,
430 struct perf_event
*bp
;
431 struct perf_event_attr attr
;
432 struct arch_hw_breakpoint_ctrl ctrl
;
434 bp
= ptrace_hbp_get_initialised_bp(note_type
, tsk
, idx
);
441 decode_ctrl_reg(uctrl
, &ctrl
);
442 err
= ptrace_hbp_fill_attr_ctrl(note_type
, ctrl
, &attr
);
446 return modify_user_hw_breakpoint(bp
, &attr
);
449 static int ptrace_hbp_set_addr(unsigned int note_type
,
450 struct task_struct
*tsk
,
455 struct perf_event
*bp
;
456 struct perf_event_attr attr
;
458 bp
= ptrace_hbp_get_initialised_bp(note_type
, tsk
, idx
);
466 err
= modify_user_hw_breakpoint(bp
, &attr
);
470 #define PTRACE_HBP_ADDR_SZ sizeof(u64)
471 #define PTRACE_HBP_CTRL_SZ sizeof(u32)
472 #define PTRACE_HBP_PAD_SZ sizeof(u32)
474 static int hw_break_get(struct task_struct
*target
,
475 const struct user_regset
*regset
,
478 unsigned int note_type
= regset
->core_note_type
;
484 ret
= ptrace_hbp_get_resource_info(note_type
, &info
);
488 membuf_write(&to
, &info
, sizeof(info
));
489 membuf_zero(&to
, sizeof(u32
));
490 /* (address, ctrl) registers */
492 ret
= ptrace_hbp_get_addr(note_type
, target
, idx
, &addr
);
495 ret
= ptrace_hbp_get_ctrl(note_type
, target
, idx
, &ctrl
);
498 membuf_store(&to
, addr
);
499 membuf_store(&to
, ctrl
);
500 membuf_zero(&to
, sizeof(u32
));
506 static int hw_break_set(struct task_struct
*target
,
507 const struct user_regset
*regset
,
508 unsigned int pos
, unsigned int count
,
509 const void *kbuf
, const void __user
*ubuf
)
511 unsigned int note_type
= regset
->core_note_type
;
512 int ret
, idx
= 0, offset
, limit
;
516 /* Resource info and pad */
517 offset
= offsetof(struct user_hwdebug_state
, dbg_regs
);
518 ret
= user_regset_copyin_ignore(&pos
, &count
, &kbuf
, &ubuf
, 0, offset
);
522 /* (address, ctrl) registers */
523 limit
= regset
->n
* regset
->size
;
524 while (count
&& offset
< limit
) {
525 if (count
< PTRACE_HBP_ADDR_SZ
)
527 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &addr
,
528 offset
, offset
+ PTRACE_HBP_ADDR_SZ
);
531 ret
= ptrace_hbp_set_addr(note_type
, target
, idx
, addr
);
534 offset
+= PTRACE_HBP_ADDR_SZ
;
538 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &ctrl
,
539 offset
, offset
+ PTRACE_HBP_CTRL_SZ
);
542 ret
= ptrace_hbp_set_ctrl(note_type
, target
, idx
, ctrl
);
545 offset
+= PTRACE_HBP_CTRL_SZ
;
547 ret
= user_regset_copyin_ignore(&pos
, &count
, &kbuf
, &ubuf
,
549 offset
+ PTRACE_HBP_PAD_SZ
);
552 offset
+= PTRACE_HBP_PAD_SZ
;
558 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
560 static int gpr_get(struct task_struct
*target
,
561 const struct user_regset
*regset
,
564 struct user_pt_regs
*uregs
= &task_pt_regs(target
)->user_regs
;
565 return membuf_write(&to
, uregs
, sizeof(*uregs
));
568 static int gpr_set(struct task_struct
*target
, const struct user_regset
*regset
,
569 unsigned int pos
, unsigned int count
,
570 const void *kbuf
, const void __user
*ubuf
)
573 struct user_pt_regs newregs
= task_pt_regs(target
)->user_regs
;
575 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &newregs
, 0, -1);
579 if (!valid_user_regs(&newregs
, target
))
582 task_pt_regs(target
)->user_regs
= newregs
;
586 static int fpr_active(struct task_struct
*target
, const struct user_regset
*regset
)
588 if (!system_supports_fpsimd())
594 * TODO: update fp accessors for lazy context switching (sync/flush hwstate)
596 static int __fpr_get(struct task_struct
*target
,
597 const struct user_regset
*regset
,
600 struct user_fpsimd_state
*uregs
;
602 sve_sync_to_fpsimd(target
);
604 uregs
= &target
->thread
.uw
.fpsimd_state
;
606 return membuf_write(&to
, uregs
, sizeof(*uregs
));
609 static int fpr_get(struct task_struct
*target
, const struct user_regset
*regset
,
612 if (!system_supports_fpsimd())
615 if (target
== current
)
616 fpsimd_preserve_current_state();
618 return __fpr_get(target
, regset
, to
);
621 static int __fpr_set(struct task_struct
*target
,
622 const struct user_regset
*regset
,
623 unsigned int pos
, unsigned int count
,
624 const void *kbuf
, const void __user
*ubuf
,
625 unsigned int start_pos
)
628 struct user_fpsimd_state newstate
;
631 * Ensure target->thread.uw.fpsimd_state is up to date, so that a
632 * short copyin can't resurrect stale data.
634 sve_sync_to_fpsimd(target
);
636 newstate
= target
->thread
.uw
.fpsimd_state
;
638 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &newstate
,
639 start_pos
, start_pos
+ sizeof(newstate
));
643 target
->thread
.uw
.fpsimd_state
= newstate
;
648 static int fpr_set(struct task_struct
*target
, const struct user_regset
*regset
,
649 unsigned int pos
, unsigned int count
,
650 const void *kbuf
, const void __user
*ubuf
)
654 if (!system_supports_fpsimd())
657 ret
= __fpr_set(target
, regset
, pos
, count
, kbuf
, ubuf
, 0);
661 sve_sync_from_fpsimd_zeropad(target
);
662 fpsimd_flush_task_state(target
);
667 static int tls_get(struct task_struct
*target
, const struct user_regset
*regset
,
670 if (target
== current
)
671 tls_preserve_current_state();
673 return membuf_store(&to
, target
->thread
.uw
.tp_value
);
676 static int tls_set(struct task_struct
*target
, const struct user_regset
*regset
,
677 unsigned int pos
, unsigned int count
,
678 const void *kbuf
, const void __user
*ubuf
)
681 unsigned long tls
= target
->thread
.uw
.tp_value
;
683 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &tls
, 0, -1);
687 target
->thread
.uw
.tp_value
= tls
;
691 static int system_call_get(struct task_struct
*target
,
692 const struct user_regset
*regset
,
695 return membuf_store(&to
, task_pt_regs(target
)->syscallno
);
698 static int system_call_set(struct task_struct
*target
,
699 const struct user_regset
*regset
,
700 unsigned int pos
, unsigned int count
,
701 const void *kbuf
, const void __user
*ubuf
)
703 int syscallno
= task_pt_regs(target
)->syscallno
;
706 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &syscallno
, 0, -1);
710 task_pt_regs(target
)->syscallno
= syscallno
;
714 #ifdef CONFIG_ARM64_SVE
716 static void sve_init_header_from_task(struct user_sve_header
*header
,
717 struct task_struct
*target
)
721 memset(header
, 0, sizeof(*header
));
723 header
->flags
= test_tsk_thread_flag(target
, TIF_SVE
) ?
724 SVE_PT_REGS_SVE
: SVE_PT_REGS_FPSIMD
;
725 if (test_tsk_thread_flag(target
, TIF_SVE_VL_INHERIT
))
726 header
->flags
|= SVE_PT_VL_INHERIT
;
728 header
->vl
= target
->thread
.sve_vl
;
729 vq
= sve_vq_from_vl(header
->vl
);
731 header
->max_vl
= sve_max_vl
;
732 header
->size
= SVE_PT_SIZE(vq
, header
->flags
);
733 header
->max_size
= SVE_PT_SIZE(sve_vq_from_vl(header
->max_vl
),
737 static unsigned int sve_size_from_header(struct user_sve_header
const *header
)
739 return ALIGN(header
->size
, SVE_VQ_BYTES
);
742 static int sve_get(struct task_struct
*target
,
743 const struct user_regset
*regset
,
746 struct user_sve_header header
;
748 unsigned long start
, end
;
750 if (!system_supports_sve())
754 sve_init_header_from_task(&header
, target
);
755 vq
= sve_vq_from_vl(header
.vl
);
757 membuf_write(&to
, &header
, sizeof(header
));
759 if (target
== current
)
760 fpsimd_preserve_current_state();
762 /* Registers: FPSIMD-only case */
764 BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET
!= sizeof(header
));
765 if ((header
.flags
& SVE_PT_REGS_MASK
) == SVE_PT_REGS_FPSIMD
)
766 return __fpr_get(target
, regset
, to
);
768 /* Otherwise: full SVE case */
770 BUILD_BUG_ON(SVE_PT_SVE_OFFSET
!= sizeof(header
));
771 start
= SVE_PT_SVE_OFFSET
;
772 end
= SVE_PT_SVE_FFR_OFFSET(vq
) + SVE_PT_SVE_FFR_SIZE(vq
);
773 membuf_write(&to
, target
->thread
.sve_state
, end
- start
);
776 end
= SVE_PT_SVE_FPSR_OFFSET(vq
);
777 membuf_zero(&to
, end
- start
);
780 * Copy fpsr, and fpcr which must follow contiguously in
781 * struct fpsimd_state:
784 end
= SVE_PT_SVE_FPCR_OFFSET(vq
) + SVE_PT_SVE_FPCR_SIZE
;
785 membuf_write(&to
, &target
->thread
.uw
.fpsimd_state
.fpsr
, end
- start
);
788 end
= sve_size_from_header(&header
);
789 return membuf_zero(&to
, end
- start
);
792 static int sve_set(struct task_struct
*target
,
793 const struct user_regset
*regset
,
794 unsigned int pos
, unsigned int count
,
795 const void *kbuf
, const void __user
*ubuf
)
798 struct user_sve_header header
;
800 unsigned long start
, end
;
802 if (!system_supports_sve())
806 if (count
< sizeof(header
))
808 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &header
,
814 * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by
815 * sve_set_vector_length(), which will also validate them for us:
817 ret
= sve_set_vector_length(target
, header
.vl
,
818 ((unsigned long)header
.flags
& ~SVE_PT_REGS_MASK
) << 16);
822 /* Actual VL set may be less than the user asked for: */
823 vq
= sve_vq_from_vl(target
->thread
.sve_vl
);
825 /* Registers: FPSIMD-only case */
827 BUILD_BUG_ON(SVE_PT_FPSIMD_OFFSET
!= sizeof(header
));
828 if ((header
.flags
& SVE_PT_REGS_MASK
) == SVE_PT_REGS_FPSIMD
) {
829 ret
= __fpr_set(target
, regset
, pos
, count
, kbuf
, ubuf
,
830 SVE_PT_FPSIMD_OFFSET
);
831 clear_tsk_thread_flag(target
, TIF_SVE
);
835 /* Otherwise: full SVE case */
838 * If setting a different VL from the requested VL and there is
839 * register data, the data layout will be wrong: don't even
840 * try to set the registers in this case.
842 if (count
&& vq
!= sve_vq_from_vl(header
.vl
)) {
848 if (!target
->thread
.sve_state
) {
850 clear_tsk_thread_flag(target
, TIF_SVE
);
855 * Ensure target->thread.sve_state is up to date with target's
856 * FPSIMD regs, so that a short copyin leaves trailing registers
859 fpsimd_sync_to_sve(target
);
860 set_tsk_thread_flag(target
, TIF_SVE
);
862 BUILD_BUG_ON(SVE_PT_SVE_OFFSET
!= sizeof(header
));
863 start
= SVE_PT_SVE_OFFSET
;
864 end
= SVE_PT_SVE_FFR_OFFSET(vq
) + SVE_PT_SVE_FFR_SIZE(vq
);
865 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
866 target
->thread
.sve_state
,
872 end
= SVE_PT_SVE_FPSR_OFFSET(vq
);
873 ret
= user_regset_copyin_ignore(&pos
, &count
, &kbuf
, &ubuf
,
879 * Copy fpsr, and fpcr which must follow contiguously in
880 * struct fpsimd_state:
883 end
= SVE_PT_SVE_FPCR_OFFSET(vq
) + SVE_PT_SVE_FPCR_SIZE
;
884 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
885 &target
->thread
.uw
.fpsimd_state
.fpsr
,
889 fpsimd_flush_task_state(target
);
893 #endif /* CONFIG_ARM64_SVE */
895 #ifdef CONFIG_ARM64_PTR_AUTH
896 static int pac_mask_get(struct task_struct
*target
,
897 const struct user_regset
*regset
,
901 * The PAC bits can differ across data and instruction pointers
902 * depending on TCR_EL1.TBID*, which we may make use of in future, so
903 * we expose separate masks.
905 unsigned long mask
= ptrauth_user_pac_mask();
906 struct user_pac_mask uregs
= {
911 if (!system_supports_address_auth())
914 return membuf_write(&to
, &uregs
, sizeof(uregs
));
917 static int pac_enabled_keys_get(struct task_struct
*target
,
918 const struct user_regset
*regset
,
921 long enabled_keys
= ptrauth_get_enabled_keys(target
);
923 if (IS_ERR_VALUE(enabled_keys
))
926 return membuf_write(&to
, &enabled_keys
, sizeof(enabled_keys
));
929 static int pac_enabled_keys_set(struct task_struct
*target
,
930 const struct user_regset
*regset
,
931 unsigned int pos
, unsigned int count
,
932 const void *kbuf
, const void __user
*ubuf
)
935 long enabled_keys
= ptrauth_get_enabled_keys(target
);
937 if (IS_ERR_VALUE(enabled_keys
))
940 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &enabled_keys
, 0,
945 return ptrauth_set_enabled_keys(target
, PR_PAC_ENABLED_KEYS_MASK
,
949 #ifdef CONFIG_CHECKPOINT_RESTORE
950 static __uint128_t
pac_key_to_user(const struct ptrauth_key
*key
)
952 return (__uint128_t
)key
->hi
<< 64 | key
->lo
;
955 static struct ptrauth_key
pac_key_from_user(__uint128_t ukey
)
957 struct ptrauth_key key
= {
958 .lo
= (unsigned long)ukey
,
959 .hi
= (unsigned long)(ukey
>> 64),
965 static void pac_address_keys_to_user(struct user_pac_address_keys
*ukeys
,
966 const struct ptrauth_keys_user
*keys
)
968 ukeys
->apiakey
= pac_key_to_user(&keys
->apia
);
969 ukeys
->apibkey
= pac_key_to_user(&keys
->apib
);
970 ukeys
->apdakey
= pac_key_to_user(&keys
->apda
);
971 ukeys
->apdbkey
= pac_key_to_user(&keys
->apdb
);
974 static void pac_address_keys_from_user(struct ptrauth_keys_user
*keys
,
975 const struct user_pac_address_keys
*ukeys
)
977 keys
->apia
= pac_key_from_user(ukeys
->apiakey
);
978 keys
->apib
= pac_key_from_user(ukeys
->apibkey
);
979 keys
->apda
= pac_key_from_user(ukeys
->apdakey
);
980 keys
->apdb
= pac_key_from_user(ukeys
->apdbkey
);
983 static int pac_address_keys_get(struct task_struct
*target
,
984 const struct user_regset
*regset
,
987 struct ptrauth_keys_user
*keys
= &target
->thread
.keys_user
;
988 struct user_pac_address_keys user_keys
;
990 if (!system_supports_address_auth())
993 pac_address_keys_to_user(&user_keys
, keys
);
995 return membuf_write(&to
, &user_keys
, sizeof(user_keys
));
998 static int pac_address_keys_set(struct task_struct
*target
,
999 const struct user_regset
*regset
,
1000 unsigned int pos
, unsigned int count
,
1001 const void *kbuf
, const void __user
*ubuf
)
1003 struct ptrauth_keys_user
*keys
= &target
->thread
.keys_user
;
1004 struct user_pac_address_keys user_keys
;
1007 if (!system_supports_address_auth())
1010 pac_address_keys_to_user(&user_keys
, keys
);
1011 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1015 pac_address_keys_from_user(keys
, &user_keys
);
1020 static void pac_generic_keys_to_user(struct user_pac_generic_keys
*ukeys
,
1021 const struct ptrauth_keys_user
*keys
)
1023 ukeys
->apgakey
= pac_key_to_user(&keys
->apga
);
1026 static void pac_generic_keys_from_user(struct ptrauth_keys_user
*keys
,
1027 const struct user_pac_generic_keys
*ukeys
)
1029 keys
->apga
= pac_key_from_user(ukeys
->apgakey
);
1032 static int pac_generic_keys_get(struct task_struct
*target
,
1033 const struct user_regset
*regset
,
1036 struct ptrauth_keys_user
*keys
= &target
->thread
.keys_user
;
1037 struct user_pac_generic_keys user_keys
;
1039 if (!system_supports_generic_auth())
1042 pac_generic_keys_to_user(&user_keys
, keys
);
1044 return membuf_write(&to
, &user_keys
, sizeof(user_keys
));
1047 static int pac_generic_keys_set(struct task_struct
*target
,
1048 const struct user_regset
*regset
,
1049 unsigned int pos
, unsigned int count
,
1050 const void *kbuf
, const void __user
*ubuf
)
1052 struct ptrauth_keys_user
*keys
= &target
->thread
.keys_user
;
1053 struct user_pac_generic_keys user_keys
;
1056 if (!system_supports_generic_auth())
1059 pac_generic_keys_to_user(&user_keys
, keys
);
1060 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
,
1064 pac_generic_keys_from_user(keys
, &user_keys
);
1068 #endif /* CONFIG_CHECKPOINT_RESTORE */
1069 #endif /* CONFIG_ARM64_PTR_AUTH */
1071 #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
1072 static int tagged_addr_ctrl_get(struct task_struct
*target
,
1073 const struct user_regset
*regset
,
1076 long ctrl
= get_tagged_addr_ctrl(target
);
1078 if (IS_ERR_VALUE(ctrl
))
1081 return membuf_write(&to
, &ctrl
, sizeof(ctrl
));
1084 static int tagged_addr_ctrl_set(struct task_struct
*target
, const struct
1085 user_regset
*regset
, unsigned int pos
,
1086 unsigned int count
, const void *kbuf
, const
1092 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &ctrl
, 0, -1);
1096 return set_tagged_addr_ctrl(target
, ctrl
);
1100 enum aarch64_regset
{
1104 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1109 #ifdef CONFIG_ARM64_SVE
1112 #ifdef CONFIG_ARM64_PTR_AUTH
1114 REGSET_PAC_ENABLED_KEYS
,
1115 #ifdef CONFIG_CHECKPOINT_RESTORE
1120 #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
1121 REGSET_TAGGED_ADDR_CTRL
,
1125 static const struct user_regset aarch64_regsets
[] = {
1127 .core_note_type
= NT_PRSTATUS
,
1128 .n
= sizeof(struct user_pt_regs
) / sizeof(u64
),
1129 .size
= sizeof(u64
),
1130 .align
= sizeof(u64
),
1131 .regset_get
= gpr_get
,
1135 .core_note_type
= NT_PRFPREG
,
1136 .n
= sizeof(struct user_fpsimd_state
) / sizeof(u32
),
1138 * We pretend we have 32-bit registers because the fpsr and
1139 * fpcr are 32-bits wide.
1141 .size
= sizeof(u32
),
1142 .align
= sizeof(u32
),
1143 .active
= fpr_active
,
1144 .regset_get
= fpr_get
,
1148 .core_note_type
= NT_ARM_TLS
,
1150 .size
= sizeof(void *),
1151 .align
= sizeof(void *),
1152 .regset_get
= tls_get
,
1155 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1156 [REGSET_HW_BREAK
] = {
1157 .core_note_type
= NT_ARM_HW_BREAK
,
1158 .n
= sizeof(struct user_hwdebug_state
) / sizeof(u32
),
1159 .size
= sizeof(u32
),
1160 .align
= sizeof(u32
),
1161 .regset_get
= hw_break_get
,
1162 .set
= hw_break_set
,
1164 [REGSET_HW_WATCH
] = {
1165 .core_note_type
= NT_ARM_HW_WATCH
,
1166 .n
= sizeof(struct user_hwdebug_state
) / sizeof(u32
),
1167 .size
= sizeof(u32
),
1168 .align
= sizeof(u32
),
1169 .regset_get
= hw_break_get
,
1170 .set
= hw_break_set
,
1173 [REGSET_SYSTEM_CALL
] = {
1174 .core_note_type
= NT_ARM_SYSTEM_CALL
,
1176 .size
= sizeof(int),
1177 .align
= sizeof(int),
1178 .regset_get
= system_call_get
,
1179 .set
= system_call_set
,
1181 #ifdef CONFIG_ARM64_SVE
1182 [REGSET_SVE
] = { /* Scalable Vector Extension */
1183 .core_note_type
= NT_ARM_SVE
,
1184 .n
= DIV_ROUND_UP(SVE_PT_SIZE(SVE_VQ_MAX
, SVE_PT_REGS_SVE
),
1186 .size
= SVE_VQ_BYTES
,
1187 .align
= SVE_VQ_BYTES
,
1188 .regset_get
= sve_get
,
1192 #ifdef CONFIG_ARM64_PTR_AUTH
1193 [REGSET_PAC_MASK
] = {
1194 .core_note_type
= NT_ARM_PAC_MASK
,
1195 .n
= sizeof(struct user_pac_mask
) / sizeof(u64
),
1196 .size
= sizeof(u64
),
1197 .align
= sizeof(u64
),
1198 .regset_get
= pac_mask_get
,
1199 /* this cannot be set dynamically */
1201 [REGSET_PAC_ENABLED_KEYS
] = {
1202 .core_note_type
= NT_ARM_PAC_ENABLED_KEYS
,
1204 .size
= sizeof(long),
1205 .align
= sizeof(long),
1206 .regset_get
= pac_enabled_keys_get
,
1207 .set
= pac_enabled_keys_set
,
1209 #ifdef CONFIG_CHECKPOINT_RESTORE
1210 [REGSET_PACA_KEYS
] = {
1211 .core_note_type
= NT_ARM_PACA_KEYS
,
1212 .n
= sizeof(struct user_pac_address_keys
) / sizeof(__uint128_t
),
1213 .size
= sizeof(__uint128_t
),
1214 .align
= sizeof(__uint128_t
),
1215 .regset_get
= pac_address_keys_get
,
1216 .set
= pac_address_keys_set
,
1218 [REGSET_PACG_KEYS
] = {
1219 .core_note_type
= NT_ARM_PACG_KEYS
,
1220 .n
= sizeof(struct user_pac_generic_keys
) / sizeof(__uint128_t
),
1221 .size
= sizeof(__uint128_t
),
1222 .align
= sizeof(__uint128_t
),
1223 .regset_get
= pac_generic_keys_get
,
1224 .set
= pac_generic_keys_set
,
1228 #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
1229 [REGSET_TAGGED_ADDR_CTRL
] = {
1230 .core_note_type
= NT_ARM_TAGGED_ADDR_CTRL
,
1232 .size
= sizeof(long),
1233 .align
= sizeof(long),
1234 .regset_get
= tagged_addr_ctrl_get
,
1235 .set
= tagged_addr_ctrl_set
,
1240 static const struct user_regset_view user_aarch64_view
= {
1241 .name
= "aarch64", .e_machine
= EM_AARCH64
,
1242 .regsets
= aarch64_regsets
, .n
= ARRAY_SIZE(aarch64_regsets
)
1245 #ifdef CONFIG_COMPAT
1246 enum compat_regset
{
1251 static inline compat_ulong_t
compat_get_user_reg(struct task_struct
*task
, int idx
)
1253 struct pt_regs
*regs
= task_pt_regs(task
);
1259 return pstate_to_compat_psr(regs
->pstate
);
1261 return regs
->orig_x0
;
1263 return regs
->regs
[idx
];
1267 static int compat_gpr_get(struct task_struct
*target
,
1268 const struct user_regset
*regset
,
1274 membuf_store(&to
, compat_get_user_reg(target
, i
++));
1278 static int compat_gpr_set(struct task_struct
*target
,
1279 const struct user_regset
*regset
,
1280 unsigned int pos
, unsigned int count
,
1281 const void *kbuf
, const void __user
*ubuf
)
1283 struct pt_regs newregs
;
1285 unsigned int i
, start
, num_regs
;
1287 /* Calculate the number of AArch32 registers contained in count */
1288 num_regs
= count
/ regset
->size
;
1290 /* Convert pos into an register number */
1291 start
= pos
/ regset
->size
;
1293 if (start
+ num_regs
> regset
->n
)
1296 newregs
= *task_pt_regs(target
);
1298 for (i
= 0; i
< num_regs
; ++i
) {
1299 unsigned int idx
= start
+ i
;
1303 memcpy(®
, kbuf
, sizeof(reg
));
1304 kbuf
+= sizeof(reg
);
1306 ret
= copy_from_user(®
, ubuf
, sizeof(reg
));
1312 ubuf
+= sizeof(reg
);
1320 reg
= compat_psr_to_pstate(reg
);
1321 newregs
.pstate
= reg
;
1324 newregs
.orig_x0
= reg
;
1327 newregs
.regs
[idx
] = reg
;
1332 if (valid_user_regs(&newregs
.user_regs
, target
))
1333 *task_pt_regs(target
) = newregs
;
1340 static int compat_vfp_get(struct task_struct
*target
,
1341 const struct user_regset
*regset
,
1344 struct user_fpsimd_state
*uregs
;
1345 compat_ulong_t fpscr
;
1347 if (!system_supports_fpsimd())
1350 uregs
= &target
->thread
.uw
.fpsimd_state
;
1352 if (target
== current
)
1353 fpsimd_preserve_current_state();
1356 * The VFP registers are packed into the fpsimd_state, so they all sit
1357 * nicely together for us. We just need to create the fpscr separately.
1359 membuf_write(&to
, uregs
, VFP_STATE_SIZE
- sizeof(compat_ulong_t
));
1360 fpscr
= (uregs
->fpsr
& VFP_FPSCR_STAT_MASK
) |
1361 (uregs
->fpcr
& VFP_FPSCR_CTRL_MASK
);
1362 return membuf_store(&to
, fpscr
);
1365 static int compat_vfp_set(struct task_struct
*target
,
1366 const struct user_regset
*regset
,
1367 unsigned int pos
, unsigned int count
,
1368 const void *kbuf
, const void __user
*ubuf
)
1370 struct user_fpsimd_state
*uregs
;
1371 compat_ulong_t fpscr
;
1372 int ret
, vregs_end_pos
;
1374 if (!system_supports_fpsimd())
1377 uregs
= &target
->thread
.uw
.fpsimd_state
;
1379 vregs_end_pos
= VFP_STATE_SIZE
- sizeof(compat_ulong_t
);
1380 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, uregs
, 0,
1383 if (count
&& !ret
) {
1384 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &fpscr
,
1385 vregs_end_pos
, VFP_STATE_SIZE
);
1387 uregs
->fpsr
= fpscr
& VFP_FPSCR_STAT_MASK
;
1388 uregs
->fpcr
= fpscr
& VFP_FPSCR_CTRL_MASK
;
1392 fpsimd_flush_task_state(target
);
1396 static int compat_tls_get(struct task_struct
*target
,
1397 const struct user_regset
*regset
,
1400 return membuf_store(&to
, (compat_ulong_t
)target
->thread
.uw
.tp_value
);
1403 static int compat_tls_set(struct task_struct
*target
,
1404 const struct user_regset
*regset
, unsigned int pos
,
1405 unsigned int count
, const void *kbuf
,
1406 const void __user
*ubuf
)
1409 compat_ulong_t tls
= target
->thread
.uw
.tp_value
;
1411 ret
= user_regset_copyin(&pos
, &count
, &kbuf
, &ubuf
, &tls
, 0, -1);
1415 target
->thread
.uw
.tp_value
= tls
;
1419 static const struct user_regset aarch32_regsets
[] = {
1420 [REGSET_COMPAT_GPR
] = {
1421 .core_note_type
= NT_PRSTATUS
,
1422 .n
= COMPAT_ELF_NGREG
,
1423 .size
= sizeof(compat_elf_greg_t
),
1424 .align
= sizeof(compat_elf_greg_t
),
1425 .regset_get
= compat_gpr_get
,
1426 .set
= compat_gpr_set
1428 [REGSET_COMPAT_VFP
] = {
1429 .core_note_type
= NT_ARM_VFP
,
1430 .n
= VFP_STATE_SIZE
/ sizeof(compat_ulong_t
),
1431 .size
= sizeof(compat_ulong_t
),
1432 .align
= sizeof(compat_ulong_t
),
1433 .active
= fpr_active
,
1434 .regset_get
= compat_vfp_get
,
1435 .set
= compat_vfp_set
1439 static const struct user_regset_view user_aarch32_view
= {
1440 .name
= "aarch32", .e_machine
= EM_ARM
,
1441 .regsets
= aarch32_regsets
, .n
= ARRAY_SIZE(aarch32_regsets
)
1444 static const struct user_regset aarch32_ptrace_regsets
[] = {
1446 .core_note_type
= NT_PRSTATUS
,
1447 .n
= COMPAT_ELF_NGREG
,
1448 .size
= sizeof(compat_elf_greg_t
),
1449 .align
= sizeof(compat_elf_greg_t
),
1450 .regset_get
= compat_gpr_get
,
1451 .set
= compat_gpr_set
1454 .core_note_type
= NT_ARM_VFP
,
1455 .n
= VFP_STATE_SIZE
/ sizeof(compat_ulong_t
),
1456 .size
= sizeof(compat_ulong_t
),
1457 .align
= sizeof(compat_ulong_t
),
1458 .regset_get
= compat_vfp_get
,
1459 .set
= compat_vfp_set
1462 .core_note_type
= NT_ARM_TLS
,
1464 .size
= sizeof(compat_ulong_t
),
1465 .align
= sizeof(compat_ulong_t
),
1466 .regset_get
= compat_tls_get
,
1467 .set
= compat_tls_set
,
1469 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1470 [REGSET_HW_BREAK
] = {
1471 .core_note_type
= NT_ARM_HW_BREAK
,
1472 .n
= sizeof(struct user_hwdebug_state
) / sizeof(u32
),
1473 .size
= sizeof(u32
),
1474 .align
= sizeof(u32
),
1475 .regset_get
= hw_break_get
,
1476 .set
= hw_break_set
,
1478 [REGSET_HW_WATCH
] = {
1479 .core_note_type
= NT_ARM_HW_WATCH
,
1480 .n
= sizeof(struct user_hwdebug_state
) / sizeof(u32
),
1481 .size
= sizeof(u32
),
1482 .align
= sizeof(u32
),
1483 .regset_get
= hw_break_get
,
1484 .set
= hw_break_set
,
1487 [REGSET_SYSTEM_CALL
] = {
1488 .core_note_type
= NT_ARM_SYSTEM_CALL
,
1490 .size
= sizeof(int),
1491 .align
= sizeof(int),
1492 .regset_get
= system_call_get
,
1493 .set
= system_call_set
,
1497 static const struct user_regset_view user_aarch32_ptrace_view
= {
1498 .name
= "aarch32", .e_machine
= EM_ARM
,
1499 .regsets
= aarch32_ptrace_regsets
, .n
= ARRAY_SIZE(aarch32_ptrace_regsets
)
1502 static int compat_ptrace_read_user(struct task_struct
*tsk
, compat_ulong_t off
,
1503 compat_ulong_t __user
*ret
)
1510 if (off
== COMPAT_PT_TEXT_ADDR
)
1511 tmp
= tsk
->mm
->start_code
;
1512 else if (off
== COMPAT_PT_DATA_ADDR
)
1513 tmp
= tsk
->mm
->start_data
;
1514 else if (off
== COMPAT_PT_TEXT_END_ADDR
)
1515 tmp
= tsk
->mm
->end_code
;
1516 else if (off
< sizeof(compat_elf_gregset_t
))
1517 tmp
= compat_get_user_reg(tsk
, off
>> 2);
1518 else if (off
>= COMPAT_USER_SZ
)
1523 return put_user(tmp
, ret
);
1526 static int compat_ptrace_write_user(struct task_struct
*tsk
, compat_ulong_t off
,
1529 struct pt_regs newregs
= *task_pt_regs(tsk
);
1530 unsigned int idx
= off
/ 4;
1532 if (off
& 3 || off
>= COMPAT_USER_SZ
)
1535 if (off
>= sizeof(compat_elf_gregset_t
))
1543 newregs
.pstate
= compat_psr_to_pstate(val
);
1546 newregs
.orig_x0
= val
;
1549 newregs
.regs
[idx
] = val
;
1552 if (!valid_user_regs(&newregs
.user_regs
, tsk
))
1555 *task_pt_regs(tsk
) = newregs
;
1559 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1562 * Convert a virtual register number into an index for a thread_info
1563 * breakpoint array. Breakpoints are identified using positive numbers
1564 * whilst watchpoints are negative. The registers are laid out as pairs
1565 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
1566 * Register 0 is reserved for describing resource information.
1568 static int compat_ptrace_hbp_num_to_idx(compat_long_t num
)
1570 return (abs(num
) - 1) >> 1;
1573 static int compat_ptrace_hbp_get_resource_info(u32
*kdata
)
1575 u8 num_brps
, num_wrps
, debug_arch
, wp_len
;
1578 num_brps
= hw_breakpoint_slots(TYPE_INST
);
1579 num_wrps
= hw_breakpoint_slots(TYPE_DATA
);
1581 debug_arch
= debug_monitors_arch();
1595 static int compat_ptrace_hbp_get(unsigned int note_type
,
1596 struct task_struct
*tsk
,
1603 int err
, idx
= compat_ptrace_hbp_num_to_idx(num
);
1606 err
= ptrace_hbp_get_addr(note_type
, tsk
, idx
, &addr
);
1609 err
= ptrace_hbp_get_ctrl(note_type
, tsk
, idx
, &ctrl
);
1616 static int compat_ptrace_hbp_set(unsigned int note_type
,
1617 struct task_struct
*tsk
,
1624 int err
, idx
= compat_ptrace_hbp_num_to_idx(num
);
1628 err
= ptrace_hbp_set_addr(note_type
, tsk
, idx
, addr
);
1631 err
= ptrace_hbp_set_ctrl(note_type
, tsk
, idx
, ctrl
);
1637 static int compat_ptrace_gethbpregs(struct task_struct
*tsk
, compat_long_t num
,
1638 compat_ulong_t __user
*data
)
1645 ret
= compat_ptrace_hbp_get(NT_ARM_HW_WATCH
, tsk
, num
, &kdata
);
1647 } else if (num
== 0) {
1648 ret
= compat_ptrace_hbp_get_resource_info(&kdata
);
1651 ret
= compat_ptrace_hbp_get(NT_ARM_HW_BREAK
, tsk
, num
, &kdata
);
1655 ret
= put_user(kdata
, data
);
1660 static int compat_ptrace_sethbpregs(struct task_struct
*tsk
, compat_long_t num
,
1661 compat_ulong_t __user
*data
)
1669 ret
= get_user(kdata
, data
);
1674 ret
= compat_ptrace_hbp_set(NT_ARM_HW_WATCH
, tsk
, num
, &kdata
);
1676 ret
= compat_ptrace_hbp_set(NT_ARM_HW_BREAK
, tsk
, num
, &kdata
);
1680 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
1682 long compat_arch_ptrace(struct task_struct
*child
, compat_long_t request
,
1683 compat_ulong_t caddr
, compat_ulong_t cdata
)
1685 unsigned long addr
= caddr
;
1686 unsigned long data
= cdata
;
1687 void __user
*datap
= compat_ptr(data
);
1691 case PTRACE_PEEKUSR
:
1692 ret
= compat_ptrace_read_user(child
, addr
, datap
);
1695 case PTRACE_POKEUSR
:
1696 ret
= compat_ptrace_write_user(child
, addr
, data
);
1699 case COMPAT_PTRACE_GETREGS
:
1700 ret
= copy_regset_to_user(child
,
1703 0, sizeof(compat_elf_gregset_t
),
1707 case COMPAT_PTRACE_SETREGS
:
1708 ret
= copy_regset_from_user(child
,
1711 0, sizeof(compat_elf_gregset_t
),
1715 case COMPAT_PTRACE_GET_THREAD_AREA
:
1716 ret
= put_user((compat_ulong_t
)child
->thread
.uw
.tp_value
,
1717 (compat_ulong_t __user
*)datap
);
1720 case COMPAT_PTRACE_SET_SYSCALL
:
1721 task_pt_regs(child
)->syscallno
= data
;
1725 case COMPAT_PTRACE_GETVFPREGS
:
1726 ret
= copy_regset_to_user(child
,
1733 case COMPAT_PTRACE_SETVFPREGS
:
1734 ret
= copy_regset_from_user(child
,
1741 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1742 case COMPAT_PTRACE_GETHBPREGS
:
1743 ret
= compat_ptrace_gethbpregs(child
, addr
, datap
);
1746 case COMPAT_PTRACE_SETHBPREGS
:
1747 ret
= compat_ptrace_sethbpregs(child
, addr
, datap
);
1752 ret
= compat_ptrace_request(child
, request
, addr
,
1759 #endif /* CONFIG_COMPAT */
1761 const struct user_regset_view
*task_user_regset_view(struct task_struct
*task
)
1763 #ifdef CONFIG_COMPAT
1765 * Core dumping of 32-bit tasks or compat ptrace requests must use the
1766 * user_aarch32_view compatible with arm32. Native ptrace requests on
1767 * 32-bit children use an extended user_aarch32_ptrace_view to allow
1768 * access to the TLS register.
1770 if (is_compat_task())
1771 return &user_aarch32_view
;
1772 else if (is_compat_thread(task_thread_info(task
)))
1773 return &user_aarch32_ptrace_view
;
1775 return &user_aarch64_view
;
1778 long arch_ptrace(struct task_struct
*child
, long request
,
1779 unsigned long addr
, unsigned long data
)
1782 case PTRACE_PEEKMTETAGS
:
1783 case PTRACE_POKEMTETAGS
:
1784 return mte_ptrace_copy_tags(child
, request
, addr
, data
);
1787 return ptrace_request(child
, request
, addr
, data
);
/* Direction flag passed to tracehook_report_syscall(). */
enum ptrace_syscall_dir {
	PTRACE_SYSCALL_ENTER = 0,
	PTRACE_SYSCALL_EXIT,
};
1795 static void tracehook_report_syscall(struct pt_regs
*regs
,
1796 enum ptrace_syscall_dir dir
)
1799 unsigned long saved_reg
;
1802 * We have some ABI weirdness here in the way that we handle syscall
1803 * exit stops because we indicate whether or not the stop has been
1804 * signalled from syscall entry or syscall exit by clobbering a general
1805 * purpose register (ip/r12 for AArch32, x7 for AArch64) in the tracee
1806 * and restoring its old value after the stop. This means that:
1808 * - Any writes by the tracer to this register during the stop are
1809 * ignored/discarded.
1811 * - The actual value of the register is not available during the stop,
1812 * so the tracer cannot save it and restore it later.
1814 * - Syscall stops behave differently to seccomp and pseudo-step traps
1815 * (the latter do not nobble any registers).
1817 regno
= (is_compat_task() ? 12 : 7);
1818 saved_reg
= regs
->regs
[regno
];
1819 regs
->regs
[regno
] = dir
;
1821 if (dir
== PTRACE_SYSCALL_ENTER
) {
1822 if (tracehook_report_syscall_entry(regs
))
1823 forget_syscall(regs
);
1824 regs
->regs
[regno
] = saved_reg
;
1825 } else if (!test_thread_flag(TIF_SINGLESTEP
)) {
1826 tracehook_report_syscall_exit(regs
, 0);
1827 regs
->regs
[regno
] = saved_reg
;
1829 regs
->regs
[regno
] = saved_reg
;
1832 * Signal a pseudo-step exception since we are stepping but
1833 * tracer modifications to the registers may have rewound the
1836 tracehook_report_syscall_exit(regs
, 1);
1840 int syscall_trace_enter(struct pt_regs
*regs
)
1842 unsigned long flags
= READ_ONCE(current_thread_info()->flags
);
1844 if (flags
& (_TIF_SYSCALL_EMU
| _TIF_SYSCALL_TRACE
)) {
1845 tracehook_report_syscall(regs
, PTRACE_SYSCALL_ENTER
);
1846 if (flags
& _TIF_SYSCALL_EMU
)
1850 /* Do the secure computing after ptrace; failures should be fast. */
1851 if (secure_computing() == -1)
1854 if (test_thread_flag(TIF_SYSCALL_TRACEPOINT
))
1855 trace_sys_enter(regs
, regs
->syscallno
);
1857 audit_syscall_entry(regs
->syscallno
, regs
->orig_x0
, regs
->regs
[1],
1858 regs
->regs
[2], regs
->regs
[3]);
1860 return regs
->syscallno
;
1863 void syscall_trace_exit(struct pt_regs
*regs
)
1865 unsigned long flags
= READ_ONCE(current_thread_info()->flags
);
1867 audit_syscall_exit(regs
);
1869 if (flags
& _TIF_SYSCALL_TRACEPOINT
)
1870 trace_sys_exit(regs
, syscall_get_return_value(current
, regs
));
1872 if (flags
& (_TIF_SYSCALL_TRACE
| _TIF_SINGLESTEP
))
1873 tracehook_report_syscall(regs
, PTRACE_SYSCALL_EXIT
);
/*
 * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a.
 * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is
 * not described in ARM DDI 0487D.a.
 * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may
 * be allocated an EL0 meaning in future.
 * Userspace cannot use these until they have an architectural meaning.
 * Note that this follows the SPSR_ELx format, not the AArch32 PSR format.
 * We also reserve IL for the kernel; SS is handled dynamically.
 */
#define SPSR_EL1_AARCH64_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(27, 26) | GENMASK_ULL(23, 22) | \
	 GENMASK_ULL(20, 13) | GENMASK_ULL(5, 5))
#define SPSR_EL1_AARCH32_RES0_BITS \
	(GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20))
1894 static int valid_compat_regs(struct user_pt_regs
*regs
)
1896 regs
->pstate
&= ~SPSR_EL1_AARCH32_RES0_BITS
;
1898 if (!system_supports_mixed_endian_el0()) {
1899 if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN
))
1900 regs
->pstate
|= PSR_AA32_E_BIT
;
1902 regs
->pstate
&= ~PSR_AA32_E_BIT
;
1905 if (user_mode(regs
) && (regs
->pstate
& PSR_MODE32_BIT
) &&
1906 (regs
->pstate
& PSR_AA32_A_BIT
) == 0 &&
1907 (regs
->pstate
& PSR_AA32_I_BIT
) == 0 &&
1908 (regs
->pstate
& PSR_AA32_F_BIT
) == 0) {
1913 * Force PSR to a valid 32-bit EL0t, preserving the same bits as
1916 regs
->pstate
&= PSR_AA32_N_BIT
| PSR_AA32_Z_BIT
|
1917 PSR_AA32_C_BIT
| PSR_AA32_V_BIT
|
1918 PSR_AA32_Q_BIT
| PSR_AA32_IT_MASK
|
1919 PSR_AA32_GE_MASK
| PSR_AA32_E_BIT
|
1921 regs
->pstate
|= PSR_MODE32_BIT
;
1926 static int valid_native_regs(struct user_pt_regs
*regs
)
1928 regs
->pstate
&= ~SPSR_EL1_AARCH64_RES0_BITS
;
1930 if (user_mode(regs
) && !(regs
->pstate
& PSR_MODE32_BIT
) &&
1931 (regs
->pstate
& PSR_D_BIT
) == 0 &&
1932 (regs
->pstate
& PSR_A_BIT
) == 0 &&
1933 (regs
->pstate
& PSR_I_BIT
) == 0 &&
1934 (regs
->pstate
& PSR_F_BIT
) == 0) {
1938 /* Force PSR to a valid 64-bit EL0t */
1939 regs
->pstate
&= PSR_N_BIT
| PSR_Z_BIT
| PSR_C_BIT
| PSR_V_BIT
;
1945 * Are the current registers suitable for user mode? (used to maintain
1946 * security in signal handlers)
1948 int valid_user_regs(struct user_pt_regs
*regs
, struct task_struct
*task
)
1950 /* https://lore.kernel.org/lkml/20191118131525.GA4180@willie-the-truck */
1951 user_regs_reset_single_step(regs
, task
);
1953 if (is_compat_thread(task_thread_info(task
)))
1954 return valid_compat_regs(regs
);
1956 return valid_native_regs(regs
);