/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2020 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "linux-low.h"
#include "nat/aarch64-cap-linux.h"	/* For Morello */
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-cap-linux.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "tracepoint.h"

#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>

#include "gdb_proc_service.h"
#include "arch/aarch64.h"
#include "linux-aarch32-tdesc.h"
#include "linux-aarch64-tdesc.h"
#include "nat/aarch64-sve-linux-ptrace.h"
/* Linux target op definitions for the AArch64 architecture.  */

class aarch64_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  int breakpoint_kind_from_pc (CORE_ADDR *pcptr) override;

  int breakpoint_kind_from_current_state (CORE_ADDR *pcptr) override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

  bool supports_tracepoints () override;

  bool supports_fast_tracepoints () override;

  int install_fast_tracepoint_jump_pad
    (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
     CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
     CORE_ADDR *trampoline, ULONGEST *trampoline_size,
     unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
     CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
     char *err) override;

  int get_min_fast_tracepoint_insn_len () override;

  struct emit_ops *emit_ops () override;

  bool supports_qxfer_capability () override;

  int qxfer_capability (const CORE_ADDR address, unsigned char *readbuf,
			unsigned const char *writebuf,
			CORE_ADDR offset, int len) override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;

  bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
			  int direction) override;

  arch_process_info *low_new_process () override;

  void low_delete_process (arch_process_info *info) override;

  void low_new_thread (lwp_info *) override;

  void low_delete_thread (arch_lwp_info *) override;

  void low_new_fork (process_info *parent, process_info *child) override;

  void low_prepare_to_resume (lwp_info *lwp) override;

  int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;

  bool low_supports_range_stepping () override;

  bool low_supports_catch_syscall () override;

  void low_get_syscall_trapinfo (regcache *regcache, int *sysno) override;

  const struct link_map_offsets *low_fetch_linkmap_offsets (int is_elf64)
    override;
};

/* The singleton target ops object.  */

static aarch64_target the_aarch64_target;
bool
aarch64_target::low_cannot_fetch_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_fetch_register "
			  "is not implemented by the target");
}

bool
aarch64_target::low_cannot_store_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_store_register "
			  "is not implemented by the target");
}

void
aarch64_target::low_prepare_to_resume (lwp_info *lwp)
{
  aarch64_linux_prepare_to_resume (lwp);
}
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};
/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

/* Return true if the regcache contains the SVE registers.  */

static bool
is_sve_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return tdesc_contains_feature (regcache->tdesc, "org.gnu.gdb.aarch64.sve");
}
static bool gpr_changed = false;

static bool
gpr_set_changed (struct regcache *regcache, void *buf)
{
  size_t gpr_size = (AARCH64_X_REGS_NUM + 2) * 8 + 4;
  bool changed
    = memcmp (regcache->registers, buf, gpr_size) != 0;

  return changed;
}
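
/* Note: the size compared above is the general-purpose block at the start
   of the regcache buffer: AARCH64_X_REGS_NUM (31) X registers plus SP and
   PC at 8 bytes each, plus the 4-byte CPSR, i.e. 268 bytes in total.  */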
static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  /* Right now, regcache contains the updated contents of the registers.
     Check if anything has changed in the GPR's.  If nothing has changed,
     don't update anything.

     Otherwise, update the contents.  */
  gpr_changed = gpr_set_changed (regcache, buf);

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}
static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}
static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}
static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}
/* Store the pauth registers to regcache.  */

static void
aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
{
  uint64_t *pauth_regset = (uint64_t *) buf;
  int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");

  if (pauth_base == 0)
    return;

  supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
		   &pauth_regset[0]);
  supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
		   &pauth_regset[1]);
}
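
/* The NT_ARM_PAC_MASK regset supplies two 64-bit values, the data mask
   followed by the instruction (code) mask, which is why the two masks are
   read from consecutive uint64_t slots of the buffer above.  */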
/* Capability registers fill hook implementation.  */

static void
aarch64_fill_cregset (struct regcache *regcache, void *buf)
{
  /* If the GPR's have changed, don't attempt to change the C registers.  */
  if (gpr_changed)
    {
      /* Reset the flag.  */
      gpr_changed = false;
      return;
    }

  struct user_morello_state *cregset
    = (struct user_morello_state *) buf;

  int i, regno;
  int cregs_base = find_regno (regcache->tdesc, "c0");

  /* Store the C registers to the buffer.  */
  for (regno = cregs_base, i = 0;
       regno < cregs_base + AARCH64_C_REGS_NUM;
       regno++, i++)
    collect_register (regcache, regno, &cregset->cregs[i]);

  /* Store the other registers to the buffer.  */
  collect_register (regcache, regno++, &cregset->csp);
  collect_register (regcache, regno++, &cregset->pcc);
  collect_register (regcache, regno++, &cregset->ddc);
  collect_register (regcache, regno++, &cregset->ctpidr);
  collect_register (regcache, regno++, &cregset->rcsp);
  collect_register (regcache, regno++, &cregset->rddc);
  collect_register (regcache, regno++, &cregset->rctpidr);
  collect_register (regcache, regno++, &cregset->cid);
  collect_register (regcache, regno++, &cregset->tag_map);
  collect_register (regcache, regno++, &cregset->cctlr);
}
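
/* Presumably the intent of the early return above: aarch64_fill_gregset
   runs first (it precedes this regset in the regset arrays) and records
   whether the X register block changed; if it did, the Morello buffer is
   left holding the values just fetched from the kernel, so a plain GPR
   write does not rewrite, and potentially invalidate, capability state.  */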
/* Capability registers store hook implementation.  */

static void
aarch64_store_cregset (struct regcache *regcache, const void *buf)
{
  const struct user_morello_state *cregset
    = (const struct user_morello_state *) buf;

  int i, regno;
  int cregs_base = find_regno (regcache->tdesc, "c0");

  /* Fetch the C registers.  */
  for (regno = cregs_base, i = 0;
       regno < cregs_base + AARCH64_C_REGS_NUM;
       regno++, i++)
    supply_register (regcache, regno, &cregset->cregs[i]);

  /* Fetch the other registers.  */
  supply_register (regcache, regno++, &cregset->csp);
  supply_register (regcache, regno++, &cregset->pcc);
  supply_register (regcache, regno++, &cregset->ddc);
  supply_register (regcache, regno++, &cregset->ctpidr);
  supply_register (regcache, regno++, &cregset->rcsp);
  supply_register (regcache, regno++, &cregset->rddc);
  supply_register (regcache, regno++, &cregset->rctpidr);
  supply_register (regcache, regno++, &cregset->cid);
  supply_register (regcache, regno++, &cregset->tag_map);
  supply_register (regcache, regno++, &cregset->cctlr);
}
bool
aarch64_target::low_supports_breakpoints ()
{
  return true;
}

/* Implementation of linux_target_ops method "get_pc".  */

CORE_ADDR
aarch64_target::low_get_pc (regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    return linux_get_pc_64bit (regcache);
  else
    return linux_get_pc_32bit (regcache);
}

/* Implementation of linux target ops method "low_set_pc".  */

void
aarch64_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    linux_set_pc_64bit (regcache, pc);
  else
    linux_set_pc_32bit (regcache, pc);
}
#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

/* Implementation of linux target ops method "low_breakpoint_at".  */

bool
aarch64_target::low_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      read_memory (where, (unsigned char *) &insn, aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
	return true;

      return false;
    }
  else
    return arm_breakpoint_at (where);
}
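
/* The byte sequence above is the little-endian encoding of 0xd4200000,
   i.e. the A64 "BRK #0" instruction.  */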
static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}

/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}
/* Implementation of target ops method "supports_z_point_type".  */

bool
aarch64_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}
/* Implementation of linux target ops method "low_insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

int
aarch64_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
				  int len, raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
	ret = aarch64_handle_watchpoint (targ_type, addr, len,
					 1 /* is_insert */, state);
      else
	ret = -1;
    }
  else
    {
      if (len == 3)
	{
	  /* LEN is 3 means the breakpoint is set on a 32-bit thumb
	     instruction.  Set it to 2 to correctly encode length bit
	     mask in hardware/watchpoint control register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
				  targ_type);

  return ret;
}
/* Implementation of linux target ops method "low_remove_point".

   It actually only records the info of the to-be-removed bp/wp,
   the actual removal will be done when threads are resumed.  */

int
aarch64_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
				  int len, raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret = aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
				     state);
  else
    {
      if (len == 3)
	{
	  /* LEN is 3 means the breakpoint is set on a 32-bit thumb
	     instruction.  Set it to 2 to correctly encode length bit
	     mask in hardware/watchpoint control register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
				  targ_type);

  return ret;
}
562 aarch64_target::low_stopped_data_address ()
566 struct aarch64_debug_reg_state
*state
;
568 pid
= lwpid_of (current_thread
);
570 /* Get the siginfo. */
571 if (ptrace (PTRACE_GETSIGINFO
, pid
, NULL
, &siginfo
) != 0)
572 return (CORE_ADDR
) 0;
574 /* Need to be a hardware breakpoint/watchpoint trap. */
575 if (siginfo
.si_signo
!= SIGTRAP
576 || (siginfo
.si_code
& 0xffff) != 0x0004 /* TRAP_HWBKPT */)
577 return (CORE_ADDR
) 0;
579 /* Check if the address matches any watched address. */
580 state
= aarch64_get_debug_reg_state (pid_of (current_thread
));
581 for (i
= aarch64_num_wp_regs
- 1; i
>= 0; --i
)
583 const unsigned int offset
584 = aarch64_watchpoint_offset (state
->dr_ctrl_wp
[i
]);
585 const unsigned int len
= aarch64_watchpoint_length (state
->dr_ctrl_wp
[i
]);
586 const CORE_ADDR addr_trap
= (CORE_ADDR
) siginfo
.si_addr
;
587 const CORE_ADDR addr_watch
= state
->dr_addr_wp
[i
] + offset
;
588 const CORE_ADDR addr_watch_aligned
= align_down (state
->dr_addr_wp
[i
], 8);
589 const CORE_ADDR addr_orig
= state
->dr_addr_orig_wp
[i
];
591 if (state
->dr_ref_count_wp
[i
]
592 && DR_CONTROL_ENABLED (state
->dr_ctrl_wp
[i
])
593 && addr_trap
>= addr_watch_aligned
594 && addr_trap
< addr_watch
+ len
)
596 /* ADDR_TRAP reports the first address of the memory range
597 accessed by the CPU, regardless of what was the memory
598 range watched. Thus, a large CPU access that straddles
599 the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
600 ADDR_TRAP that is lower than the
601 ADDR_WATCH..ADDR_WATCH+LEN range. E.g.:
603 addr: | 4 | 5 | 6 | 7 | 8 |
604 |---- range watched ----|
605 |----------- range accessed ------------|
607 In this case, ADDR_TRAP will be 4.
609 To match a watchpoint known to GDB core, we must never
610 report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
611 range. ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
612 positive on kernels older than 4.10. See PR
618 return (CORE_ADDR
) 0;
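
/* Example of the range check above: for a watchpoint programmed at an
   unaligned address, ADDR_WATCH_ALIGNED is the 8-byte-aligned base the
   hardware actually monitors, so a trap whose SI_ADDR falls between that
   aligned base and ADDR_WATCH is still attributed to this watchpoint, and
   ADDR_ORIG (the address the client originally asked to watch) is what is
   reported back to GDB.  */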
/* Implementation of linux target ops method "low_stopped_by_watchpoint".  */

bool
aarch64_target::low_stopped_by_watchpoint ()
{
  return (low_stopped_data_address () != 0);
}

/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
				     is_64bit_tdesc ());
}
/* Implementation of linux target ops method "low_siginfo_fixup".  */

bool
aarch64_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
				   int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
	aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
					     native);
      else
	aarch64_siginfo_from_compat_siginfo (native,
					     (struct compat_siginfo *) inf);

      return true;
    }

  return false;
}
/* Implementation of linux target ops method "low_new_process".  */

arch_process_info *
aarch64_target::low_new_process ()
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux target ops method "low_delete_process".  */

void
aarch64_target::low_delete_process (arch_process_info *info)
{
  xfree (info);
}

void
aarch64_target::low_new_thread (lwp_info *lwp)
{
  aarch64_linux_new_thread (lwp);
}

void
aarch64_target::low_delete_thread (arch_lwp_info *arch_lwp)
{
  aarch64_linux_delete_thread (arch_lwp);
}
/* Implementation of linux target ops method "low_new_fork".  */

void
aarch64_target::low_new_fork (process_info *parent,
			      process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernel before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug registers mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug registers mirror will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}
/* Wrapper for aarch64_sve_regs_copy_to_reg_buf.  */

static void
aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
{
  return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
}

/* Wrapper for aarch64_sve_regs_copy_from_reg_buf.  */

static void
aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
}
static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  /* FIXME-Morello: Fixup the register set size.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_MORELLO,
    AARCH64_LINUX_CREGS_SIZE, OPTIONAL_REGS,
    aarch64_fill_cregset, aarch64_store_cregset, nullptr,
    "cheri.ptrace_forge_cap", "capability"
  },
  NULL_REGSET
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };
static struct regset_info aarch64_sve_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
    SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
    aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  /* FIXME-Morello: Fixup the register set size.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_MORELLO,
    AARCH64_LINUX_CREGS_SIZE, OPTIONAL_REGS,
    aarch64_fill_cregset, aarch64_store_cregset, nullptr,
    "cheri.ptrace_forge_cap", "capability"
  },
  NULL_REGSET
};

static struct regsets_info aarch64_sve_regsets_info =
  {
    aarch64_sve_regsets, /* regsets.  */
    0, /* num_regsets.  */
    NULL, /* disabled_regsets.  */
  };

static struct regs_info regs_info_aarch64_sve =
  {
    NULL, /* regset_bitmap.  */
    NULL, /* usrregs.  */
    &aarch64_sve_regsets_info,
  };
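
/* The PAC-mask and Morello entries above are marked OPTIONAL_REGS: if the
   running kernel does not implement the corresponding NT_* regset, the
   ptrace failure is tolerated instead of being reported as an error.  */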
/* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h.  */
#define AARCH64_HWCAP_PACA (1 << 30)

/* Implementation of linux target ops method "low_arch_setup".  */

void
aarch64_target::low_arch_setup ()
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    {
      uint64_t vq = aarch64_sve_get_vq (tid);
      unsigned long hwcap = linux_get_hwcap (8);
      unsigned long hwcap2 = linux_get_hwcap2 (8);
      bool pauth_p = hwcap & AARCH64_HWCAP_PACA;
      bool capability_p = hwcap2 & HWCAP2_MORELLO;

      current_process ()->tdesc = aarch64_linux_read_description (vq, pauth_p,
								  capability_p);

      /* Re-enable warnings for register sets with sysctl settings.  */
      aarch64_regsets[4].sysctl_write_should_warn = true;
      aarch64_sve_regsets[4].sysctl_write_should_warn = true;
    }
  else
    current_process ()->tdesc = aarch32_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}
/* Implementation of linux target ops method "get_regs_info".  */

const regs_info *
aarch64_target::get_regs_info ()
{
  if (!is_64bit_tdesc ())
    return &regs_info_aarch32;

  if (is_sve_tdesc ())
    return &regs_info_aarch64_sve;

  return &regs_info_aarch64;
}
/* Implementation of target ops method "supports_tracepoints".  */

bool
aarch64_target::supports_tracepoints ()
{
  if (current_thread == NULL)
    return true;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}
/* Implementation of linux target ops method "low_get_thread_area".  */

int
aarch64_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}
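
/* NT_ARM_TLS exposes the TPIDR_EL0 software thread ID register, which the
   C library uses as the thread pointer; that value is what libthread_db
   needs in order to locate thread-specific data.  */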
bool
aarch64_target::low_supports_catch_syscall ()
{
  return true;
}

/* Implementation of linux target ops method "low_get_syscall_trapinfo".  */

void
aarch64_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "x8", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "r7", sysno);
}
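
/* Per the Linux syscall conventions, the syscall number lives in X8 for
   AArch64 tasks and in R7 for AArch32 (compat) tasks, hence the two
   register names used above.  */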
static const struct link_map_offsets lmo_64bit_morello_offsets =
  {
    0,     /* r_version offset.  */
    16,    /* r_debug.r_map offset.  */
    0,     /* l_addr offset in link_map.  */
    16,    /* l_name offset in link_map.  */
    32,    /* l_ld offset in link_map.  */
    48,    /* l_next offset in link_map.  */
    64     /* l_prev offset in link_map.  */
  };

const struct link_map_offsets *
aarch64_target::low_fetch_linkmap_offsets (int is_elf64)
{
  CORE_ADDR entry_addr = linux_get_at_entry (8);

  /* If the LSB of AT_ENTRY is 1, then we have a pure capability Morello
     executable.  */
  if (entry_addr & 1)
    return &lmo_64bit_morello_offsets;

  return linux_process_target::low_fetch_linkmap_offsets (is_elf64);
}
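
/* The offsets above are spaced 16 bytes apart (rather than 8), presumably
   because the link_map and r_debug pointer members are capability-sized
   (128-bit) under the pure-capability Morello ABI detected by the
   AT_ENTRY LSB check above.  */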
/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};
/* List of registers that we are currently using, we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };
/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}
/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}

/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}
/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}
/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control registers.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};
1098 /* Write a BLR instruction into *BUF.
1102 RN is the register to branch to. */
1105 emit_blr (uint32_t *buf
, struct aarch64_register rn
)
1107 return aarch64_emit_insn (buf
, BLR
| ENCODE (rn
.num
, 5, 5));
1110 /* Write a RET instruction into *BUF.
1114 RN is the register to branch to. */
1117 emit_ret (uint32_t *buf
, struct aarch64_register rn
)
1119 return aarch64_emit_insn (buf
, RET
| ENCODE (rn
.num
, 5, 5));
1123 emit_load_store_pair (uint32_t *buf
, enum aarch64_opcodes opcode
,
1124 struct aarch64_register rt
,
1125 struct aarch64_register rt2
,
1126 struct aarch64_register rn
,
1127 struct aarch64_memory_operand operand
)
1131 uint32_t write_back
;
1134 opc
= ENCODE (2, 2, 30);
1136 opc
= ENCODE (0, 2, 30);
1138 switch (operand
.type
)
1140 case MEMORY_OPERAND_OFFSET
:
1142 pre_index
= ENCODE (1, 1, 24);
1143 write_back
= ENCODE (0, 1, 23);
1146 case MEMORY_OPERAND_POSTINDEX
:
1148 pre_index
= ENCODE (0, 1, 24);
1149 write_back
= ENCODE (1, 1, 23);
1152 case MEMORY_OPERAND_PREINDEX
:
1154 pre_index
= ENCODE (1, 1, 24);
1155 write_back
= ENCODE (1, 1, 23);
1162 return aarch64_emit_insn (buf
, opcode
| opc
| pre_index
| write_back
1163 | ENCODE (operand
.index
>> 3, 7, 15)
1164 | ENCODE (rt2
.num
, 5, 10)
1165 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
.num
, 5, 0));
1168 /* Write a STP instruction into *BUF.
1170 STP rt, rt2, [rn, #offset]
1171 STP rt, rt2, [rn, #index]!
1172 STP rt, rt2, [rn], #index
1174 RT and RT2 are the registers to store.
1175 RN is the base address register.
1176 OFFSET is the immediate to add to the base address. It is limited to a
1177 -512 .. 504 range (7 bits << 3). */
1180 emit_stp (uint32_t *buf
, struct aarch64_register rt
,
1181 struct aarch64_register rt2
, struct aarch64_register rn
,
1182 struct aarch64_memory_operand operand
)
1184 return emit_load_store_pair (buf
, STP
, rt
, rt2
, rn
, operand
);
1187 /* Write a LDP instruction into *BUF.
1189 LDP rt, rt2, [rn, #offset]
1190 LDP rt, rt2, [rn, #index]!
1191 LDP rt, rt2, [rn], #index
1193 RT and RT2 are the registers to store.
1194 RN is the base address register.
1195 OFFSET is the immediate to add to the base address. It is limited to a
1196 -512 .. 504 range (7 bits << 3). */
1199 emit_ldp (uint32_t *buf
, struct aarch64_register rt
,
1200 struct aarch64_register rt2
, struct aarch64_register rn
,
1201 struct aarch64_memory_operand operand
)
1203 return emit_load_store_pair (buf
, LDP
, rt
, rt2
, rn
, operand
);
1206 /* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.
1208 LDP qt, qt2, [rn, #offset]
1210 RT and RT2 are the Q registers to store.
1211 RN is the base address register.
1212 OFFSET is the immediate to add to the base address. It is limited to
1213 -1024 .. 1008 range (7 bits << 4). */
1216 emit_ldp_q_offset (uint32_t *buf
, unsigned rt
, unsigned rt2
,
1217 struct aarch64_register rn
, int32_t offset
)
1219 uint32_t opc
= ENCODE (2, 2, 30);
1220 uint32_t pre_index
= ENCODE (1, 1, 24);
1222 return aarch64_emit_insn (buf
, LDP_SIMD_VFP
| opc
| pre_index
1223 | ENCODE (offset
>> 4, 7, 15)
1224 | ENCODE (rt2
, 5, 10)
1225 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
, 5, 0));
1228 /* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.
1230 STP qt, qt2, [rn, #offset]
1232 RT and RT2 are the Q registers to store.
1233 RN is the base address register.
1234 OFFSET is the immediate to add to the base address. It is limited to
1235 -1024 .. 1008 range (7 bits << 4). */
1238 emit_stp_q_offset (uint32_t *buf
, unsigned rt
, unsigned rt2
,
1239 struct aarch64_register rn
, int32_t offset
)
1241 uint32_t opc
= ENCODE (2, 2, 30);
1242 uint32_t pre_index
= ENCODE (1, 1, 24);
1244 return aarch64_emit_insn (buf
, STP_SIMD_VFP
| opc
| pre_index
1245 | ENCODE (offset
>> 4, 7, 15)
1246 | ENCODE (rt2
, 5, 10)
1247 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
, 5, 0));
1250 /* Write a LDRH instruction into *BUF.
1252 LDRH wt, [xn, #offset]
1253 LDRH wt, [xn, #index]!
1254 LDRH wt, [xn], #index
1256 RT is the register to store.
1257 RN is the base address register.
1258 OFFSET is the immediate to add to the base address. It is limited to
1259 0 .. 32760 range (12 bits << 3). */
1262 emit_ldrh (uint32_t *buf
, struct aarch64_register rt
,
1263 struct aarch64_register rn
,
1264 struct aarch64_memory_operand operand
)
1266 return aarch64_emit_load_store (buf
, 1, LDR
, rt
, rn
, operand
);
1269 /* Write a LDRB instruction into *BUF.
1271 LDRB wt, [xn, #offset]
1272 LDRB wt, [xn, #index]!
1273 LDRB wt, [xn], #index
1275 RT is the register to store.
1276 RN is the base address register.
1277 OFFSET is the immediate to add to the base address. It is limited to
1278 0 .. 32760 range (12 bits << 3). */
1281 emit_ldrb (uint32_t *buf
, struct aarch64_register rt
,
1282 struct aarch64_register rn
,
1283 struct aarch64_memory_operand operand
)
1285 return aarch64_emit_load_store (buf
, 0, LDR
, rt
, rn
, operand
);
1290 /* Write a STR instruction into *BUF.
1292 STR rt, [rn, #offset]
1293 STR rt, [rn, #index]!
1294 STR rt, [rn], #index
1296 RT is the register to store.
1297 RN is the base address register.
1298 OFFSET is the immediate to add to the base address. It is limited to
1299 0 .. 32760 range (12 bits << 3). */
1302 emit_str (uint32_t *buf
, struct aarch64_register rt
,
1303 struct aarch64_register rn
,
1304 struct aarch64_memory_operand operand
)
1306 return aarch64_emit_load_store (buf
, rt
.is64
? 3 : 2, STR
, rt
, rn
, operand
);
1309 /* Helper function emitting an exclusive load or store instruction. */
1312 emit_load_store_exclusive (uint32_t *buf
, uint32_t size
,
1313 enum aarch64_opcodes opcode
,
1314 struct aarch64_register rs
,
1315 struct aarch64_register rt
,
1316 struct aarch64_register rt2
,
1317 struct aarch64_register rn
)
1319 return aarch64_emit_insn (buf
, opcode
| ENCODE (size
, 2, 30)
1320 | ENCODE (rs
.num
, 5, 16) | ENCODE (rt2
.num
, 5, 10)
1321 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
.num
, 5, 0));
1324 /* Write a LAXR instruction into *BUF.
1328 RT is the destination register.
1329 RN is the base address register. */
1332 emit_ldaxr (uint32_t *buf
, struct aarch64_register rt
,
1333 struct aarch64_register rn
)
1335 return emit_load_store_exclusive (buf
, rt
.is64
? 3 : 2, LDAXR
, xzr
, rt
,
1339 /* Write a STXR instruction into *BUF.
1343 RS is the result register, it indicates if the store succeeded or not.
1344 RT is the destination register.
1345 RN is the base address register. */
1348 emit_stxr (uint32_t *buf
, struct aarch64_register rs
,
1349 struct aarch64_register rt
, struct aarch64_register rn
)
1351 return emit_load_store_exclusive (buf
, rt
.is64
? 3 : 2, STXR
, rs
, rt
,
1355 /* Write a STLR instruction into *BUF.
1359 RT is the register to store.
1360 RN is the base address register. */
1363 emit_stlr (uint32_t *buf
, struct aarch64_register rt
,
1364 struct aarch64_register rn
)
1366 return emit_load_store_exclusive (buf
, rt
.is64
? 3 : 2, STLR
, xzr
, rt
,
1370 /* Helper function for data processing instructions with register sources. */
1373 emit_data_processing_reg (uint32_t *buf
, uint32_t opcode
,
1374 struct aarch64_register rd
,
1375 struct aarch64_register rn
,
1376 struct aarch64_register rm
)
1378 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1380 return aarch64_emit_insn (buf
, opcode
| size
| ENCODE (rm
.num
, 5, 16)
1381 | ENCODE (rn
.num
, 5, 5) | ENCODE (rd
.num
, 5, 0));
1384 /* Helper function for data processing instructions taking either a register
1388 emit_data_processing (uint32_t *buf
, enum aarch64_opcodes opcode
,
1389 struct aarch64_register rd
,
1390 struct aarch64_register rn
,
1391 struct aarch64_operand operand
)
1393 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1394 /* The opcode is different for register and immediate source operands. */
1395 uint32_t operand_opcode
;
1397 if (operand
.type
== OPERAND_IMMEDIATE
)
1399 /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
1400 operand_opcode
= ENCODE (8, 4, 25);
1402 return aarch64_emit_insn (buf
, opcode
| operand_opcode
| size
1403 | ENCODE (operand
.imm
, 12, 10)
1404 | ENCODE (rn
.num
, 5, 5)
1405 | ENCODE (rd
.num
, 5, 0));
1409 /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
1410 operand_opcode
= ENCODE (5, 4, 25);
1412 return emit_data_processing_reg (buf
, opcode
| operand_opcode
, rd
,
1417 /* Write an ADD instruction into *BUF.
1422 This function handles both an immediate and register add.
1424 RD is the destination register.
1425 RN is the input register.
1426 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1427 OPERAND_REGISTER. */
1430 emit_add (uint32_t *buf
, struct aarch64_register rd
,
1431 struct aarch64_register rn
, struct aarch64_operand operand
)
1433 return emit_data_processing (buf
, ADD
, rd
, rn
, operand
);
1436 /* Write a SUB instruction into *BUF.
1441 This function handles both an immediate and register sub.
1443 RD is the destination register.
1444 RN is the input register.
1445 IMM is the immediate to substract to RN. */
1448 emit_sub (uint32_t *buf
, struct aarch64_register rd
,
1449 struct aarch64_register rn
, struct aarch64_operand operand
)
1451 return emit_data_processing (buf
, SUB
, rd
, rn
, operand
);
1454 /* Write a MOV instruction into *BUF.
1459 This function handles both a wide immediate move and a register move,
1460 with the condition that the source register is not xzr. xzr and the
1461 stack pointer share the same encoding and this function only supports
1464 RD is the destination register.
1465 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1466 OPERAND_REGISTER. */
1469 emit_mov (uint32_t *buf
, struct aarch64_register rd
,
1470 struct aarch64_operand operand
)
1472 if (operand
.type
== OPERAND_IMMEDIATE
)
1474 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1475 /* Do not shift the immediate. */
1476 uint32_t shift
= ENCODE (0, 2, 21);
1478 return aarch64_emit_insn (buf
, MOV
| size
| shift
1479 | ENCODE (operand
.imm
, 16, 5)
1480 | ENCODE (rd
.num
, 5, 0));
1483 return emit_add (buf
, rd
, operand
.reg
, immediate_operand (0));
1486 /* Write a MOVK instruction into *BUF.
1488 MOVK rd, #imm, lsl #shift
1490 RD is the destination register.
1491 IMM is the immediate.
1492 SHIFT is the logical shift left to apply to IMM. */
1495 emit_movk (uint32_t *buf
, struct aarch64_register rd
, uint32_t imm
,
1498 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1500 return aarch64_emit_insn (buf
, MOVK
| size
| ENCODE (shift
, 2, 21) |
1501 ENCODE (imm
, 16, 5) | ENCODE (rd
.num
, 5, 0));
1504 /* Write instructions into *BUF in order to move ADDR into a register.
1505 ADDR can be a 64-bit value.
1507 This function will emit a series of MOV and MOVK instructions, such as:
1510 MOVK xd, #(addr >> 16), lsl #16
1511 MOVK xd, #(addr >> 32), lsl #32
1512 MOVK xd, #(addr >> 48), lsl #48 */
1515 emit_mov_addr (uint32_t *buf
, struct aarch64_register rd
, CORE_ADDR addr
)
1519 /* The MOV (wide immediate) instruction clears to top bits of the
1521 p
+= emit_mov (p
, rd
, immediate_operand (addr
& 0xffff));
1523 if ((addr
>> 16) != 0)
1524 p
+= emit_movk (p
, rd
, (addr
>> 16) & 0xffff, 1);
1528 if ((addr
>> 32) != 0)
1529 p
+= emit_movk (p
, rd
, (addr
>> 32) & 0xffff, 2);
1533 if ((addr
>> 48) != 0)
1534 p
+= emit_movk (p
, rd
, (addr
>> 48) & 0xffff, 3);
1539 /* Write a SUBS instruction into *BUF.
1543 This instruction update the condition flags.
1545 RD is the destination register.
1546 RN and RM are the source registers. */
1549 emit_subs (uint32_t *buf
, struct aarch64_register rd
,
1550 struct aarch64_register rn
, struct aarch64_operand operand
)
1552 return emit_data_processing (buf
, SUBS
, rd
, rn
, operand
);
1555 /* Write a CMP instruction into *BUF.
1559 This instruction is an alias of SUBS xzr, rn, rm.
1561 RN and RM are the registers to compare. */
1564 emit_cmp (uint32_t *buf
, struct aarch64_register rn
,
1565 struct aarch64_operand operand
)
1567 return emit_subs (buf
, xzr
, rn
, operand
);
1570 /* Write a AND instruction into *BUF.
1574 RD is the destination register.
1575 RN and RM are the source registers. */
1578 emit_and (uint32_t *buf
, struct aarch64_register rd
,
1579 struct aarch64_register rn
, struct aarch64_register rm
)
1581 return emit_data_processing_reg (buf
, AND
, rd
, rn
, rm
);
1584 /* Write a ORR instruction into *BUF.
1588 RD is the destination register.
1589 RN and RM are the source registers. */
1592 emit_orr (uint32_t *buf
, struct aarch64_register rd
,
1593 struct aarch64_register rn
, struct aarch64_register rm
)
1595 return emit_data_processing_reg (buf
, ORR
, rd
, rn
, rm
);
1598 /* Write a ORN instruction into *BUF.
1602 RD is the destination register.
1603 RN and RM are the source registers. */
1606 emit_orn (uint32_t *buf
, struct aarch64_register rd
,
1607 struct aarch64_register rn
, struct aarch64_register rm
)
1609 return emit_data_processing_reg (buf
, ORN
, rd
, rn
, rm
);
1612 /* Write a EOR instruction into *BUF.
1616 RD is the destination register.
1617 RN and RM are the source registers. */
1620 emit_eor (uint32_t *buf
, struct aarch64_register rd
,
1621 struct aarch64_register rn
, struct aarch64_register rm
)
1623 return emit_data_processing_reg (buf
, EOR
, rd
, rn
, rm
);
1626 /* Write a MVN instruction into *BUF.
1630 This is an alias for ORN rd, xzr, rm.
1632 RD is the destination register.
1633 RM is the source register. */
1636 emit_mvn (uint32_t *buf
, struct aarch64_register rd
,
1637 struct aarch64_register rm
)
1639 return emit_orn (buf
, rd
, xzr
, rm
);
1642 /* Write a LSLV instruction into *BUF.
1646 RD is the destination register.
1647 RN and RM are the source registers. */
1650 emit_lslv (uint32_t *buf
, struct aarch64_register rd
,
1651 struct aarch64_register rn
, struct aarch64_register rm
)
1653 return emit_data_processing_reg (buf
, LSLV
, rd
, rn
, rm
);
1656 /* Write a LSRV instruction into *BUF.
1660 RD is the destination register.
1661 RN and RM are the source registers. */
1664 emit_lsrv (uint32_t *buf
, struct aarch64_register rd
,
1665 struct aarch64_register rn
, struct aarch64_register rm
)
1667 return emit_data_processing_reg (buf
, LSRV
, rd
, rn
, rm
);
1670 /* Write a ASRV instruction into *BUF.
1674 RD is the destination register.
1675 RN and RM are the source registers. */
1678 emit_asrv (uint32_t *buf
, struct aarch64_register rd
,
1679 struct aarch64_register rn
, struct aarch64_register rm
)
1681 return emit_data_processing_reg (buf
, ASRV
, rd
, rn
, rm
);
1684 /* Write a MUL instruction into *BUF.
1688 RD is the destination register.
1689 RN and RM are the source registers. */
1692 emit_mul (uint32_t *buf
, struct aarch64_register rd
,
1693 struct aarch64_register rn
, struct aarch64_register rm
)
1695 return emit_data_processing_reg (buf
, MUL
, rd
, rn
, rm
);
1698 /* Write a MRS instruction into *BUF. The register size is 64-bit.
1702 RT is the destination register.
1703 SYSTEM_REG is special purpose register to read. */
1706 emit_mrs (uint32_t *buf
, struct aarch64_register rt
,
1707 enum aarch64_system_control_registers system_reg
)
1709 return aarch64_emit_insn (buf
, MRS
| ENCODE (system_reg
, 15, 5)
1710 | ENCODE (rt
.num
, 5, 0));
1713 /* Write a MSR instruction into *BUF. The register size is 64-bit.
1717 SYSTEM_REG is special purpose register to write.
1718 RT is the input register. */
1721 emit_msr (uint32_t *buf
, enum aarch64_system_control_registers system_reg
,
1722 struct aarch64_register rt
)
1724 return aarch64_emit_insn (buf
, MSR
| ENCODE (system_reg
, 15, 5)
1725 | ENCODE (rt
.num
, 5, 0));
1728 /* Write a SEVL instruction into *BUF.
1730 This is a hint instruction telling the hardware to trigger an event. */
1733 emit_sevl (uint32_t *buf
)
1735 return aarch64_emit_insn (buf
, SEVL
);
1738 /* Write a WFE instruction into *BUF.
1740 This is a hint instruction telling the hardware to wait for an event. */
1743 emit_wfe (uint32_t *buf
)
1745 return aarch64_emit_insn (buf
, WFE
);
1748 /* Write a SBFM instruction into *BUF.
1750 SBFM rd, rn, #immr, #imms
1752 This instruction moves the bits from #immr to #imms into the
1753 destination, sign extending the result.
1755 RD is the destination register.
1756 RN is the source register.
1757 IMMR is the bit number to start at (least significant bit).
1758 IMMS is the bit number to stop at (most significant bit). */
1761 emit_sbfm (uint32_t *buf
, struct aarch64_register rd
,
1762 struct aarch64_register rn
, uint32_t immr
, uint32_t imms
)
1764 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1765 uint32_t n
= ENCODE (rd
.is64
, 1, 22);
1767 return aarch64_emit_insn (buf
, SBFM
| size
| n
| ENCODE (immr
, 6, 16)
1768 | ENCODE (imms
, 6, 10) | ENCODE (rn
.num
, 5, 5)
1769 | ENCODE (rd
.num
, 5, 0));
1772 /* Write a SBFX instruction into *BUF.
1774 SBFX rd, rn, #lsb, #width
1776 This instruction moves #width bits from #lsb into the destination, sign
1777 extending the result. This is an alias for:
1779 SBFM rd, rn, #lsb, #(lsb + width - 1)
1781 RD is the destination register.
1782 RN is the source register.
1783 LSB is the bit number to start at (least significant bit).
1784 WIDTH is the number of bits to move. */
1787 emit_sbfx (uint32_t *buf
, struct aarch64_register rd
,
1788 struct aarch64_register rn
, uint32_t lsb
, uint32_t width
)
1790 return emit_sbfm (buf
, rd
, rn
, lsb
, lsb
+ width
- 1);
1793 /* Write a UBFM instruction into *BUF.
1795 UBFM rd, rn, #immr, #imms
1797 This instruction moves the bits from #immr to #imms into the
1798 destination, extending the result with zeros.
1800 RD is the destination register.
1801 RN is the source register.
1802 IMMR is the bit number to start at (least significant bit).
1803 IMMS is the bit number to stop at (most significant bit). */
1806 emit_ubfm (uint32_t *buf
, struct aarch64_register rd
,
1807 struct aarch64_register rn
, uint32_t immr
, uint32_t imms
)
1809 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1810 uint32_t n
= ENCODE (rd
.is64
, 1, 22);
1812 return aarch64_emit_insn (buf
, UBFM
| size
| n
| ENCODE (immr
, 6, 16)
1813 | ENCODE (imms
, 6, 10) | ENCODE (rn
.num
, 5, 5)
1814 | ENCODE (rd
.num
, 5, 0));
1817 /* Write a UBFX instruction into *BUF.
1819 UBFX rd, rn, #lsb, #width
1821 This instruction moves #width bits from #lsb into the destination,
1822 extending the result with zeros. This is an alias for:
1824 UBFM rd, rn, #lsb, #(lsb + width - 1)
1826 RD is the destination register.
1827 RN is the source register.
1828 LSB is the bit number to start at (least significant bit).
1829 WIDTH is the number of bits to move. */
1832 emit_ubfx (uint32_t *buf
, struct aarch64_register rd
,
1833 struct aarch64_register rn
, uint32_t lsb
, uint32_t width
)
1835 return emit_ubfm (buf
, rd
, rn
, lsb
, lsb
+ width
- 1);
1838 /* Write a CSINC instruction into *BUF.
1840 CSINC rd, rn, rm, cond
1842 This instruction conditionally increments rn or rm and places the result
1843 in rd. rn is chosen is the condition is true.
1845 RD is the destination register.
1846 RN and RM are the source registers.
1847 COND is the encoded condition. */
1850 emit_csinc (uint32_t *buf
, struct aarch64_register rd
,
1851 struct aarch64_register rn
, struct aarch64_register rm
,
1854 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1856 return aarch64_emit_insn (buf
, CSINC
| size
| ENCODE (rm
.num
, 5, 16)
1857 | ENCODE (cond
, 4, 12) | ENCODE (rn
.num
, 5, 5)
1858 | ENCODE (rd
.num
, 5, 0));
1861 /* Write a CSET instruction into *BUF.
1865 This instruction conditionally write 1 or 0 in the destination register.
1866 1 is written if the condition is true. This is an alias for:
1868 CSINC rd, xzr, xzr, !cond
1870 Note that the condition needs to be inverted.
1872 RD is the destination register.
1873 RN and RM are the source registers.
1874 COND is the encoded condition. */
1877 emit_cset (uint32_t *buf
, struct aarch64_register rd
, unsigned cond
)
1879 /* The least significant bit of the condition needs toggling in order to
1881 return emit_csinc (buf
, rd
, xzr
, xzr
, cond
^ 0x1);
1884 /* Write LEN instructions from BUF into the inferior memory at *TO.
1886 Note instructions are always little endian on AArch64, unlike data. */
1889 append_insns (CORE_ADDR
*to
, size_t len
, const uint32_t *buf
)
1891 size_t byte_len
= len
* sizeof (uint32_t);
1892 #if (__BYTE_ORDER == __BIG_ENDIAN)
1893 uint32_t *le_buf
= (uint32_t *) xmalloc (byte_len
);
1896 for (i
= 0; i
< len
; i
++)
1897 le_buf
[i
] = htole32 (buf
[i
]);
1899 target_write_memory (*to
, (const unsigned char *) le_buf
, byte_len
);
1903 target_write_memory (*to
, (const unsigned char *) buf
, byte_len
);
1909 /* Sub-class of struct aarch64_insn_data, store information of
1910 instruction relocation for fast tracepoint. Visitor can
1911 relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
1912 the relocated instructions in buffer pointed by INSN_PTR. */
1914 struct aarch64_insn_relocation_data
1916 struct aarch64_insn_data base
;
1918 /* The new address the instruction is relocated to. */
1920 /* Pointer to the buffer of relocated instruction(s). */
1924 /* Implementation of aarch64_insn_visitor method "b". */
1927 aarch64_ftrace_insn_reloc_b (const int is_bl
, const int32_t offset
,
1928 struct aarch64_insn_data
*data
)
1930 struct aarch64_insn_relocation_data
*insn_reloc
1931 = (struct aarch64_insn_relocation_data
*) data
;
1933 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
1935 if (can_encode_int32 (new_offset
, 28))
1936 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, is_bl
, new_offset
);
1939 /* Implementation of aarch64_insn_visitor method "b_cond". */
1942 aarch64_ftrace_insn_reloc_b_cond (const unsigned cond
, const int32_t offset
,
1943 struct aarch64_insn_data
*data
)
1945 struct aarch64_insn_relocation_data
*insn_reloc
1946 = (struct aarch64_insn_relocation_data
*) data
;
1948 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
1950 if (can_encode_int32 (new_offset
, 21))
1952 insn_reloc
->insn_ptr
+= emit_bcond (insn_reloc
->insn_ptr
, cond
,
1955 else if (can_encode_int32 (new_offset
, 28))
1957 /* The offset is out of range for a conditional branch
1958 instruction but not for a unconditional branch. We can use
1959 the following instructions instead:
1961 B.COND TAKEN ; If cond is true, then jump to TAKEN.
1962 B NOT_TAKEN ; Else jump over TAKEN and continue.
1969 insn_reloc
->insn_ptr
+= emit_bcond (insn_reloc
->insn_ptr
, cond
, 8);
1970 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, 8);
1971 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, new_offset
- 8);
1975 /* Implementation of aarch64_insn_visitor method "cb". */
1978 aarch64_ftrace_insn_reloc_cb (const int32_t offset
, const int is_cbnz
,
1979 const unsigned rn
, int is64
,
1980 struct aarch64_insn_data
*data
)
1982 struct aarch64_insn_relocation_data
*insn_reloc
1983 = (struct aarch64_insn_relocation_data
*) data
;
1985 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
1987 if (can_encode_int32 (new_offset
, 21))
1989 insn_reloc
->insn_ptr
+= emit_cb (insn_reloc
->insn_ptr
, is_cbnz
,
1990 aarch64_register (rn
, is64
), new_offset
);
1992 else if (can_encode_int32 (new_offset
, 28))
1994 /* The offset is out of range for a compare and branch
1995 instruction but not for a unconditional branch. We can use
1996 the following instructions instead:
1998 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
1999 B NOT_TAKEN ; Else jump over TAKEN and continue.
2005 insn_reloc
->insn_ptr
+= emit_cb (insn_reloc
->insn_ptr
, is_cbnz
,
2006 aarch64_register (rn
, is64
), 8);
2007 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, 8);
2008 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, new_offset
- 8);
2012 /* Implementation of aarch64_insn_visitor method "tb". */
2015 aarch64_ftrace_insn_reloc_tb (const int32_t offset
, int is_tbnz
,
2016 const unsigned rt
, unsigned bit
,
2017 struct aarch64_insn_data
*data
)
2019 struct aarch64_insn_relocation_data
*insn_reloc
2020 = (struct aarch64_insn_relocation_data
*) data
;
2022 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
2024 if (can_encode_int32 (new_offset
, 16))
2026 insn_reloc
->insn_ptr
+= emit_tb (insn_reloc
->insn_ptr
, is_tbnz
, bit
,
2027 aarch64_register (rt
, 1), new_offset
);
2029 else if (can_encode_int32 (new_offset
, 28))
2031 /* The offset is out of range for a test bit and branch
2032 instruction but not for a unconditional branch. We can use
2033 the following instructions instead:
2035 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2036 B NOT_TAKEN ; Else jump over TAKEN and continue.
2042 insn_reloc
->insn_ptr
+= emit_tb (insn_reloc
->insn_ptr
, is_tbnz
, bit
,
2043 aarch64_register (rt
, 1), 8);
2044 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, 8);
2045 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0,
2050 /* Implementation of aarch64_insn_visitor method "adr". */
2053 aarch64_ftrace_insn_reloc_adr (const int32_t offset
, const unsigned rd
,
2055 struct aarch64_insn_data
*data
)
2057 struct aarch64_insn_relocation_data
*insn_reloc
2058 = (struct aarch64_insn_relocation_data
*) data
;
2059 /* We know exactly the address the ADR{P,} instruction will compute.
2060 We can just write it to the destination register. */
2061 CORE_ADDR address
= data
->insn_addr
+ offset
;
2065 /* Clear the lower 12 bits of the offset to get the 4K page. */
2066 insn_reloc
->insn_ptr
+= emit_mov_addr (insn_reloc
->insn_ptr
,
2067 aarch64_register (rd
, 1),
2071 insn_reloc
->insn_ptr
+= emit_mov_addr (insn_reloc
->insn_ptr
,
2072 aarch64_register (rd
, 1), address
);
2075 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
2078 aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset
, const int is_sw
,
2079 const unsigned rt
, const int is64
,
2080 struct aarch64_insn_data
*data
)
2082 struct aarch64_insn_relocation_data
*insn_reloc
2083 = (struct aarch64_insn_relocation_data
*) data
;
2084 CORE_ADDR address
= data
->insn_addr
+ offset
;
2086 insn_reloc
->insn_ptr
+= emit_mov_addr (insn_reloc
->insn_ptr
,
2087 aarch64_register (rt
, 1), address
);
2089 /* We know exactly what address to load from, and what register we
2092 MOV xd, #(oldloc + offset)
2093 MOVK xd, #((oldloc + offset) >> 16), lsl #16
2096 LDR xd, [xd] ; or LDRSW xd, [xd]
2101 insn_reloc
->insn_ptr
+= emit_ldrsw (insn_reloc
->insn_ptr
,
2102 aarch64_register (rt
, 1),
2103 aarch64_register (rt
, 1),
2104 offset_memory_operand (0));
2106 insn_reloc
->insn_ptr
+= emit_ldr (insn_reloc
->insn_ptr
,
2107 aarch64_register (rt
, is64
),
2108 aarch64_register (rt
, 1),
2109 offset_memory_operand (0));
2112 /* Implementation of aarch64_insn_visitor method "others". */
2115 aarch64_ftrace_insn_reloc_others (const uint32_t insn
,
2116 struct aarch64_insn_data
*data
)
2118 struct aarch64_insn_relocation_data
*insn_reloc
2119 = (struct aarch64_insn_relocation_data
*) data
;
2121 /* The instruction is not PC relative. Just re-emit it at the new
2123 insn_reloc
->insn_ptr
+= aarch64_emit_insn (insn_reloc
->insn_ptr
, insn
);
2126 static const struct aarch64_insn_visitor visitor
=
2128 aarch64_ftrace_insn_reloc_b
,
2129 aarch64_ftrace_insn_reloc_b_cond
,
2130 aarch64_ftrace_insn_reloc_cb
,
2131 aarch64_ftrace_insn_reloc_tb
,
2132 aarch64_ftrace_insn_reloc_adr
,
2133 aarch64_ftrace_insn_reloc_ldr_literal
,
2134 aarch64_ftrace_insn_reloc_others
,
2138 aarch64_target::supports_fast_tracepoints ()

/* Implementation of target ops method
   "install_fast_tracepoint_jump_pad".  */

int
aarch64_target::install_fast_tracepoint_jump_pad
  (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
   CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
   CORE_ADDR *trampoline, ULONGEST *trampoline_size,
   unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
   CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
   char *err)
{
  uint32_t buf[256];
  uint32_t *p = buf;
  int64_t offset;
  uint32_t insn;
  int i;
  CORE_ADDR buildaddr = *jump_entry;
  struct aarch64_insn_relocation_data insn_data;

  /* We need to save the current state on the stack both to restore it
     later and to collect register values when the tracepoint is hit.

     The saved registers are pushed in a layout that needs to be in sync
     with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c).  Later on
     the supply_fast_tracepoint_registers function will fill in the
     register cache from a pointer to saved registers on the stack we build
     here.

     For simplicity, we set the size of each cell on the stack to 16 bytes.
     This way one cell can hold any register type, from system registers
     to the 128 bit SIMD&FP registers.  Furthermore, the stack pointer
     has to be 16 bytes aligned anyway.

     Note that the CPSR register does not exist on AArch64.  Instead we
     can access system bits describing the process state with the
     MRS/MSR instructions, namely the condition flags.  We save them as
     if they are part of a CPSR register because that's how GDB
     interprets these system bits.  At the moment, only the condition
     flags are saved in CPSR (NZCV).

     Stack layout, each cell is 16 bytes (descending):

     High *-------- SIMD&FP registers from 31 down to 0. --------*
	  | q31                                                  |
	  .                                                      .
	  .                                                      . 32 cells
	  .                                                      .
	  | q0                                                   |
	  *---- General purpose registers from 30 down to 0. ----*
	  | x30                                                  |
	  .                                                      .
	  .                                                      . 31 cells
	  .                                                      .
	  | x0                                                   |
	  *------------- Special purpose registers. -------------*
	  | SP                                                   |
	  | PC                                                   |
	  | CPSR (NZCV)                                          | 5 cells
	  | FPSR                                                 |
	  | FPCR                                                 |
	  *------------- collecting_t object --------------------*
	  | TPIDR_EL0                  | struct tracepoint *     |
     Low  *------------------------------------------------------*

     After this stack is set up, we issue a call to the collector, passing
     it the saved registers at (SP + 16).  */
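
  /* In total the pad reserves (32 + 31 + 5 + 1) * 16 = 1104 bytes of the
     interrupted thread's stack while the collector runs.  */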

  /* Push SIMD&FP registers on the stack:

       SUB sp, sp, #(32 * 16)

       STP q30, q31, [sp, #(30 * 16)]
       ...
       STP q0, q1, [sp]

     */
  p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
  for (i = 30; i >= 0; i -= 2)
    p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);

  /* Push general purpose registers on the stack.  Note that we do not need
     to push x31 as it represents the xzr register and not the stack
     pointer in a STR instruction.

       SUB sp, sp, #(31 * 16)

       STR x30, [sp, #(30 * 16)]
       ...
       STR x0, [sp]

     */
  p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
  for (i = 30; i >= 0; i -= 1)
    p += emit_str (p, aarch64_register (i, 1), sp,
		   offset_memory_operand (i * 16));

  /* Make space for 5 more cells.

       SUB sp, sp, #(5 * 16)

     */
  p += emit_sub (p, sp, sp, immediate_operand (5 * 16));

  /* Save SP:

       ADD x4, sp, #((32 + 31 + 5) * 16)
       STR x4, [sp, #(4 * 16)]

     */
  p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
  p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));

  /* Save PC (tracepoint address):

       MOV x3, #(tpaddr)
       ...

       STR x3, [sp, #(3 * 16)]

     */
  p += emit_mov_addr (p, x3, tpaddr);
  p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));

  /* Save CPSR (NZCV), FPSR and FPCR:

       MRS x2, nzcv
       MRS x1, fpsr
       MRS x0, fpcr

       STR x2, [sp, #(2 * 16)]
       STR x1, [sp, #(1 * 16)]
       STR x0, [sp, #(0 * 16)]

     */
  p += emit_mrs (p, x2, NZCV);
  p += emit_mrs (p, x1, FPSR);
  p += emit_mrs (p, x0, FPCR);
  p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));

  /* Push the collecting_t object.  It consists of the address of the
     tracepoint and an ID for the current thread.  We get the latter by
     reading the tpidr_el0 system register.  It corresponds to the
     NT_ARM_TLS register accessible with ptrace.

       MOV x0, #(tpoint)
       ...

       MRS x1, tpidr_el0

       STP x0, x1, [sp, #-16]!

     */
  p += emit_mov_addr (p, x0, tpoint);
  p += emit_mrs (p, x1, TPIDR_EL0);
  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));

  /* Spin-lock:

     The shared memory for the lock is at lockaddr.  It will hold zero
     if no-one is holding the lock, otherwise it contains the address of
     the collecting_t object on the stack of the thread which acquired it.

     At this stage, the stack pointer points to this thread's collecting_t
     object.

     We use the following registers:
     - x0: Address of the lock.
     - x1: Pointer to collecting_t object.
     - x2: Scratch register.

       MOV x0, #(lockaddr)
       ...
       MOV x1, sp

       ; Trigger an event local to this core.  So the following WFE
       ; instruction is ignored.
       SEVL
     again:
       ; Wait for an event.  The event is triggered by either the SEVL
       ; or STLR instructions (store release).
       WFE

       ; Atomically read at lockaddr.  This marks the memory location as
       ; exclusive.  This instruction also has memory constraints which
       ; make sure all previous data reads and writes are done before
       ; this instruction.
       LDAXR x2, [x0]

       ; Try again if another thread holds the lock.
       CBNZ x2, again

       ; We can lock it!  Write the address of the collecting_t object.
       ; This instruction will fail if the memory location is not marked
       ; as exclusive anymore.  If it succeeds, it will remove the
       ; exclusive mark on the memory location.  This way, if another
       ; thread executes this instruction before us, we will fail and try
       ; all over again.
       STXR w2, x1, [x0]
       CBNZ w2, again

     */
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_mov (p, x1, register_operand (sp));
  p += emit_sevl (p);
  p += emit_wfe (p);
  p += emit_ldaxr (p, x2, x0);
  p += emit_cb (p, 1, w2, -2 * 4);
  p += emit_stxr (p, w2, x1, x0);
  p += emit_cb (p, 1, x2, -4 * 4);
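
  /* Both emit_cb calls above encode backward branches in bytes, landing on
     the WFE so that a thread which loses the race waits for the next event
     before retrying the acquisition.  */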

  /* Call collector (struct tracepoint *, unsigned char *):

       MOV x0, #(tpoint)
       ...

       ; Saved registers start after the collecting_t object.
       ADD x1, sp, #16

       ; We use an intra-procedure-call scratch register.
       MOV ip0, #(collector)
       ...

       ; And call back to C!
       BLR ip0

     */
  p += emit_mov_addr (p, x0, tpoint);
  p += emit_add (p, x1, sp, immediate_operand (16));

  p += emit_mov_addr (p, ip0, collector);
  p += emit_blr (p, ip0);

  /* Release the lock.

       MOV x0, #(lockaddr)
       ...

       ; This instruction is a normal store with memory ordering
       ; constraints.  Thanks to this we do not have to put a data
       ; barrier instruction to make sure all data reads and writes are done
       ; before this instruction is executed.  Furthermore, this instruction
       ; will trigger an event, letting other threads know they can grab
       ; the lock.
       STLR xzr, [x0]

     */
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_stlr (p, xzr, x0);

  /* Free collecting_t object:

       ADD sp, sp, #16

     */
  p += emit_add (p, sp, sp, immediate_operand (16));

  /* Restore CPSR (NZCV), FPSR and FPCR.  And free all special purpose
     registers from the stack.

       LDR x2, [sp, #(2 * 16)]
       LDR x1, [sp, #(1 * 16)]
       LDR x0, [sp, #(0 * 16)]

       MSR NZCV, x2
       MSR FPSR, x1
       MSR FPCR, x0

       ADD sp, sp, #(5 * 16)

     */
  p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_msr (p, NZCV, x2);
  p += emit_msr (p, FPSR, x1);
  p += emit_msr (p, FPCR, x0);
  p += emit_add (p, sp, sp, immediate_operand (5 * 16));

  /* Pop general purpose registers:

       LDR x0, [sp]
       ...
       LDR x30, [sp, #(30 * 16)]

       ADD sp, sp, #(31 * 16)

     */
  for (i = 0; i <= 30; i += 1)
    p += emit_ldr (p, aarch64_register (i, 1), sp,
		   offset_memory_operand (i * 16));
  p += emit_add (p, sp, sp, immediate_operand (31 * 16));

  /* Pop SIMD&FP registers:

       LDP q0, q1, [sp]
       ...
       LDP q30, q31, [sp, #(30 * 16)]

       ADD sp, sp, #(32 * 16)

     */
  for (i = 0; i <= 30; i += 2)
    p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
  p += emit_add (p, sp, sp, immediate_operand (32 * 16));

  /* Write the code into the inferior memory.  */
  append_insns (&buildaddr, p - buf, buf);

  /* Now emit the relocated instruction.  */
  *adjusted_insn_addr = buildaddr;
  target_read_uint32 (tpaddr, &insn);

  insn_data.base.insn_addr = tpaddr;
  insn_data.new_addr = buildaddr;
  insn_data.insn_ptr = buf;

  aarch64_relocate_instruction (insn, &visitor,
				(struct aarch64_insn_data *) &insn_data);

  /* We may not have been able to relocate the instruction.  */
  if (insn_data.insn_ptr == buf)
    {
      sprintf (err,
	       "E.Could not relocate instruction from %s to %s.",
	       core_addr_to_string_nz (tpaddr),
	       core_addr_to_string_nz (buildaddr));
      return 1;
    }

  /* Write the relocated instruction into the inferior memory.  */
  append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
  *adjusted_insn_addr_end = buildaddr;

  /* Go back to the start of the buffer.  */
  p = buf;

  /* Emit a branch back from the jump pad.  */
  offset = (tpaddr + orig_size - buildaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
	       offset);
      return 1;
    }

  p += emit_b (p, 0, offset);
  append_insns (&buildaddr, p - buf, buf);
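
  /* At this point the pad holds the register-collection code, the relocated
     copy of the original instruction and the branch back; all that is left
     is to hand the caller the instruction that jumps into the pad.  */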

  /* Give the caller a branch instruction into the jump pad.  */
  offset = (*jump_entry - tpaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
	       offset);
      return 1;
    }

  emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
  *jjump_pad_insn_size = 4;

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

/* Helper function writing LEN instructions from START into
   current_insn_ptr.  */

static void
emit_ops_insns (const uint32_t *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d instructions at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Pop a register from the stack.  */

static int
emit_pop (uint32_t *buf, struct aarch64_register rt)
{
  return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
}

/* Push a register on the stack.  */

static int
emit_push (uint32_t *buf, struct aarch64_register rt)
{
  return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
}
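
/* Each stack slot used by emit_push and emit_pop is 16 bytes even though
   only 8 bytes are stored, which keeps SP 16-byte aligned as AArch64
   requires.  */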

/* Implementation of emit_ops method "emit_prologue".  */

static void
aarch64_emit_prologue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* This function emits a prologue for the following function prototype:

     enum eval_result_type f (unsigned char *regs,
			      ULONGEST *value);

     The first argument is a buffer of raw registers.  The second
     argument is the result of evaluating the expression, which will be
     set to whatever is on top of the stack at the end.

     The stack set up by the prologue is as such:

     High *------------------------------------------------------*
	  | LR                                                   |
	  | FP                                                   |
	  | x1  (ULONGEST *value)                                |
	  | x0  (unsigned char *regs)                            |
     Low  *------------------------------------------------------*

     As we are implementing a stack machine, each opcode can expand the
     stack so we never know how far we are from the data saved by this
     prologue.  In order to be able to refer to value and regs later, we
     save the current stack pointer in the frame pointer.  This way, it is
     not clobbered when calling C functions.

     Finally, throughout every operation, we are using register x0 as the
     top of the stack, and x1 as a scratch register.  */

  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
  p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
  p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));

  p += emit_add (p, fp, sp, immediate_operand (2 * 8));

  emit_ops_insns (buf, p - buf);
}
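
/* With the frame built above, REGS can be reloaded from [fp - 16] and
   VALUE from [fp - 8]; aarch64_emit_reg and aarch64_emit_epilogue below
   rely on those offsets.  */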

/* Implementation of emit_ops method "emit_epilogue".  */

static void
aarch64_emit_epilogue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Store the result of the expression (x0) in *value.  */
  p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
  p += emit_ldr (p, x1, x1, offset_memory_operand (0));
  p += emit_str (p, x0, x1, offset_memory_operand (0));

  /* Restore the previous state.  */
  p += emit_add (p, sp, fp, immediate_operand (2 * 8));
  p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));

  /* Return expr_eval_no_error.  */
  p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
  p += emit_ret (p, lr);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_add".  */

static void
aarch64_emit_add (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_add (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_sub".  */

static void
aarch64_emit_sub (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_sub (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_mul".  */

static void
aarch64_emit_mul (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_mul (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lsh".  */

static void
aarch64_emit_lsh (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lslv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_signed".  */

static void
aarch64_emit_rsh_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_asrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_rsh_unsigned".  */

static void
aarch64_emit_rsh_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lsrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ext".  */

static void
aarch64_emit_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_sbfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_log_not".  */

static void
aarch64_emit_log_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* If the top of the stack is 0, replace it with 1.  Else replace it
     with 0.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}
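
/* In aarch64_emit_log_not, CSET x0, EQ writes 1 when the preceding
   comparison found x0 == 0 and 0 otherwise, i.e. the logical negation of
   the top of the stack.  */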

/* Implementation of emit_ops method "emit_bit_and".  */

static void
aarch64_emit_bit_and (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_and (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_or".  */

static void
aarch64_emit_bit_or (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_orr (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_xor".  */

static void
aarch64_emit_bit_xor (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_eor (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_bit_not".  */

static void
aarch64_emit_bit_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mvn (p, x0, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_equal".  */

static void
aarch64_emit_equal (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x0, register_operand (x1));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_signed".  */

static void
aarch64_emit_less_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LT);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_less_unsigned".  */

static void
aarch64_emit_less_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LO);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ref".  */

static void
aarch64_emit_ref (int size)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  switch (size)
    {
    case 1:
      p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
      break;
    case 2:
      p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
      break;
    case 4:
      p += emit_ldr (p, w0, x0, offset_memory_operand (0));
      break;
    case 8:
      p += emit_ldr (p, x0, x0, offset_memory_operand (0));
      break;
    default:
      /* Unknown size, bail on compilation.  */
      emit_error = 1;
      break;
    }

  emit_ops_insns (buf, p - buf);
}
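
/* In aarch64_emit_ref the top of the stack (x0) holds the address to
   dereference; the loaded value replaces it, and the sub-64-bit loads into
   w0 leave it zero-extended.  */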

/* Implementation of emit_ops method "emit_if_goto".  */

static void
aarch64_emit_if_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The Z flag is set or cleared here.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  /* This instruction must not change the Z flag.  */
  p += emit_pop (p, x0);
  /* Branch over the next instruction if x0 == 0.  */
  p += emit_bcond (p, EQ, 8);

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_goto".  */

static void
aarch64_emit_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = 0;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "write_goto_address".  */

static void
aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  uint32_t insn;

  emit_b (&insn, 0, to - from);
  append_insns (&from, 1, &insn);
}
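
/* write_goto_address is what later patches the NOP emitted by the
   goto-style methods with a real B instruction, once the branch target of
   the bytecode goto is known.  */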

/* Implementation of emit_ops method "emit_const".  */

static void
aarch64_emit_const (LONGEST num)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, x0, num);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_call".  */

static void
aarch64_emit_call (CORE_ADDR fn)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, ip0, fn);
  p += emit_blr (p, ip0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_reg".  */

static void
aarch64_emit_reg (int reg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Set x0 to unsigned char *regs.  */
  p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
  p += emit_ldr (p, x0, x0, offset_memory_operand (0));
  p += emit_mov (p, x1, immediate_operand (reg));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (get_raw_reg_func_addr ());
}

/* Implementation of emit_ops method "emit_pop".  */

static void
aarch64_emit_pop (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_stack_flush".  */

static void
aarch64_emit_stack_flush (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_push (p, x0);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_zero_ext".  */

static void
aarch64_emit_zero_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ubfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_swap".  */

static void
aarch64_emit_swap (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_mov (p, x0, register_operand (x1));

  emit_ops_insns (buf, p - buf);
}
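
/* Since x0 caches the top of the expression stack, swapping the two top
   entries means exchanging x0 with the value stored in the topmost memory
   cell at [sp].  */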

/* Implementation of emit_ops method "emit_stack_adjust".  */

static void
aarch64_emit_stack_adjust (int n)
{
  /* This is not needed with our design.  */
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_add (p, sp, sp, immediate_operand (n * 16));

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_int_call_1".  */

static void
aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);
}

/* Implementation of emit_ops method "emit_void_call_2".  */

static void
aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Push x0 on the stack.  */
  aarch64_emit_stack_flush ();

  /* Setup arguments for the function call:

     x0: arg1
     x1: top of the stack

       MOV x1, x0
       MOV x0, #arg1  */

  p += emit_mov (p, x1, register_operand (x0));
  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);

  /* Restore x0.  */
  aarch64_emit_pop ();
}
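
/* x0 caches the top of the expression stack, so it is spilled with
   aarch64_emit_stack_flush before the call and reloaded with
   aarch64_emit_pop afterwards; the called C function is free to clobber
   it.  */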

/* Implementation of emit_ops method "emit_eq_goto".  */

static void
aarch64_emit_eq_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 != x1.  */
  p += emit_bcond (p, NE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ne_goto".  */

static void
aarch64_emit_ne_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 == x1.  */
  p += emit_bcond (p, EQ, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_lt_goto".  */

static void
aarch64_emit_lt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 >= x1.  */
  p += emit_bcond (p, GE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_le_goto".  */

static void
aarch64_emit_le_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 > x1.  */
  p += emit_bcond (p, GT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_gt_goto".  */

static void
aarch64_emit_gt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 <= x1.  */
  p += emit_bcond (p, LE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

/* Implementation of emit_ops method "emit_ge_got".  */

static void
aarch64_emit_ge_got (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 <= x1.  */
  p += emit_bcond (p, LT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}

static struct emit_ops aarch64_emit_ops_impl =
{
  aarch64_emit_prologue,
  aarch64_emit_epilogue,
  aarch64_emit_add,
  aarch64_emit_sub,
  aarch64_emit_mul,
  aarch64_emit_lsh,
  aarch64_emit_rsh_signed,
  aarch64_emit_rsh_unsigned,
  aarch64_emit_ext,
  aarch64_emit_log_not,
  aarch64_emit_bit_and,
  aarch64_emit_bit_or,
  aarch64_emit_bit_xor,
  aarch64_emit_bit_not,
  aarch64_emit_equal,
  aarch64_emit_less_signed,
  aarch64_emit_less_unsigned,
  aarch64_emit_ref,
  aarch64_emit_if_goto,
  aarch64_emit_goto,
  aarch64_write_goto_address,
  aarch64_emit_const,
  aarch64_emit_call,
  aarch64_emit_reg,
  aarch64_emit_pop,
  aarch64_emit_stack_flush,
  aarch64_emit_zero_ext,
  aarch64_emit_swap,
  aarch64_emit_stack_adjust,
  aarch64_emit_int_call_1,
  aarch64_emit_void_call_2,
  aarch64_emit_eq_goto,
  aarch64_emit_ne_goto,
  aarch64_emit_lt_goto,
  aarch64_emit_le_goto,
  aarch64_emit_gt_goto,
  aarch64_emit_ge_got,
};

/* Implementation of target ops method "emit_ops".  */

struct emit_ops *
aarch64_target::emit_ops ()
{
  return &aarch64_emit_ops_impl;
}

/* Implementation of target ops method
   "get_min_fast_tracepoint_insn_len".  */

int
aarch64_target::get_min_fast_tracepoint_insn_len ()
{
  return 4;
}

/* Implementation of linux target ops method "low_supports_range_stepping".  */

bool
aarch64_target::low_supports_range_stepping ()
{
  return true;
}

/* Implementation of target ops method "sw_breakpoint_from_kind".  */

const gdb_byte *
aarch64_target::sw_breakpoint_from_kind (int kind, int *size)
{
  if (is_64bit_tdesc ())
    {
      *size = aarch64_breakpoint_len;
      return aarch64_breakpoint;
    }
  else
    return arm_sw_breakpoint_from_kind (kind, size);
}

/* Implementation of target ops method "breakpoint_kind_from_pc".  */

int
aarch64_target::breakpoint_kind_from_pc (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_pc (pcptr);
}

/* Implementation of the target ops method
   "breakpoint_kind_from_current_state".  */

int
aarch64_target::breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_current_state (pcptr);
}

/* Implementation of target ops method "supports_qxfer_capability".  */

bool
aarch64_target::supports_qxfer_capability ()
{
  unsigned long hwcap2 = linux_get_hwcap2 (8);

  return (hwcap2 & HWCAP2_MORELLO) != 0;
}
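
/* Capability transfers are only advertised to GDB when the kernel reports
   Morello support in HWCAP2.  */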

/* Implementation of target ops method "qxfer_capability".  */

int
aarch64_target::qxfer_capability (const CORE_ADDR address,
				  unsigned char *readbuf,
				  unsigned const char *writebuf,
				  CORE_ADDR offset, int len)
{
  int tid = pid_of (current_thread);

  struct user_cap cap;

  if (readbuf != nullptr)
    {
      if (!aarch64_linux_read_capability (tid, address, cap))
	{
	  warning (_("Unable to read capability from address."));
	  return 0;
	}

      /* Copy data to readbuf.  */
      memcpy (readbuf, &cap.tag, 1);
      memcpy (readbuf + 1, &cap.val, 16);
    }
  else
    {
      /* Copy data from writebuf.  */
      memcpy (&cap.tag, writebuf, 1);
      memcpy (&cap.val, writebuf + 1, 16);
      memset (&cap.__reserved, 0, 15);

      if (!aarch64_linux_write_capability (tid, address, cap))
	{
	  warning (_("Unable to write capability to address.\n"
		     "Please run \"sysctl cheri.ptrace_forge_cap=1\"."));
	  return 0;
	}
    }

  return sizeof (cap.val) + 1;
}
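
/* The transfer format used above is 17 bytes per capability: one tag byte
   followed by the 16-byte capability value.  */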

/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_aarch64_target;

void
initialize_low_arch (void)
{
  initialize_low_arch_aarch32 ();

  initialize_regsets_info (&aarch64_regsets_info);
  initialize_regsets_info (&aarch64_sve_regsets_info);
}