/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2020 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "server.h"
#include "linux-low.h"
#include "arch/aarch64-cap-linux.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "tracepoint.h"

#include <signal.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>
#include <inttypes.h>
#include <endian.h>
#include <sys/uio.h>

#include "gdb_proc_service.h"
#include "arch/aarch64.h"
#include "linux-aarch32-tdesc.h"
#include "linux-aarch64-tdesc.h"
#include "nat/aarch64-sve-linux-ptrace.h"
#include "nat/aarch64-cap-linux.h" /* For Morello.  */
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
/* Linux target op definitions for the AArch64 architecture.  */

class aarch64_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  int breakpoint_kind_from_pc (CORE_ADDR *pcptr) override;

  int breakpoint_kind_from_current_state (CORE_ADDR *pcptr) override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

  bool supports_tracepoints () override;

  bool supports_fast_tracepoints () override;

  int install_fast_tracepoint_jump_pad
    (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
     CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
     CORE_ADDR *trampoline, ULONGEST *trampoline_size,
     unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
     CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
     char *err) override;

  int get_min_fast_tracepoint_insn_len () override;

  struct emit_ops *emit_ops () override;

  bool supports_qxfer_capability () override;

  int qxfer_capability (const CORE_ADDR address, unsigned char *readbuf,
			unsigned const char *writebuf,
			CORE_ADDR offset, int len) override;

  /* AArch64 (Morello) implementation of auxv_search.  We need this
     override to handle Morello 16-byte AUXV entries in the PCuABI.  */
  bool auxv_search (CORE_ADDR type, CORE_ADDR &value) override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;

  bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
			  int direction) override;

  arch_process_info *low_new_process () override;

  void low_delete_process (arch_process_info *info) override;

  void low_new_thread (lwp_info *) override;

  void low_delete_thread (arch_lwp_info *) override;

  void low_new_fork (process_info *parent, process_info *child) override;

  void low_prepare_to_resume (lwp_info *lwp) override;

  int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;

  bool low_supports_range_stepping () override;

  bool low_supports_catch_syscall () override;

  void low_get_syscall_trapinfo (regcache *regcache, int *sysno) override;

  const struct link_map_offsets *low_fetch_linkmap_offsets (int is_elf64)
    override;
};

/* The singleton target ops object.  */

static aarch64_target the_aarch64_target;
bool
aarch64_target::low_cannot_fetch_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_fetch_register "
			  "is not implemented by the target");
}

bool
aarch64_target::low_cannot_store_register (int regno)
{
  gdb_assert_not_reached ("linux target op low_cannot_store_register "
			  "is not implemented by the target");
}

void
aarch64_target::low_prepare_to_resume (lwp_info *lwp)
{
  aarch64_linux_prepare_to_resume (lwp);
}
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};
/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

/* Return true if the regcache contains the SVE register set.  */

static bool
is_sve_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return tdesc_contains_feature (regcache->tdesc, "org.gnu.gdb.aarch64.sve");
}
static bool gpr_changed = false;

static bool
gpr_set_changed (struct regcache *regcache, void *buf)
{
  size_t gpr_size = (AARCH64_X_REGS_NUM + 2) * 8 + 4;
  bool changed
    = memcmp (regcache->registers, buf, gpr_size) != 0;

  return changed;
}
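
/* A note on GPR_SIZE above (an inference from the register cache
   layout, not original commentary): it presumably spans the leading
   part of the regcache buffer -- the 31 X registers, SP and PC at
   8 bytes each, plus the 4-byte CPSR -- giving
   (AARCH64_X_REGS_NUM + 2) * 8 + 4 = 268 bytes.  */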
static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  /* Right now, regcache contains the updated contents of the registers.
     Check if anything has changed in the GPR's.  If nothing has changed,
     don't update anything.

     Otherwise, update the contents.  */

  gpr_changed = gpr_set_changed (regcache, buf);

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}
static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}
static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}
static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}
/* Store the pauth registers to regcache.  */

static void
aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
{
  uint64_t *pauth_regset = (uint64_t *) buf;
  int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");

  if (pauth_base == 0)
    return;

  supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
		   &pauth_regset[0]);
  supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
		   &pauth_regset[1]);
}
/* Capability registers fill hook implementation.  */

static void
aarch64_fill_cregset (struct regcache *regcache, void *buf)
{
  /* If the GPR's have changed, don't attempt to change the C registers.  */
  if (gpr_changed)
    {
      /* Reset the flag.  */
      gpr_changed = false;
      return;
    }

  struct user_morello_state *cregset
    = (struct user_morello_state *) buf;

  int cregs_base = find_regno (regcache->tdesc, "c0");

  /* Store the C registers to the buffer.  */
  int i, regno;
  for (regno = cregs_base, i = 0;
       regno < cregs_base + AARCH64_C_REGS_NUM;
       regno++, i++)
    collect_register (regcache, regno, &cregset->cregs[i]);

  /* Store the other registers to the buffer.  */
  collect_register (regcache, regno++, &cregset->csp);
  collect_register (regcache, regno++, &cregset->pcc);
  collect_register (regcache, regno++, &cregset->ddc);
  collect_register (regcache, regno++, &cregset->ctpidr);
  collect_register (regcache, regno++, &cregset->rcsp);
  collect_register (regcache, regno++, &cregset->rddc);
  collect_register (regcache, regno++, &cregset->rctpidr);
  collect_register (regcache, regno++, &cregset->cid);
  collect_register (regcache, regno++, &cregset->tag_map);
  collect_register (regcache, regno++, &cregset->cctlr);
}
/* Capability registers store hook implementation.  */

static void
aarch64_store_cregset (struct regcache *regcache, const void *buf)
{
  const struct user_morello_state *cregset
    = (const struct user_morello_state *) buf;

  int cregs_base = find_regno (regcache->tdesc, "c0");

  /* Fetch the C registers.  */
  int i, regno;
  for (regno = cregs_base, i = 0;
       regno < cregs_base + AARCH64_C_REGS_NUM;
       regno++, i++)
    supply_register (regcache, regno, &cregset->cregs[i]);

  /* Fetch the other registers.  */
  supply_register (regcache, regno++, &cregset->csp);
  supply_register (regcache, regno++, &cregset->pcc);
  supply_register (regcache, regno++, &cregset->ddc);
  supply_register (regcache, regno++, &cregset->ctpidr);
  supply_register (regcache, regno++, &cregset->rcsp);
  supply_register (regcache, regno++, &cregset->rddc);
  supply_register (regcache, regno++, &cregset->rctpidr);
  supply_register (regcache, regno++, &cregset->cid);
  supply_register (regcache, regno++, &cregset->tag_map);
  supply_register (regcache, regno++, &cregset->cctlr);
}
bool
aarch64_target::low_supports_breakpoints ()
{
  return true;
}

/* Implementation of linux target ops method "low_get_pc".  */

CORE_ADDR
aarch64_target::low_get_pc (regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    return linux_get_pc_64bit (regcache);
  else
    return linux_get_pc_32bit (regcache);
}

/* Implementation of linux target ops method "low_set_pc".  */

void
aarch64_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    linux_set_pc_64bit (regcache, pc);
  else
    linux_set_pc_32bit (regcache, pc);
}
#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

/* Implementation of linux target ops method "low_breakpoint_at".  */

bool
aarch64_target::low_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      read_memory (where, (unsigned char *) &insn, aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
	return true;

      return false;
    }
  else
    return arm_breakpoint_at (where);
}
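
/* For reference: the bytes in AARCH64_BREAKPOINT above are the
   little-endian encoding of BRK #0 (0xd4200000), the instruction GDB
   plants as its software breakpoint on AArch64.  */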
static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}
/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}
/* Implementation of target ops method "supports_z_point_type".  */

bool
aarch64_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}
/* Implementation of linux target ops method "low_insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

int
aarch64_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
				  int len, raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
	ret = aarch64_handle_watchpoint (targ_type, addr, len,
					 1 /* is_insert */, state);
      else
	ret = -1;
    }
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware breakpoint/watchpoint control
	     register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
				  targ_type);

  return ret;
}
/* Implementation of linux target ops method "low_remove_point".

   It actually only records the info of the to-be-removed bp/wp;
   the actual removal will be done when threads are resumed.  */

int
aarch64_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
				  int len, raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret = aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
				     state);
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware breakpoint/watchpoint control
	     register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
				  targ_type);

  return ret;
}
/* Implementation of linux target ops method "low_stopped_data_address".  */

CORE_ADDR
aarch64_target::low_stopped_data_address ()
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int offset
	= aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
      const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
      const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
      const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];

      if (state->dr_ref_count_wp[i]
	  && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
	  && addr_trap >= addr_watch_aligned
	  && addr_trap < addr_watch + len)
	{
	  /* ADDR_TRAP reports the first address of the memory range
	     accessed by the CPU, regardless of what was the memory
	     range watched.  Thus, a large CPU access that straddles
	     the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
	     ADDR_TRAP that is lower than the
	     ADDR_WATCH..ADDR_WATCH+LEN range.  E.g.:

	     addr: |   4   |   5   |   6   |   7   |   8   |
				   |---- range watched ----|
		   |----------- range accessed ------------|

	     In this case, ADDR_TRAP will be 4.

	     To match a watchpoint known to GDB core, we must never
	     report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
	     range.  ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
	     positive on kernels older than 4.10.  See PR
	     external/20207.  */
	  return addr_orig;
	}
    }

  return (CORE_ADDR) 0;
}
/* Implementation of linux target ops method "low_stopped_by_watchpoint".  */

bool
aarch64_target::low_stopped_by_watchpoint ()
{
  return (low_stopped_data_address () != 0);
}
/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
				     is_64bit_tdesc ());
}
/* Implementation of linux target ops method "low_siginfo_fixup".  */

bool
aarch64_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
				   int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
	aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
					     native);
      else
	aarch64_siginfo_from_compat_siginfo (native,
					     (struct compat_siginfo *) inf);

      return true;
    }

  return false;
}
/* Implementation of linux target ops method "low_new_process".  */

arch_process_info *
aarch64_target::low_new_process ()
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux target ops method "low_delete_process".  */

void
aarch64_target::low_delete_process (arch_process_info *info)
{
  xfree (info);
}

void
aarch64_target::low_new_thread (lwp_info *lwp)
{
  aarch64_linux_new_thread (lwp);
}

void
aarch64_target::low_delete_thread (arch_lwp_info *arch_lwp)
{
  aarch64_linux_delete_thread (arch_lwp);
}

/* Implementation of linux target ops method "low_new_fork".  */

void
aarch64_target::low_new_fork (process_info *parent,
			      process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernel before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug registers mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug registers mirror will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}
/* Wrapper for aarch64_sve_regs_copy_to_reg_buf.  */

static void
aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
{
  return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
}

/* Wrapper for aarch64_sve_regs_copy_from_reg_buf.  */

static void
aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
}
static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  /* FIXME-Morello: Fixup the register set size.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_MORELLO,
    AARCH64_LINUX_CREGS_SIZE, OPTIONAL_REGS,
    aarch64_fill_cregset, aarch64_store_cregset, nullptr,
    "cheri.ptrace_forge_cap", "capability" },
  NULL_REGSET
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };

static struct regset_info aarch64_sve_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
    SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
    aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  /* FIXME-Morello: Fixup the register set size.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_MORELLO,
    AARCH64_LINUX_CREGS_SIZE, OPTIONAL_REGS,
    aarch64_fill_cregset, aarch64_store_cregset, nullptr,
    "cheri.ptrace_forge_cap", "capability" },
  NULL_REGSET
};

static struct regsets_info aarch64_sve_regsets_info =
  {
    aarch64_sve_regsets, /* regsets.  */
    0, /* num_regsets.  */
    NULL, /* disabled_regsets.  */
  };

static struct regs_info regs_info_aarch64_sve =
  {
    NULL, /* regset_bitmap.  */
    NULL, /* usrregs.  */
    &aarch64_sve_regsets_info,
  };
/* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h.  */
#define AARCH64_HWCAP_PACA (1 << 30)

/* Implementation of linux target ops method "low_arch_setup".  */

void
aarch64_target::low_arch_setup ()
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    {
      uint64_t vq = aarch64_sve_get_vq (tid);
      unsigned long hwcap = linux_get_hwcap ();
      bool pauth_p = hwcap & AARCH64_HWCAP_PACA;
      /* We cannot use HWCAP2_MORELLO to check for Morello support.  */
      bool capability_p = aarch64_supports_morello (tid);

      current_process ()->tdesc
	= aarch64_linux_read_description (vq, pauth_p, capability_p);

      /* Re-enable warnings for register sets with sysctl settings.  */
      aarch64_regsets[4].sysctl_write_should_warn = true;
      aarch64_sve_regsets[4].sysctl_write_should_warn = true;
    }
  else
    current_process ()->tdesc = aarch32_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}
/* Implementation of linux target ops method "get_regs_info".  */

const regs_info *
aarch64_target::get_regs_info ()
{
  if (!is_64bit_tdesc ())
    return &regs_info_aarch32;

  if (is_sve_tdesc ())
    return &regs_info_aarch64_sve;

  return &regs_info_aarch64;
}
/* Implementation of target ops method "supports_tracepoints".  */

bool
aarch64_target::supports_tracepoints ()
{
  if (current_thread == NULL)
    return true;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}
/* Implementation of linux target ops method "low_get_thread_area".  */

int
aarch64_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}
bool
aarch64_target::low_supports_catch_syscall ()
{
  return true;
}

/* Implementation of linux target ops method "low_get_syscall_trapinfo".  */

void
aarch64_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "x8", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "r7", sysno);
}
static const struct link_map_offsets lmo_64bit_morello_offsets =
  {
    0,     /* r_version offset.  */
    16,    /* r_debug.r_map offset.  */
    0,     /* l_addr offset in link_map.  */
    16,    /* l_name offset in link_map.  */
    32,    /* l_ld offset in link_map.  */
    48,    /* l_next offset in link_map.  */
    64     /* l_prev offset in link_map.  */
  };

const struct link_map_offsets *
aarch64_target::low_fetch_linkmap_offsets (int is_elf64)
{
  if (is_elf64)
    {
      CORE_ADDR entry_addr = linux_get_at_entry ();

      /* If the LSB of AT_ENTRY is 1, then we have a pure capability Morello
	 binary.  */
      if (entry_addr & 1)
	return &lmo_64bit_morello_offsets;
    }

  return linux_process_target::low_fetch_linkmap_offsets (is_elf64);
}
bool
aarch64_target::auxv_search (CORE_ADDR type, CORE_ADDR &value)
{
  const gdb::byte_vector auxv = linux_process_target::get_auxv ();
  const gdb_byte *ptr = auxv.data ();
  const gdb_byte *end_ptr = auxv.data () + auxv.size ();

  /* There needs to be at least one auxv entry.  */
  gdb_assert (end_ptr - ptr >= 16);

  /* We're dealing with three different AUXV layouts:

     A - The regular AArch64 format: Each type entry is 64-bit and each value
	 is 64-bit.  This is also the case for Morello Hybrid binaries.
     B - The Morello pure capability format with libshim: This is a
	 compatibility layout and it keeps the 64-bit types and 64-bit
	 values.
     C - The Morello pure capability format without libshim: This layout has
	 64-bit types followed by 64-bit padding.  The value is 128-bit.

     We need to determine what layout we have, so we can read the data
     correctly.

     The easiest way to tell the difference is to assume 8-byte entries and
     look for any types outside the range [AT_NULL, AT_MINSIGSTKSZ].  If we
     find one such type, assume that we have layout C.  Otherwise we have
     layouts A or B.  */

  bool layout_c = false;
  const gdb_byte *p = ptr;
  while (p < end_ptr)
    {
      CORE_ADDR *entry_type = (CORE_ADDR *) p;

      if (*entry_type > AT_MINSIGSTKSZ)
	{
	  layout_c = true;
	  break;
	}
      p += 16;
    }

  /* Do the actual search now that we know the auxv format.  */
  if (layout_c)
    {
      p = ptr;
      while (p < end_ptr)
	{
	  CORE_ADDR *entry_type = (CORE_ADDR *) p;

	  if (type == *entry_type)
	    {
	      const gdb_byte *value_ptr = p + 16;
	      value = *((CORE_ADDR *) value_ptr);
	      return true;
	    }
	  p += 32;
	}
      return false;
    }

  /* We have the regular layout.  Let generic code handle it.  */
  return linux_process_target::auxv_search (type, value);
}
/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};
/* List of registers that we are currently using, we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };
/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}
/* Helper function to create a register operand, for instructions with
   different types of operands.

   Use:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   Use:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}

/* Helper function to create an offset memory operand.

   Use:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   Use:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   Use:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}
/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control registers.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};
/* Write a BLR instruction into *BUF.

   BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}

/* Write a RET instruction into *BUF.

   RET xn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}
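
/* A note on the ENCODE macro used by the emit_* helpers (it comes from
   arch/aarch64-insn.h): roughly, ENCODE (val, size, offset) masks VAL
   to SIZE bits and shifts it up to bit OFFSET.  In emit_ret above, for
   instance, ENCODE (rn.num, 5, 5) places the register number in bits
   [9:5] of the instruction word, so emit_ret (p, lr) yields the
   canonical RET encoding 0xd65f03c0.  */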
static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rt,
		      struct aarch64_register rt2,
		      struct aarch64_register rn,
		      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (0, 1, 23);
	break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
	pre_index = ENCODE (0, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
			    | ENCODE (operand.index >> 3, 7, 15)
			    | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}
/* Write a STP instruction into *BUF.

   STP rt, rt2, [rn, #offset]
   STP rt, rt2, [rn, #index]!
   STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}

/* Write a LDP instruction into *BUF.

   LDP rt, rt2, [rn, #offset]
   LDP rt, rt2, [rn, #index]!
   LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}
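
/* Worked example of the offset encoding used by the two helpers above:
   the 7-bit immediate is stored scaled by the access size, so
   emit_stp (p, x0, x1, sp, offset_memory_operand (16)) feeds
   16 >> 3 == 2 into ENCODE (operand.index >> 3, 7, 15) in
   emit_load_store_pair -- hence the -512 .. 504 limit for X
   registers.  */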
/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

   LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

   STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}
/* Write a LDRH instruction into *BUF.

   LDRH wt, [xn, #offset]
   LDRH wt, [xn, #index]!
   LDRH wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 8190 range (12 bits << 1).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}

/* Write a LDRB instruction into *BUF.

   LDRB wt, [xn, #offset]
   LDRB wt, [xn, #index]!
   LDRB wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 4095 range (12 bits).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}
/* Write a STR instruction into *BUF.

   STR rt, [rn, #offset]
   STR rt, [rn, #index]!
   STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}
/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
			   enum aarch64_opcodes opcode,
			   struct aarch64_register rs,
			   struct aarch64_register rt,
			   struct aarch64_register rt2,
			   struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
			    | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}
/* Write a LDAXR instruction into *BUF.

   LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
	    struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
				    xzr, rn);
}

/* Write a STXR instruction into *BUF.

   STXR ws, rt, [xn]

   RS is the result register; it indicates if the store succeeded or not.
   RT is the destination register.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
	   struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
				    xzr, rn);
}

/* Write a STLR instruction into *BUF.

   STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
				    xzr, rn);
}
/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
			  struct aarch64_register rd,
			  struct aarch64_register rn,
			  struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}

/* Helper function for data processing instructions taking either a register
   or an immediate.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rd,
		      struct aarch64_register rn,
		      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
				| ENCODE (operand.imm, 12, 10)
				| ENCODE (rn.num, 5, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
				       rn, operand.reg);
    }
}
/* Write an ADD instruction into *BUF.

   ADD rd, rn, #imm
   ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}

/* Write a SUB instruction into *BUF.

   SUB rd, rn, #imm
   SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   OPERAND is the immediate or register to subtract from RN.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}
/* Write a MOV instruction into *BUF.

   MOV rd, #imm
   MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
				| ENCODE (operand.imm, 16, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}
/* Write a MOVK instruction into *BUF.

   MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
	   unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21) |
			    ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}
/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     destination register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}
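
/* For example, moving the address 0x0000007fb7ed1234 into XD emits
   (the last MOVK is elided because bits 63:48 are all zero):

     MOV  xd, #0x1234
     MOVK xd, #0xb7ed, lsl #16
     MOVK xd, #0x7f, lsl #32  */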
/* Write a SUBS instruction into *BUF.

   SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}

/* Write a CMP instruction into *BUF.

   CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
	  struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}
/* Write a AND instruction into *BUF.

   AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}

/* Write a ORR instruction into *BUF.

   ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}

/* Write a ORN instruction into *BUF.

   ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}

/* Write a EOR instruction into *BUF.

   EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}

/* Write a MVN instruction into *BUF.

   MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}

/* Write a LSLV instruction into *BUF.

   LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}

/* Write a LSRV instruction into *BUF.

   LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}
/* Write a ASRV instruction into *BUF.

   ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}

/* Write a MUL instruction into *BUF.

   MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}
/* Write a MRS instruction into *BUF.  The register size is 64-bit.

   MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
	  enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write a MSR instruction into *BUF.  The register size is 64-bit.

   MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
	  struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}
/* Write a SBFM instruction into *BUF.

   SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a SBFX instruction into *BUF.

   SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a UBFM instruction into *BUF.

   UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a UBFX instruction into *BUF.

   UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}
/* Write a CSINC instruction into *BUF.

   CSINC rd, rn, rm, cond

   This instruction conditionally increments rn or rm and places the result
   in rd.  rn is chosen if the condition is true.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
	    struct aarch64_register rn, struct aarch64_register rm,
	    unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a CSET instruction into *BUF.

   CSET rd, cond

   This instruction conditionally writes 1 or 0 in the destination register.
   1 is written if the condition is true.  This is an alias for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}
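
/* Example: emit_cset (p, x0, EQ) emits CSINC x0, xzr, xzr, NE.  When
   the inverted condition NE is false (i.e. EQ holds), CSINC selects
   xzr + 1, leaving 1 in x0; otherwise it selects xzr, leaving 0.  */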
/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note instructions are always little endian on AArch64, unlike data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  target_write_memory (*to, (const unsigned char *) le_buf, byte_len);

  free (le_buf);
#else
  target_write_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}
/* Sub-class of struct aarch64_insn_data, store information of
   instruction relocation for fast tracepoint.  Visitor can
   relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
   the relocated instructions in buffer pointed by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;

  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};
/* Implementation of aarch64_insn_visitor method "b".  */

static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
			     struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
}
2014 aarch64_ftrace_insn_reloc_b_cond (const unsigned cond
, const int32_t offset
,
2015 struct aarch64_insn_data
*data
)
2017 struct aarch64_insn_relocation_data
*insn_reloc
2018 = (struct aarch64_insn_relocation_data
*) data
;
2020 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
2022 if (can_encode_int32 (new_offset
, 21))
2024 insn_reloc
->insn_ptr
+= emit_bcond (insn_reloc
->insn_ptr
, cond
,
2027 else if (can_encode_int32 (new_offset
, 28))
2029 /* The offset is out of range for a conditional branch
2030 instruction but not for a unconditional branch. We can use
2031 the following instructions instead:
2033 B.COND TAKEN ; If cond is true, then jump to TAKEN.
2034 B NOT_TAKEN ; Else jump over TAKEN and continue.
2041 insn_reloc
->insn_ptr
+= emit_bcond (insn_reloc
->insn_ptr
, cond
, 8);
2042 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, 8);
2043 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, new_offset
- 8);
2047 /* Implementation of aarch64_insn_visitor method "cb". */
2050 aarch64_ftrace_insn_reloc_cb (const int32_t offset
, const int is_cbnz
,
2051 const unsigned rn
, int is64
,
2052 struct aarch64_insn_data
*data
)
2054 struct aarch64_insn_relocation_data
*insn_reloc
2055 = (struct aarch64_insn_relocation_data
*) data
;
2057 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
2059 if (can_encode_int32 (new_offset
, 21))
2061 insn_reloc
->insn_ptr
+= emit_cb (insn_reloc
->insn_ptr
, is_cbnz
,
2062 aarch64_register (rn
, is64
), new_offset
);
2064 else if (can_encode_int32 (new_offset
, 28))
2066 /* The offset is out of range for a compare and branch
2067 instruction but not for a unconditional branch. We can use
2068 the following instructions instead:
2070 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
2071 B NOT_TAKEN ; Else jump over TAKEN and continue.
2077 insn_reloc
->insn_ptr
+= emit_cb (insn_reloc
->insn_ptr
, is_cbnz
,
2078 aarch64_register (rn
, is64
), 8);
2079 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, 8);
2080 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, new_offset
- 8);
2084 /* Implementation of aarch64_insn_visitor method "tb". */
2087 aarch64_ftrace_insn_reloc_tb (const int32_t offset
, int is_tbnz
,
2088 const unsigned rt
, unsigned bit
,
2089 struct aarch64_insn_data
*data
)
2091 struct aarch64_insn_relocation_data
*insn_reloc
2092 = (struct aarch64_insn_relocation_data
*) data
;
2094 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
2096 if (can_encode_int32 (new_offset
, 16))
2098 insn_reloc
->insn_ptr
+= emit_tb (insn_reloc
->insn_ptr
, is_tbnz
, bit
,
2099 aarch64_register (rt
, 1), new_offset
);
2101 else if (can_encode_int32 (new_offset
, 28))
2103 /* The offset is out of range for a test bit and branch
2104 instruction but not for a unconditional branch. We can use
2105 the following instructions instead:
2107 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
2108 B NOT_TAKEN ; Else jump over TAKEN and continue.
2114 insn_reloc
->insn_ptr
+= emit_tb (insn_reloc
->insn_ptr
, is_tbnz
, bit
,
2115 aarch64_register (rt
, 1), 8);
2116 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, 8);
2117 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0,
/* Implementation of aarch64_insn_visitor method "adr".  */

static void
aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
			       const int is_adrp,
			       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					     aarch64_register (rd, 1),
					     address & ~0xfff);
    }
  else
    insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					   aarch64_register (rd, 1), address);
}
/* Implementation of aarch64_insn_visitor method "ldr_literal".  */

static void
aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
				       const unsigned rt, const int is64,
				       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  CORE_ADDR address = data->insn_addr + offset;

  insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					 aarch64_register (rt, 1), address);

  /* We know exactly what address to load from, and what register we
     can use:

     MOV xd, #(oldloc + offset)
     MOVK xd, #((oldloc + offset) >> 16), lsl #16
     ...

     LDR xd, [xd] ; or LDRSW xd, [xd]

     */

  if (is_sw)
    insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
					aarch64_register (rt, 1),
					aarch64_register (rt, 1),
					offset_memory_operand (0));
  else
    insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
				      aarch64_register (rt, is64),
				      aarch64_register (rt, 1),
				      offset_memory_operand (0));
}
/* Implementation of aarch64_insn_visitor method "others".  */

static void
aarch64_ftrace_insn_reloc_others (const uint32_t insn,
				  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;

  /* The instruction is not PC relative.  Just re-emit it at the new
     location.  */
  insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
}
static const struct aarch64_insn_visitor visitor =
{
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,
};
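/* Usage sketch, mirroring the call made further down in
   install_fast_tracepoint_jump_pad: the relocation driver decodes one
   instruction and dispatches to exactly one visitor method above.

     struct aarch64_insn_relocation_data data;

     data.base.insn_addr = tpaddr;   // where the instruction lived
     data.new_addr = buildaddr;      // where its copy is emitted
     data.insn_ptr = buf;
     aarch64_relocate_instruction (insn, &visitor,
				   (struct aarch64_insn_data *) &data);

   If data.insn_ptr still equals buf afterwards, nothing was emitted and
   the caller must treat the relocation as failed.  */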
/* Implementation of target ops method "supports_fast_tracepoints".  */

bool
aarch64_target::supports_fast_tracepoints ()
{
  return true;
}
/* Implementation of target ops method
   "install_fast_tracepoint_jump_pad".  */

int
aarch64_target::install_fast_tracepoint_jump_pad
  (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
   CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
   CORE_ADDR *trampoline, ULONGEST *trampoline_size,
   unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
   CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
   char *err)
{
  uint32_t buf[256];
  uint32_t *p = buf;
  int64_t offset;
  int i;
  uint32_t insn;
  CORE_ADDR buildaddr = *jump_entry;
  struct aarch64_insn_relocation_data insn_data;
  /* We need to save the current state on the stack both to restore it
     later and to collect register values when the tracepoint is hit.

     The saved registers are pushed in a layout that needs to be in sync
     with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c).  Later on
     the supply_fast_tracepoint_registers function will fill in the
     register cache from a pointer to saved registers on the stack we build
     here.

     For simplicity, we set the size of each cell on the stack to 16 bytes.
     This way one cell can hold any register type, from system registers
     to the 128 bit SIMD&FP registers.  Furthermore, the stack pointer
     has to be 16 bytes aligned anyway.

     Note that the CPSR register does not exist on AArch64.  Instead we
     can access system bits describing the process state with the
     MRS/MSR instructions, namely the condition flags.  We save them as
     if they are part of a CPSR register because that's how GDB
     interprets these system bits.  At the moment, only the condition
     flags are saved in CPSR (NZCV).

     Stack layout, each cell is 16 bytes (descending):

     High *-------- SIMD&FP registers from 31 down to 0. --------*
	  | q31                                                  |
	  .                                                      .
	  .                                                      . 32 cells
	  .                                                      .
	  | q0                                                   |
	  *---- General purpose registers from 30 down to 0. ----*
	  | x30                                                  |
	  .                                                      .
	  .                                                      . 31 cells
	  .                                                      .
	  | x0                                                   |
	  *------------- Special purpose registers. -------------*
	  | SP                                                   |
	  | PC                                                   |
	  | CPSR (NZCV)                                          | 5 cells
	  | FPSR                                                 |
	  | FPCR                                                 |
	  *------------- collecting_t object --------------------*
	  | TPIDR_EL0                  | struct tracepoint *     |
     Low  *------------------------------------------------------*

     After this stack is set up, we issue a call to the collector, passing
     it the saved registers at (SP + 16).  */
  /* Push SIMD&FP registers on the stack:

       SUB sp, sp, #(32 * 16)

       STP q30, q31, [sp, #(30 * 16)]
       ...
       STP q0, q1, [sp]

     */
  p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
  for (i = 30; i >= 0; i -= 2)
    p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);
  /* Push general purpose registers on the stack.  Note that we do not need
     to push x31 as it represents the xzr register and not the stack
     pointer in a STR instruction.

       SUB sp, sp, #(31 * 16)

       STR x30, [sp, #(30 * 16)]
       ...
       STR x0, [sp]

     */
  p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
  for (i = 30; i >= 0; i -= 1)
    p += emit_str (p, aarch64_register (i, 1), sp,
		   offset_memory_operand (i * 16));
  /* Make space for 5 more cells.

       SUB sp, sp, #(5 * 16)

     */
  p += emit_sub (p, sp, sp, immediate_operand (5 * 16));
  /* Save SP:

       ADD x4, sp, #((32 + 31 + 5) * 16)
       STR x4, [sp, #(4 * 16)]

     */
  p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
  p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));
  /* Save PC (tracepoint address):

       MOV  x3, #(tpaddr)
       ...

       STR x3, [sp, #(3 * 16)]

     */
  p += emit_mov_addr (p, x3, tpaddr);
  p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));
  /* Save CPSR (NZCV), FPSR and FPCR:

       MRS x2, nzcv
       MRS x1, fpsr
       MRS x0, fpcr

       STR x2, [sp, #(2 * 16)]
       STR x1, [sp, #(1 * 16)]
       STR x0, [sp, #(0 * 16)]

     */
  p += emit_mrs (p, x2, NZCV);
  p += emit_mrs (p, x1, FPSR);
  p += emit_mrs (p, x0, FPCR);
  p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
  /* Push the collecting_t object.  It consists of the address of the
     tracepoint and an ID for the current thread.  We get the latter by
     reading the tpidr_el0 system register.  It corresponds to the
     NT_ARM_TLS register accessible with ptrace.

       MOV x0, #(tpoint)
       ...

       MRS x1, tpidr_el0

       STP x0, x1, [sp, #-16]!

     */
  p += emit_mov_addr (p, x0, tpoint);
  p += emit_mrs (p, x1, TPIDR_EL0);
  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
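  /* For reference, the two slots just pushed form the collecting_t object
     the lock protocol below expects; conceptually something like this
     sketch (see gdbserver's tracepoint code for the real definition):

       struct collecting_t
       {
	 uintptr_t tpoint;       // struct tracepoint *
	 uintptr_t thread_area;  // TPIDR_EL0, identifies the thread
       };
  */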
  /* Spin-lock:

     The shared memory for the lock is at lockaddr.  It will hold zero
     if no-one is holding the lock, otherwise it contains the address of
     the collecting_t object on the stack of the thread which acquired it.

     At this stage, the stack pointer points to this thread's collecting_t
     object.

     We use the following registers:
     - x0: Address of the lock.
     - x1: Pointer to collecting_t object.
     - x2: Scratch register.

       again:
	 ; Trigger an event local to this core.  So the following WFE
	 ; instruction is ignored.
	 SEVL
       wait_for_lock:
	 ; Wait for an event.  The event is triggered by either the SEVL
	 ; or STLR instructions (store release).
	 WFE

	 ; Atomically read at lockaddr.  This marks the memory location as
	 ; exclusive.  This instruction also has memory constraints which
	 ; make sure all previous data reads and writes are done before
	 ; executing it.
	 LDAXR x2, [x0]

	 ; Try again if another thread holds the lock.
	 CBNZ x2, wait_for_lock

	 ; We can lock it!  Write the address of the collecting_t object.
	 ; This instruction will fail if the memory location is not marked
	 ; as exclusive anymore.  If it succeeds, it will remove the
	 ; exclusive mark on the memory location.  This way, if another
	 ; thread executes this instruction before us, we will fail and try
	 ; all over again.
	 STXR w2, x1, [x0]
	 CBNZ w2, again

     */
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_mov (p, x1, register_operand (sp));

  p += emit_sevl (p);
  p += emit_wfe (p);
  p += emit_ldaxr (p, x2, x0);
  p += emit_cb (p, 1, w2, -2 * 4);
  p += emit_stxr (p, w2, x1, x0);
  p += emit_cb (p, 1, x2, -4 * 4);
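  /* The six instructions emitted above implement, in effect, this C-like
     sketch of the acquire loop (illustrative only):

       do
	 {
	   wait_for_event ();                        // SEVL then WFE
	   old = load_acquire_exclusive (lockaddr);  // LDAXR
	 }
       while (old != 0                               // CBNZ: lock held
	      || !store_exclusive (lockaddr, sp));   // STXR/CBNZ: lost race
  */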
  /* Call collector (struct tracepoint *, unsigned char *):

       MOV x0, #(tpoint)
       ...

       ; Saved registers start after the collecting_t object.
       ADD x1, sp, #16

       ; We use an intra-procedure-call scratch register.
       MOV ip0, #(collector)
       ...

       ; And call back to C!
       BLR ip0

     */
  p += emit_mov_addr (p, x0, tpoint);
  p += emit_add (p, x1, sp, immediate_operand (16));

  p += emit_mov_addr (p, ip0, collector);
  p += emit_blr (p, ip0);
  /* Release the lock.

       MOV x0, #(lockaddr)
       ...

       ; This instruction is a normal store with memory ordering
       ; constraints.  Thanks to this we do not have to put a data
       ; barrier instruction to make sure all data reads and writes are done
       ; before this instruction is executed.  Furthermore, this instruction
       ; will trigger an event, letting other threads know they can grab
       ; the lock.
       STLR xzr, [x0]

     */
  p += emit_mov_addr (p, x0, lockaddr);
  p += emit_stlr (p, xzr, x0);
  /* Free collecting_t object:

       ADD sp, sp, #16

     */
  p += emit_add (p, sp, sp, immediate_operand (16));
  /* Restore CPSR (NZCV), FPSR and FPCR.  And free all special purpose
     registers from the stack.

       LDR x2, [sp, #(2 * 16)]
       LDR x1, [sp, #(1 * 16)]
       LDR x0, [sp, #(0 * 16)]

       MSR NZCV, x2
       MSR FPSR, x1
       MSR FPCR, x0

       ADD sp, sp, #(5 * 16)

     */
  p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_msr (p, NZCV, x2);
  p += emit_msr (p, FPSR, x1);
  p += emit_msr (p, FPCR, x0);
  p += emit_add (p, sp, sp, immediate_operand (5 * 16));
  /* Pop general purpose registers:

       LDR x0, [sp]
       ...
       LDR x30, [sp, #(30 * 16)]

       ADD sp, sp, #(31 * 16)

     */
  for (i = 0; i <= 30; i += 1)
    p += emit_ldr (p, aarch64_register (i, 1), sp,
		   offset_memory_operand (i * 16));
  p += emit_add (p, sp, sp, immediate_operand (31 * 16));
  /* Pop SIMD&FP registers:

       LDP q0, q1, [sp]
       ...
       LDP q30, q31, [sp, #(30 * 16)]

       ADD sp, sp, #(32 * 16)

     */
  for (i = 0; i <= 30; i += 2)
    p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
  p += emit_add (p, sp, sp, immediate_operand (32 * 16));
  /* Write the code into the inferior memory.  */
  append_insns (&buildaddr, p - buf, buf);

  /* Now emit the relocated instruction.  */
  *adjusted_insn_addr = buildaddr;
  target_read_uint32 (tpaddr, &insn);

  insn_data.base.insn_addr = tpaddr;
  insn_data.new_addr = buildaddr;
  insn_data.insn_ptr = buf;

  aarch64_relocate_instruction (insn, &visitor,
				(struct aarch64_insn_data *) &insn_data);
  /* We may not have been able to relocate the instruction.  */
  if (insn_data.insn_ptr == buf)
    {
      sprintf (err,
	       "E.Could not relocate instruction from %s to %s.",
	       core_addr_to_string_nz (tpaddr),
	       core_addr_to_string_nz (buildaddr));
      return 1;
    }
  else
    {
      append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
      *adjusted_insn_addr_end = buildaddr;
    }
  /* Go back to the start of the buffer.  */
  p = buf;

  /* Emit a branch back from the jump pad.  */
  offset = (tpaddr + orig_size - buildaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
	       offset);
      return 1;
    }

  p += emit_b (p, 0, offset);
  append_insns (&buildaddr, p - buf, buf);

  /* Give the caller a branch instruction into the jump pad.  */
  offset = (*jump_entry - tpaddr);
  if (!can_encode_int32 (offset, 28))
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
	       offset);
      return 1;
    }

  emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
  *jjump_pad_insn_size = 4;

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
/* Helper function writing LEN instructions from START into
   current_insn_ptr.  */

static void
emit_ops_insns (const uint32_t *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d instructions at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}
/* Pop a register from the stack.  */

static int
emit_pop (uint32_t *buf, struct aarch64_register rt)
{
  return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
}
/* Push a register on the stack.  */

static int
emit_push (uint32_t *buf, struct aarch64_register rt)
{
  return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
}
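/* Note that the expression stack reuses the jump pad convention of one
   16-byte cell per value, which also keeps SP 16-byte aligned as AArch64
   requires.  The push/pop pair above is therefore equivalent to:

     STR rt, [sp, #-16]!   ; push
     LDR rt, [sp], #16     ; pop
*/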
/* Implementation of emit_ops method "emit_prologue".  */

static void
aarch64_emit_prologue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* This function emits a prologue for the following function prototype:

     enum eval_result_type f (unsigned char *regs,
			      ULONGEST *value);

     The first argument is a buffer of raw registers.  The second
     argument is the result of
     evaluating the expression, which will be set to whatever is on top of
     the stack at the end.

     The stack set up by the prologue is as such:

     High *------------------------------------------------------*
	  | LR                                                   |
	  | FP                                                   | <- FP
	  | x1  (ULONGEST *value)                                |
	  | x0  (unsigned char *regs)                            |
     Low  *------------------------------------------------------*

     As we are implementing a stack machine, each opcode can expand the
     stack so we never know how far we are from the data saved by this
     prologue.  In order to be able to refer to value and regs later, we
     save the current stack pointer in the frame pointer.  This way, it
     is not clobbered when calling C functions.

     Finally, throughout every operation, we are using register x0 as the
     top of the stack, and x1 as a scratch register.  */

  p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
  p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
  p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));

  p += emit_add (p, fp, sp, immediate_operand (2 * 8));

  emit_ops_insns (buf, p - buf);
}
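/* With the frame pointer anchored as above, the prologue's saved slots
   stay at fixed offsets from FP no matter how deep the expression stack
   grows: the value pointer is reachable at [fp - 8] (used by
   aarch64_emit_epilogue) and the regs buffer at [fp - 16] (used by
   aarch64_emit_reg).  */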
/* Implementation of emit_ops method "emit_epilogue".  */

static void
aarch64_emit_epilogue (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Store the result of the expression (x0) in *value.  */
  p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
  p += emit_ldr (p, x1, x1, offset_memory_operand (0));
  p += emit_str (p, x0, x1, offset_memory_operand (0));

  /* Restore the previous state.  */
  p += emit_add (p, sp, fp, immediate_operand (2 * 8));
  p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));

  /* Return expr_eval_no_error.  */
  p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
  p += emit_ret (p, lr);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_add".  */

static void
aarch64_emit_add (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_add (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_sub".  */

static void
aarch64_emit_sub (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_sub (p, x0, x1, register_operand (x0));

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_mul".  */

static void
aarch64_emit_mul (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_mul (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_lsh".  */

static void
aarch64_emit_lsh (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lslv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_rsh_signed".  */

static void
aarch64_emit_rsh_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_asrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_rsh_unsigned".  */

static void
aarch64_emit_rsh_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_lsrv (p, x0, x1, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_ext".  */

static void
aarch64_emit_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_sbfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_log_not".  */

static void
aarch64_emit_log_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* If the top of the stack is 0, replace it with 1.  Else replace it with
     0.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_bit_and".  */

static void
aarch64_emit_bit_and (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_and (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_bit_or".  */

static void
aarch64_emit_bit_or (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_orr (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_bit_xor".  */

static void
aarch64_emit_bit_xor (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_eor (p, x0, x0, x1);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_bit_not".  */

static void
aarch64_emit_bit_not (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mvn (p, x0, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_equal".  */

static void
aarch64_emit_equal (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x0, register_operand (x1));
  p += emit_cset (p, x0, EQ);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_less_signed".  */

static void
aarch64_emit_less_signed (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LT);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_less_unsigned".  */

static void
aarch64_emit_less_unsigned (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  p += emit_cset (p, x0, LO);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_ref".  */

static void
aarch64_emit_ref (int size)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  switch (size)
    {
    case 1:
      p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
      break;
    case 2:
      p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
      break;
    case 4:
      p += emit_ldr (p, w0, x0, offset_memory_operand (0));
      break;
    case 8:
      p += emit_ldr (p, x0, x0, offset_memory_operand (0));
      break;
    default:
      /* Unknown size, bail on compilation.  */
      emit_error = 1;
      break;
    }

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_if_goto".  */

static void
aarch64_emit_if_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The Z flag is set or cleared here.  */
  p += emit_cmp (p, x0, immediate_operand (0));
  /* This instruction must not change the Z flag.  */
  p += emit_pop (p, x0);
  /* Branch over the next instruction if x0 == 0.  */
  p += emit_bcond (p, EQ, 8);

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
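/* The offset recorded in *offset_p is consumed later by
   aarch64_write_goto_address, which patches the NOP emitted above with a
   real branch once the bytecode destination is known, along the lines of:

     aarch64_write_goto_address (nop_addr, target_addr, 4);

   where nop_addr stands for the address the NOP was written to.  */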
/* Implementation of emit_ops method "emit_goto".  */

static void
aarch64_emit_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = 0;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "write_goto_address".  */

static void
aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  uint32_t insn;

  emit_b (&insn, 0, to - from);
  append_insns (&from, 1, &insn);
}
/* Implementation of emit_ops method "emit_const".  */

static void
aarch64_emit_const (LONGEST num)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, x0, num);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_call".  */

static void
aarch64_emit_call (CORE_ADDR fn)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov_addr (p, ip0, fn);
  p += emit_blr (p, ip0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_reg".  */

static void
aarch64_emit_reg (int reg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Set x0 to unsigned char *regs.  */
  p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
  p += emit_ldr (p, x0, x0, offset_memory_operand (0));
  p += emit_mov (p, x1, immediate_operand (reg));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (get_raw_reg_func_addr ());
}
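/* The call above lands in the in-process agent: get_raw_reg_func_addr ()
   returns the address of its get_raw_reg helper, which is expected to take
   the raw register buffer (x0) and a register number (x1) and return the
   register value, leaving it in x0 on top of the expression stack.  */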
/* Implementation of emit_ops method "emit_pop".  */

static void
aarch64_emit_pop (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_stack_flush".  */

static void
aarch64_emit_stack_flush (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_push (p, x0);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_zero_ext".  */

static void
aarch64_emit_zero_ext (int arg)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ubfx (p, x0, x0, 0, arg);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_swap".  */

static void
aarch64_emit_swap (void)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
  p += emit_mov (p, x0, register_operand (x1));

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_stack_adjust".  */

static void
aarch64_emit_stack_adjust (int n)
{
  /* This is not needed with our design.  */
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_add (p, sp, sp, immediate_operand (n * 16));

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_int_call_1".  */

static void
aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);
}
/* Implementation of emit_ops method "emit_void_call_2".  */

static void
aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  /* Push x0 on the stack.  */
  aarch64_emit_stack_flush ();

  /* Setup arguments for the function call:

     x0: arg1
     x1: top of the stack

       MOV x1, x0
       MOV x0, #arg1  */

  p += emit_mov (p, x1, register_operand (x0));
  p += emit_mov (p, x0, immediate_operand (arg1));

  emit_ops_insns (buf, p - buf);

  aarch64_emit_call (fn);

  /* Restore x0.  */
  aarch64_emit_pop ();
}
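/* x0 carries the top of the expression stack, and the AArch64 calling
   convention clobbers it (first argument and return value), so the value
   is flushed to the stack before the call and popped back afterwards;
   FN's return value is intentionally discarded.  */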
/* Implementation of emit_ops method "emit_eq_goto".  */

static void
aarch64_emit_eq_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 != x1.  */
  p += emit_bcond (p, NE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_ne_goto".  */

static void
aarch64_emit_ne_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 == x1.  */
  p += emit_bcond (p, EQ, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_lt_goto".  */

static void
aarch64_emit_lt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 >= x1.  */
  p += emit_bcond (p, GE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_le_goto".  */

static void
aarch64_emit_le_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 > x1.  */
  p += emit_bcond (p, GT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_gt_goto".  */

static void
aarch64_emit_gt_goto (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 <= x1.  */
  p += emit_bcond (p, LE, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
/* Implementation of emit_ops method "emit_ge_got".  */

static void
aarch64_emit_ge_got (int *offset_p, int *size_p)
{
  uint32_t buf[16];
  uint32_t *p = buf;

  p += emit_pop (p, x1);
  p += emit_cmp (p, x1, register_operand (x0));
  /* Branch over the next instruction if x0 < x1.  */
  p += emit_bcond (p, LT, 8);
  /* The NOP instruction will be patched with an unconditional branch.  */
  if (offset_p)
    *offset_p = (p - buf) * 4;
  if (size_p)
    *size_p = 4;
  p += emit_nop (p);

  emit_ops_insns (buf, p - buf);
}
static struct emit_ops aarch64_emit_ops_impl =
{
  aarch64_emit_prologue,
  aarch64_emit_epilogue,
  aarch64_emit_add,
  aarch64_emit_sub,
  aarch64_emit_mul,
  aarch64_emit_lsh,
  aarch64_emit_rsh_signed,
  aarch64_emit_rsh_unsigned,
  aarch64_emit_ext,
  aarch64_emit_log_not,
  aarch64_emit_bit_and,
  aarch64_emit_bit_or,
  aarch64_emit_bit_xor,
  aarch64_emit_bit_not,
  aarch64_emit_equal,
  aarch64_emit_less_signed,
  aarch64_emit_less_unsigned,
  aarch64_emit_ref,
  aarch64_emit_if_goto,
  aarch64_emit_goto,
  aarch64_write_goto_address,
  aarch64_emit_const,
  aarch64_emit_call,
  aarch64_emit_reg,
  aarch64_emit_pop,
  aarch64_emit_stack_flush,
  aarch64_emit_zero_ext,
  aarch64_emit_swap,
  aarch64_emit_stack_adjust,
  aarch64_emit_int_call_1,
  aarch64_emit_void_call_2,
  aarch64_emit_eq_goto,
  aarch64_emit_ne_goto,
  aarch64_emit_lt_goto,
  aarch64_emit_le_goto,
  aarch64_emit_gt_goto,
  aarch64_emit_ge_got,
};

/* Implementation of target ops method "emit_ops".  */

emit_ops *
aarch64_target::emit_ops ()
{
  return &aarch64_emit_ops_impl;
}
/* Implementation of target ops method
   "get_min_fast_tracepoint_insn_len".  */

int
aarch64_target::get_min_fast_tracepoint_insn_len ()
{
  return 4;
}
/* Implementation of linux target ops method "low_supports_range_stepping".  */

bool
aarch64_target::low_supports_range_stepping ()
{
  return true;
}
/* Implementation of target ops method "sw_breakpoint_from_kind".  */

const gdb_byte *
aarch64_target::sw_breakpoint_from_kind (int kind, int *size)
{
  if (is_64bit_tdesc ())
    {
      *size = aarch64_breakpoint_len;
      return aarch64_breakpoint;
    }
  else
    return arm_sw_breakpoint_from_kind (kind, size);
}
/* Implementation of target ops method "breakpoint_kind_from_pc".  */

int
aarch64_target::breakpoint_kind_from_pc (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_pc (pcptr);
}
/* Implementation of the target ops method
   "breakpoint_kind_from_current_state".  */

int
aarch64_target::breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
{
  if (is_64bit_tdesc ())
    return aarch64_breakpoint_len;
  else
    return arm_breakpoint_kind_from_current_state (pcptr);
}
/* Check whether the running kernel supports the Morello ptrace features,
   by probing them against a disposable child process.  */

static bool
aarch64_supports_morello_features ()
{
  /* Spawn a child for testing.  */
  int child_pid = linux_create_child_for_ptrace_testing ();
  bool ret = aarch64_supports_morello (child_pid);
  /* Kill child_pid.  */
  linux_kill_child (child_pid, "aarch64_check_ptrace_features");
  return ret;
}
/* Implementation of target ops method "supports_qxfer_capability".  */

bool
aarch64_target::supports_qxfer_capability ()
{
  /* Do a live ptrace feature check instead of using HWCAP bits.  */
  return aarch64_supports_morello_features ();
}
/* Implementation of target ops method "qxfer_capability".  */

int
aarch64_target::qxfer_capability (const CORE_ADDR address,
				  unsigned char *readbuf,
				  unsigned const char *writebuf,
				  CORE_ADDR offset, int len)
{
  int tid = pid_of (current_thread);

  struct user_cap cap;

  if (readbuf != nullptr)
    {
      if (!aarch64_linux_read_capability (tid, address, cap))
	return -1;

      /* Copy data to readbuf.  */
      memcpy (readbuf, &cap.tag, 1);
      memcpy (readbuf + 1, &cap.val, 16);
    }
  else
    {
      /* Copy data from writebuf.  */
      memcpy (&cap.tag, writebuf, 1);
      memcpy (&cap.val, writebuf + 1, 16);
      memset (&cap.__reserved, 0, 15);

      if (!aarch64_linux_write_capability (tid, address, cap))
	return -1;
    }

  return sizeof (cap.val) + 1;
}
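/* Capability transfers therefore use a 17-byte buffer: one tag byte
   followed by the 16-byte (128-bit) capability value, with the __reserved
   bytes of struct user_cap zeroed on writes.  */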
/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_aarch64_target;
void
initialize_low_arch (void)
{
  initialize_low_arch_aarch32 ();

  initialize_regsets_info (&aarch64_regsets_info);
  initialize_regsets_info (&aarch64_sve_regsets_info);
}