1 /* GNU/Linux/AArch64 specific low level interface, for the remote server for
4 Copyright (C) 2009-2020 Free Software Foundation, Inc.
5 Contributed by ARM Ltd.
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
23 #include "linux-low.h"
24 #include "nat/aarch64-linux.h"
25 #include "nat/aarch64-linux-hw-point.h"
26 #include "arch/aarch64-insn.h"
27 #include "linux-aarch32-low.h"
28 #include "elf/common.h"
30 #include "tracepoint.h"
35 #include "nat/gdb_ptrace.h"
36 #include <asm/ptrace.h>
41 #include "gdb_proc_service.h"
42 #include "arch/aarch64.h"
43 #include "linux-aarch32-tdesc.h"
44 #include "linux-aarch64-tdesc.h"
45 #include "nat/aarch64-sve-linux-ptrace.h"
52 /* Linux target op definitions for the AArch64 architecture. */
54 class aarch64_target
: public linux_process_target
58 const regs_info
*get_regs_info () override
;
62 void low_arch_setup () override
;
64 bool low_cannot_fetch_register (int regno
) override
;
66 bool low_cannot_store_register (int regno
) override
;
69 /* The singleton target ops object. */
71 static aarch64_target the_aarch64_target
;
74 aarch64_target::low_cannot_fetch_register (int regno
)
76 gdb_assert_not_reached ("linux target op low_cannot_fetch_register "
77 "is not implemented by the target");
81 aarch64_target::low_cannot_store_register (int regno
)
83 gdb_assert_not_reached ("linux target op low_cannot_store_register "
84 "is not implemented by the target");
87 /* Per-process arch-specific data we want to keep. */
89 struct arch_process_info
91 /* Hardware breakpoint/watchpoint data.
92 The reason for them to be per-process rather than per-thread is
93 due to the lack of information in the gdbserver environment;
94 gdbserver is not told that whether a requested hardware
95 breakpoint/watchpoint is thread specific or not, so it has to set
96 each hw bp/wp for every thread in the current process. The
97 higher level bp/wp management in gdb will resume a thread if a hw
98 bp/wp trap is not expected for it. Since the hw bp/wp setting is
99 same for each thread, it is reasonable for the data to live here.
101 struct aarch64_debug_reg_state debug_reg_state
;
104 /* Return true if the size of register 0 is 8 byte. */
107 is_64bit_tdesc (void)
109 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
111 return register_size (regcache
->tdesc
, 0) == 8;
114 /* Return true if the regcache contains the number of SVE registers. */
119 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
121 return tdesc_contains_feature (regcache
->tdesc
, "org.gnu.gdb.aarch64.sve");
125 aarch64_fill_gregset (struct regcache
*regcache
, void *buf
)
127 struct user_pt_regs
*regset
= (struct user_pt_regs
*) buf
;
130 for (i
= 0; i
< AARCH64_X_REGS_NUM
; i
++)
131 collect_register (regcache
, AARCH64_X0_REGNUM
+ i
, ®set
->regs
[i
]);
132 collect_register (regcache
, AARCH64_SP_REGNUM
, ®set
->sp
);
133 collect_register (regcache
, AARCH64_PC_REGNUM
, ®set
->pc
);
134 collect_register (regcache
, AARCH64_CPSR_REGNUM
, ®set
->pstate
);
138 aarch64_store_gregset (struct regcache
*regcache
, const void *buf
)
140 const struct user_pt_regs
*regset
= (const struct user_pt_regs
*) buf
;
143 for (i
= 0; i
< AARCH64_X_REGS_NUM
; i
++)
144 supply_register (regcache
, AARCH64_X0_REGNUM
+ i
, ®set
->regs
[i
]);
145 supply_register (regcache
, AARCH64_SP_REGNUM
, ®set
->sp
);
146 supply_register (regcache
, AARCH64_PC_REGNUM
, ®set
->pc
);
147 supply_register (regcache
, AARCH64_CPSR_REGNUM
, ®set
->pstate
);
151 aarch64_fill_fpregset (struct regcache
*regcache
, void *buf
)
153 struct user_fpsimd_state
*regset
= (struct user_fpsimd_state
*) buf
;
156 for (i
= 0; i
< AARCH64_V_REGS_NUM
; i
++)
157 collect_register (regcache
, AARCH64_V0_REGNUM
+ i
, ®set
->vregs
[i
]);
158 collect_register (regcache
, AARCH64_FPSR_REGNUM
, ®set
->fpsr
);
159 collect_register (regcache
, AARCH64_FPCR_REGNUM
, ®set
->fpcr
);
163 aarch64_store_fpregset (struct regcache
*regcache
, const void *buf
)
165 const struct user_fpsimd_state
*regset
166 = (const struct user_fpsimd_state
*) buf
;
169 for (i
= 0; i
< AARCH64_V_REGS_NUM
; i
++)
170 supply_register (regcache
, AARCH64_V0_REGNUM
+ i
, ®set
->vregs
[i
]);
171 supply_register (regcache
, AARCH64_FPSR_REGNUM
, ®set
->fpsr
);
172 supply_register (regcache
, AARCH64_FPCR_REGNUM
, ®set
->fpcr
);
175 /* Store the pauth registers to regcache. */
178 aarch64_store_pauthregset (struct regcache
*regcache
, const void *buf
)
180 uint64_t *pauth_regset
= (uint64_t *) buf
;
181 int pauth_base
= find_regno (regcache
->tdesc
, "pauth_dmask");
186 supply_register (regcache
, AARCH64_PAUTH_DMASK_REGNUM (pauth_base
),
188 supply_register (regcache
, AARCH64_PAUTH_CMASK_REGNUM (pauth_base
),
192 /* Implementation of linux_target_ops method "get_pc". */
195 aarch64_get_pc (struct regcache
*regcache
)
197 if (register_size (regcache
->tdesc
, 0) == 8)
198 return linux_get_pc_64bit (regcache
);
200 return linux_get_pc_32bit (regcache
);
203 /* Implementation of linux_target_ops method "set_pc". */
206 aarch64_set_pc (struct regcache
*regcache
, CORE_ADDR pc
)
208 if (register_size (regcache
->tdesc
, 0) == 8)
209 linux_set_pc_64bit (regcache
, pc
);
211 linux_set_pc_32bit (regcache
, pc
);
214 #define aarch64_breakpoint_len 4
216 /* AArch64 BRK software debug mode instruction.
217 This instruction needs to match gdb/aarch64-tdep.c
218 (aarch64_default_breakpoint). */
219 static const gdb_byte aarch64_breakpoint
[] = {0x00, 0x00, 0x20, 0xd4};
221 /* Implementation of linux_target_ops method "breakpoint_at". */
224 aarch64_breakpoint_at (CORE_ADDR where
)
226 if (is_64bit_tdesc ())
228 gdb_byte insn
[aarch64_breakpoint_len
];
230 the_target
->read_memory (where
, (unsigned char *) &insn
,
231 aarch64_breakpoint_len
);
232 if (memcmp (insn
, aarch64_breakpoint
, aarch64_breakpoint_len
) == 0)
238 return arm_breakpoint_at (where
);
242 aarch64_init_debug_reg_state (struct aarch64_debug_reg_state
*state
)
246 for (i
= 0; i
< AARCH64_HBP_MAX_NUM
; ++i
)
248 state
->dr_addr_bp
[i
] = 0;
249 state
->dr_ctrl_bp
[i
] = 0;
250 state
->dr_ref_count_bp
[i
] = 0;
253 for (i
= 0; i
< AARCH64_HWP_MAX_NUM
; ++i
)
255 state
->dr_addr_wp
[i
] = 0;
256 state
->dr_ctrl_wp
[i
] = 0;
257 state
->dr_ref_count_wp
[i
] = 0;
261 /* Return the pointer to the debug register state structure in the
262 current process' arch-specific data area. */
264 struct aarch64_debug_reg_state
*
265 aarch64_get_debug_reg_state (pid_t pid
)
267 struct process_info
*proc
= find_process_pid (pid
);
269 return &proc
->priv
->arch_private
->debug_reg_state
;
272 /* Implementation of linux_target_ops method "supports_z_point_type". */
275 aarch64_supports_z_point_type (char z_type
)
281 case Z_PACKET_WRITE_WP
:
282 case Z_PACKET_READ_WP
:
283 case Z_PACKET_ACCESS_WP
:
290 /* Implementation of linux_target_ops method "insert_point".
292 It actually only records the info of the to-be-inserted bp/wp;
293 the actual insertion will happen when threads are resumed. */
296 aarch64_insert_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
297 int len
, struct raw_breakpoint
*bp
)
300 enum target_hw_bp_type targ_type
;
301 struct aarch64_debug_reg_state
*state
302 = aarch64_get_debug_reg_state (pid_of (current_thread
));
305 fprintf (stderr
, "insert_point on entry (addr=0x%08lx, len=%d)\n",
306 (unsigned long) addr
, len
);
308 /* Determine the type from the raw breakpoint type. */
309 targ_type
= raw_bkpt_type_to_target_hw_bp_type (type
);
311 if (targ_type
!= hw_execute
)
313 if (aarch64_linux_region_ok_for_watchpoint (addr
, len
))
314 ret
= aarch64_handle_watchpoint (targ_type
, addr
, len
,
315 1 /* is_insert */, state
);
323 /* LEN is 3 means the breakpoint is set on a 32-bit thumb
324 instruction. Set it to 2 to correctly encode length bit
325 mask in hardware/watchpoint control register. */
328 ret
= aarch64_handle_breakpoint (targ_type
, addr
, len
,
329 1 /* is_insert */, state
);
333 aarch64_show_debug_reg_state (state
, "insert_point", addr
, len
,
339 /* Implementation of linux_target_ops method "remove_point".
341 It actually only records the info of the to-be-removed bp/wp,
342 the actual removal will be done when threads are resumed. */
345 aarch64_remove_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
346 int len
, struct raw_breakpoint
*bp
)
349 enum target_hw_bp_type targ_type
;
350 struct aarch64_debug_reg_state
*state
351 = aarch64_get_debug_reg_state (pid_of (current_thread
));
354 fprintf (stderr
, "remove_point on entry (addr=0x%08lx, len=%d)\n",
355 (unsigned long) addr
, len
);
357 /* Determine the type from the raw breakpoint type. */
358 targ_type
= raw_bkpt_type_to_target_hw_bp_type (type
);
360 /* Set up state pointers. */
361 if (targ_type
!= hw_execute
)
363 aarch64_handle_watchpoint (targ_type
, addr
, len
, 0 /* is_insert */,
369 /* LEN is 3 means the breakpoint is set on a 32-bit thumb
370 instruction. Set it to 2 to correctly encode length bit
371 mask in hardware/watchpoint control register. */
374 ret
= aarch64_handle_breakpoint (targ_type
, addr
, len
,
375 0 /* is_insert */, state
);
379 aarch64_show_debug_reg_state (state
, "remove_point", addr
, len
,
385 /* Implementation of linux_target_ops method "stopped_data_address". */
388 aarch64_stopped_data_address (void)
392 struct aarch64_debug_reg_state
*state
;
394 pid
= lwpid_of (current_thread
);
396 /* Get the siginfo. */
397 if (ptrace (PTRACE_GETSIGINFO
, pid
, NULL
, &siginfo
) != 0)
398 return (CORE_ADDR
) 0;
400 /* Need to be a hardware breakpoint/watchpoint trap. */
401 if (siginfo
.si_signo
!= SIGTRAP
402 || (siginfo
.si_code
& 0xffff) != 0x0004 /* TRAP_HWBKPT */)
403 return (CORE_ADDR
) 0;
405 /* Check if the address matches any watched address. */
406 state
= aarch64_get_debug_reg_state (pid_of (current_thread
));
407 for (i
= aarch64_num_wp_regs
- 1; i
>= 0; --i
)
409 const unsigned int offset
410 = aarch64_watchpoint_offset (state
->dr_ctrl_wp
[i
]);
411 const unsigned int len
= aarch64_watchpoint_length (state
->dr_ctrl_wp
[i
]);
412 const CORE_ADDR addr_trap
= (CORE_ADDR
) siginfo
.si_addr
;
413 const CORE_ADDR addr_watch
= state
->dr_addr_wp
[i
] + offset
;
414 const CORE_ADDR addr_watch_aligned
= align_down (state
->dr_addr_wp
[i
], 8);
415 const CORE_ADDR addr_orig
= state
->dr_addr_orig_wp
[i
];
417 if (state
->dr_ref_count_wp
[i
]
418 && DR_CONTROL_ENABLED (state
->dr_ctrl_wp
[i
])
419 && addr_trap
>= addr_watch_aligned
420 && addr_trap
< addr_watch
+ len
)
422 /* ADDR_TRAP reports the first address of the memory range
423 accessed by the CPU, regardless of what was the memory
424 range watched. Thus, a large CPU access that straddles
425 the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
426 ADDR_TRAP that is lower than the
427 ADDR_WATCH..ADDR_WATCH+LEN range. E.g.:
429 addr: | 4 | 5 | 6 | 7 | 8 |
430 |---- range watched ----|
431 |----------- range accessed ------------|
433 In this case, ADDR_TRAP will be 4.
435 To match a watchpoint known to GDB core, we must never
436 report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
437 range. ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
438 positive on kernels older than 4.10. See PR
444 return (CORE_ADDR
) 0;
447 /* Implementation of linux_target_ops method "stopped_by_watchpoint". */
450 aarch64_stopped_by_watchpoint (void)
452 if (aarch64_stopped_data_address () != 0)
458 /* Fetch the thread-local storage pointer for libthread_db. */
461 ps_get_thread_area (struct ps_prochandle
*ph
,
462 lwpid_t lwpid
, int idx
, void **base
)
464 return aarch64_ps_get_thread_area (ph
, lwpid
, idx
, base
,
468 /* Implementation of linux_target_ops method "siginfo_fixup". */
471 aarch64_linux_siginfo_fixup (siginfo_t
*native
, gdb_byte
*inf
, int direction
)
473 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
474 if (!is_64bit_tdesc ())
477 aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
,
480 aarch64_siginfo_from_compat_siginfo (native
,
481 (struct compat_siginfo
*) inf
);
489 /* Implementation of linux_target_ops method "new_process". */
491 static struct arch_process_info
*
492 aarch64_linux_new_process (void)
494 struct arch_process_info
*info
= XCNEW (struct arch_process_info
);
496 aarch64_init_debug_reg_state (&info
->debug_reg_state
);
501 /* Implementation of linux_target_ops method "delete_process". */
504 aarch64_linux_delete_process (struct arch_process_info
*info
)
509 /* Implementation of linux_target_ops method "linux_new_fork". */
512 aarch64_linux_new_fork (struct process_info
*parent
,
513 struct process_info
*child
)
515 /* These are allocated by linux_add_process. */
516 gdb_assert (parent
->priv
!= NULL
517 && parent
->priv
->arch_private
!= NULL
);
518 gdb_assert (child
->priv
!= NULL
519 && child
->priv
->arch_private
!= NULL
);
521 /* Linux kernel before 2.6.33 commit
522 72f674d203cd230426437cdcf7dd6f681dad8b0d
523 will inherit hardware debug registers from parent
524 on fork/vfork/clone. Newer Linux kernels create such tasks with
525 zeroed debug registers.
527 GDB core assumes the child inherits the watchpoints/hw
528 breakpoints of the parent, and will remove them all from the
529 forked off process. Copy the debug registers mirrors into the
530 new process so that all breakpoints and watchpoints can be
531 removed together. The debug registers mirror will become zeroed
532 in the end before detaching the forked off process, thus making
533 this compatible with older Linux kernels too. */
535 *child
->priv
->arch_private
= *parent
->priv
->arch_private
;
/* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h.  */
#define AARCH64_HWCAP_PACA (1 << 30)
541 /* Implementation of linux target ops method "low_arch_setup". */
544 aarch64_target::low_arch_setup ()
546 unsigned int machine
;
550 tid
= lwpid_of (current_thread
);
552 is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
556 uint64_t vq
= aarch64_sve_get_vq (tid
);
557 unsigned long hwcap
= linux_get_hwcap (8);
558 bool pauth_p
= hwcap
& AARCH64_HWCAP_PACA
;
560 current_process ()->tdesc
= aarch64_linux_read_description (vq
, pauth_p
);
563 current_process ()->tdesc
= aarch32_linux_read_description ();
565 aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread
));
568 /* Wrapper for aarch64_sve_regs_copy_to_reg_buf. */
571 aarch64_sve_regs_copy_to_regcache (struct regcache
*regcache
, const void *buf
)
573 return aarch64_sve_regs_copy_to_reg_buf (regcache
, buf
);
576 /* Wrapper for aarch64_sve_regs_copy_from_reg_buf. */
579 aarch64_sve_regs_copy_from_regcache (struct regcache
*regcache
, void *buf
)
581 return aarch64_sve_regs_copy_from_reg_buf (regcache
, buf
);
584 static struct regset_info aarch64_regsets
[] =
586 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_PRSTATUS
,
587 sizeof (struct user_pt_regs
), GENERAL_REGS
,
588 aarch64_fill_gregset
, aarch64_store_gregset
},
589 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_FPREGSET
,
590 sizeof (struct user_fpsimd_state
), FP_REGS
,
591 aarch64_fill_fpregset
, aarch64_store_fpregset
593 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_ARM_PAC_MASK
,
594 AARCH64_PAUTH_REGS_SIZE
, OPTIONAL_REGS
,
595 NULL
, aarch64_store_pauthregset
},
599 static struct regsets_info aarch64_regsets_info
=
601 aarch64_regsets
, /* regsets */
603 NULL
, /* disabled_regsets */
606 static struct regs_info regs_info_aarch64
=
608 NULL
, /* regset_bitmap */
610 &aarch64_regsets_info
,
613 static struct regset_info aarch64_sve_regsets
[] =
615 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_PRSTATUS
,
616 sizeof (struct user_pt_regs
), GENERAL_REGS
,
617 aarch64_fill_gregset
, aarch64_store_gregset
},
618 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_ARM_SVE
,
619 SVE_PT_SIZE (AARCH64_MAX_SVE_VQ
, SVE_PT_REGS_SVE
), EXTENDED_REGS
,
620 aarch64_sve_regs_copy_from_regcache
, aarch64_sve_regs_copy_to_regcache
622 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_ARM_PAC_MASK
,
623 AARCH64_PAUTH_REGS_SIZE
, OPTIONAL_REGS
,
624 NULL
, aarch64_store_pauthregset
},
628 static struct regsets_info aarch64_sve_regsets_info
=
630 aarch64_sve_regsets
, /* regsets. */
631 0, /* num_regsets. */
632 NULL
, /* disabled_regsets. */
635 static struct regs_info regs_info_aarch64_sve
=
637 NULL
, /* regset_bitmap. */
639 &aarch64_sve_regsets_info
,
642 /* Implementation of linux target ops method "get_regs_info". */
645 aarch64_target::get_regs_info ()
647 if (!is_64bit_tdesc ())
648 return ®s_info_aarch32
;
651 return ®s_info_aarch64_sve
;
653 return ®s_info_aarch64
;
656 /* Implementation of linux_target_ops method "supports_tracepoints". */
659 aarch64_supports_tracepoints (void)
661 if (current_thread
== NULL
)
665 /* We don't support tracepoints on aarch32 now. */
666 return is_64bit_tdesc ();
670 /* Implementation of linux_target_ops method "get_thread_area". */
673 aarch64_get_thread_area (int lwpid
, CORE_ADDR
*addrp
)
678 iovec
.iov_base
= ®
;
679 iovec
.iov_len
= sizeof (reg
);
681 if (ptrace (PTRACE_GETREGSET
, lwpid
, NT_ARM_TLS
, &iovec
) != 0)
689 /* Implementation of linux_target_ops method "get_syscall_trapinfo". */
692 aarch64_get_syscall_trapinfo (struct regcache
*regcache
, int *sysno
)
694 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
700 collect_register_by_name (regcache
, "x8", &l_sysno
);
701 *sysno
= (int) l_sysno
;
704 collect_register_by_name (regcache
, "r7", sysno
);
707 /* List of condition codes that we need. */
709 enum aarch64_condition_codes
720 enum aarch64_operand_type
726 /* Representation of an operand. At this time, it only supports register
727 and immediate types. */
729 struct aarch64_operand
731 /* Type of the operand. */
732 enum aarch64_operand_type type
;
734 /* Value of the operand according to the type. */
738 struct aarch64_register reg
;
742 /* List of registers that we are currently using, we can add more here as
743 we need to use them. */
745 /* General purpose scratch registers (64 bit). */
746 static const struct aarch64_register x0
= { 0, 1 };
747 static const struct aarch64_register x1
= { 1, 1 };
748 static const struct aarch64_register x2
= { 2, 1 };
749 static const struct aarch64_register x3
= { 3, 1 };
750 static const struct aarch64_register x4
= { 4, 1 };
752 /* General purpose scratch registers (32 bit). */
753 static const struct aarch64_register w0
= { 0, 0 };
754 static const struct aarch64_register w2
= { 2, 0 };
756 /* Intra-procedure scratch registers. */
757 static const struct aarch64_register ip0
= { 16, 1 };
759 /* Special purpose registers. */
760 static const struct aarch64_register fp
= { 29, 1 };
761 static const struct aarch64_register lr
= { 30, 1 };
762 static const struct aarch64_register sp
= { 31, 1 };
763 static const struct aarch64_register xzr
= { 31, 1 };
765 /* Dynamically allocate a new register. If we know the register
766 statically, we should make it a global as above instead of using this
769 static struct aarch64_register
770 aarch64_register (unsigned num
, int is64
)
772 return (struct aarch64_register
) { num
, is64
};
775 /* Helper function to create a register operand, for instructions with
776 different types of operands.
779 p += emit_mov (p, x0, register_operand (x1)); */
781 static struct aarch64_operand
782 register_operand (struct aarch64_register reg
)
784 struct aarch64_operand operand
;
786 operand
.type
= OPERAND_REGISTER
;
792 /* Helper function to create an immediate operand, for instructions with
793 different types of operands.
796 p += emit_mov (p, x0, immediate_operand (12)); */
798 static struct aarch64_operand
799 immediate_operand (uint32_t imm
)
801 struct aarch64_operand operand
;
803 operand
.type
= OPERAND_IMMEDIATE
;
809 /* Helper function to create an offset memory operand.
812 p += emit_ldr (p, x0, sp, offset_memory_operand (16)); */
814 static struct aarch64_memory_operand
815 offset_memory_operand (int32_t offset
)
817 return (struct aarch64_memory_operand
) { MEMORY_OPERAND_OFFSET
, offset
};
820 /* Helper function to create a pre-index memory operand.
823 p += emit_ldr (p, x0, sp, preindex_memory_operand (16)); */
825 static struct aarch64_memory_operand
826 preindex_memory_operand (int32_t index
)
828 return (struct aarch64_memory_operand
) { MEMORY_OPERAND_PREINDEX
, index
};
831 /* Helper function to create a post-index memory operand.
834 p += emit_ldr (p, x0, sp, postindex_memory_operand (16)); */
836 static struct aarch64_memory_operand
837 postindex_memory_operand (int32_t index
)
839 return (struct aarch64_memory_operand
) { MEMORY_OPERAND_POSTINDEX
, index
};
/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control registers.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};
860 /* Write a BLR instruction into *BUF.
864 RN is the register to branch to. */
867 emit_blr (uint32_t *buf
, struct aarch64_register rn
)
869 return aarch64_emit_insn (buf
, BLR
| ENCODE (rn
.num
, 5, 5));
872 /* Write a RET instruction into *BUF.
876 RN is the register to branch to. */
879 emit_ret (uint32_t *buf
, struct aarch64_register rn
)
881 return aarch64_emit_insn (buf
, RET
| ENCODE (rn
.num
, 5, 5));
885 emit_load_store_pair (uint32_t *buf
, enum aarch64_opcodes opcode
,
886 struct aarch64_register rt
,
887 struct aarch64_register rt2
,
888 struct aarch64_register rn
,
889 struct aarch64_memory_operand operand
)
896 opc
= ENCODE (2, 2, 30);
898 opc
= ENCODE (0, 2, 30);
900 switch (operand
.type
)
902 case MEMORY_OPERAND_OFFSET
:
904 pre_index
= ENCODE (1, 1, 24);
905 write_back
= ENCODE (0, 1, 23);
908 case MEMORY_OPERAND_POSTINDEX
:
910 pre_index
= ENCODE (0, 1, 24);
911 write_back
= ENCODE (1, 1, 23);
914 case MEMORY_OPERAND_PREINDEX
:
916 pre_index
= ENCODE (1, 1, 24);
917 write_back
= ENCODE (1, 1, 23);
924 return aarch64_emit_insn (buf
, opcode
| opc
| pre_index
| write_back
925 | ENCODE (operand
.index
>> 3, 7, 15)
926 | ENCODE (rt2
.num
, 5, 10)
927 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
.num
, 5, 0));
930 /* Write a STP instruction into *BUF.
932 STP rt, rt2, [rn, #offset]
933 STP rt, rt2, [rn, #index]!
934 STP rt, rt2, [rn], #index
936 RT and RT2 are the registers to store.
937 RN is the base address register.
938 OFFSET is the immediate to add to the base address. It is limited to a
939 -512 .. 504 range (7 bits << 3). */
942 emit_stp (uint32_t *buf
, struct aarch64_register rt
,
943 struct aarch64_register rt2
, struct aarch64_register rn
,
944 struct aarch64_memory_operand operand
)
946 return emit_load_store_pair (buf
, STP
, rt
, rt2
, rn
, operand
);
949 /* Write a LDP instruction into *BUF.
951 LDP rt, rt2, [rn, #offset]
952 LDP rt, rt2, [rn, #index]!
953 LDP rt, rt2, [rn], #index
955 RT and RT2 are the registers to store.
956 RN is the base address register.
957 OFFSET is the immediate to add to the base address. It is limited to a
958 -512 .. 504 range (7 bits << 3). */
961 emit_ldp (uint32_t *buf
, struct aarch64_register rt
,
962 struct aarch64_register rt2
, struct aarch64_register rn
,
963 struct aarch64_memory_operand operand
)
965 return emit_load_store_pair (buf
, LDP
, rt
, rt2
, rn
, operand
);
968 /* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.
970 LDP qt, qt2, [rn, #offset]
972 RT and RT2 are the Q registers to store.
973 RN is the base address register.
974 OFFSET is the immediate to add to the base address. It is limited to
975 -1024 .. 1008 range (7 bits << 4). */
978 emit_ldp_q_offset (uint32_t *buf
, unsigned rt
, unsigned rt2
,
979 struct aarch64_register rn
, int32_t offset
)
981 uint32_t opc
= ENCODE (2, 2, 30);
982 uint32_t pre_index
= ENCODE (1, 1, 24);
984 return aarch64_emit_insn (buf
, LDP_SIMD_VFP
| opc
| pre_index
985 | ENCODE (offset
>> 4, 7, 15)
986 | ENCODE (rt2
, 5, 10)
987 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
, 5, 0));
990 /* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.
992 STP qt, qt2, [rn, #offset]
994 RT and RT2 are the Q registers to store.
995 RN is the base address register.
996 OFFSET is the immediate to add to the base address. It is limited to
997 -1024 .. 1008 range (7 bits << 4). */
1000 emit_stp_q_offset (uint32_t *buf
, unsigned rt
, unsigned rt2
,
1001 struct aarch64_register rn
, int32_t offset
)
1003 uint32_t opc
= ENCODE (2, 2, 30);
1004 uint32_t pre_index
= ENCODE (1, 1, 24);
1006 return aarch64_emit_insn (buf
, STP_SIMD_VFP
| opc
| pre_index
1007 | ENCODE (offset
>> 4, 7, 15)
1008 | ENCODE (rt2
, 5, 10)
1009 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
, 5, 0));
1012 /* Write a LDRH instruction into *BUF.
1014 LDRH wt, [xn, #offset]
1015 LDRH wt, [xn, #index]!
1016 LDRH wt, [xn], #index
1018 RT is the register to store.
1019 RN is the base address register.
1020 OFFSET is the immediate to add to the base address. It is limited to
1021 0 .. 32760 range (12 bits << 3). */
1024 emit_ldrh (uint32_t *buf
, struct aarch64_register rt
,
1025 struct aarch64_register rn
,
1026 struct aarch64_memory_operand operand
)
1028 return aarch64_emit_load_store (buf
, 1, LDR
, rt
, rn
, operand
);
1031 /* Write a LDRB instruction into *BUF.
1033 LDRB wt, [xn, #offset]
1034 LDRB wt, [xn, #index]!
1035 LDRB wt, [xn], #index
1037 RT is the register to store.
1038 RN is the base address register.
1039 OFFSET is the immediate to add to the base address. It is limited to
1040 0 .. 32760 range (12 bits << 3). */
1043 emit_ldrb (uint32_t *buf
, struct aarch64_register rt
,
1044 struct aarch64_register rn
,
1045 struct aarch64_memory_operand operand
)
1047 return aarch64_emit_load_store (buf
, 0, LDR
, rt
, rn
, operand
);
1052 /* Write a STR instruction into *BUF.
1054 STR rt, [rn, #offset]
1055 STR rt, [rn, #index]!
1056 STR rt, [rn], #index
1058 RT is the register to store.
1059 RN is the base address register.
1060 OFFSET is the immediate to add to the base address. It is limited to
1061 0 .. 32760 range (12 bits << 3). */
1064 emit_str (uint32_t *buf
, struct aarch64_register rt
,
1065 struct aarch64_register rn
,
1066 struct aarch64_memory_operand operand
)
1068 return aarch64_emit_load_store (buf
, rt
.is64
? 3 : 2, STR
, rt
, rn
, operand
);
1071 /* Helper function emitting an exclusive load or store instruction. */
1074 emit_load_store_exclusive (uint32_t *buf
, uint32_t size
,
1075 enum aarch64_opcodes opcode
,
1076 struct aarch64_register rs
,
1077 struct aarch64_register rt
,
1078 struct aarch64_register rt2
,
1079 struct aarch64_register rn
)
1081 return aarch64_emit_insn (buf
, opcode
| ENCODE (size
, 2, 30)
1082 | ENCODE (rs
.num
, 5, 16) | ENCODE (rt2
.num
, 5, 10)
1083 | ENCODE (rn
.num
, 5, 5) | ENCODE (rt
.num
, 5, 0));
1086 /* Write a LAXR instruction into *BUF.
1090 RT is the destination register.
1091 RN is the base address register. */
1094 emit_ldaxr (uint32_t *buf
, struct aarch64_register rt
,
1095 struct aarch64_register rn
)
1097 return emit_load_store_exclusive (buf
, rt
.is64
? 3 : 2, LDAXR
, xzr
, rt
,
1101 /* Write a STXR instruction into *BUF.
1105 RS is the result register, it indicates if the store succeeded or not.
1106 RT is the destination register.
1107 RN is the base address register. */
1110 emit_stxr (uint32_t *buf
, struct aarch64_register rs
,
1111 struct aarch64_register rt
, struct aarch64_register rn
)
1113 return emit_load_store_exclusive (buf
, rt
.is64
? 3 : 2, STXR
, rs
, rt
,
1117 /* Write a STLR instruction into *BUF.
1121 RT is the register to store.
1122 RN is the base address register. */
1125 emit_stlr (uint32_t *buf
, struct aarch64_register rt
,
1126 struct aarch64_register rn
)
1128 return emit_load_store_exclusive (buf
, rt
.is64
? 3 : 2, STLR
, xzr
, rt
,
1132 /* Helper function for data processing instructions with register sources. */
1135 emit_data_processing_reg (uint32_t *buf
, uint32_t opcode
,
1136 struct aarch64_register rd
,
1137 struct aarch64_register rn
,
1138 struct aarch64_register rm
)
1140 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1142 return aarch64_emit_insn (buf
, opcode
| size
| ENCODE (rm
.num
, 5, 16)
1143 | ENCODE (rn
.num
, 5, 5) | ENCODE (rd
.num
, 5, 0));
1146 /* Helper function for data processing instructions taking either a register
1150 emit_data_processing (uint32_t *buf
, enum aarch64_opcodes opcode
,
1151 struct aarch64_register rd
,
1152 struct aarch64_register rn
,
1153 struct aarch64_operand operand
)
1155 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1156 /* The opcode is different for register and immediate source operands. */
1157 uint32_t operand_opcode
;
1159 if (operand
.type
== OPERAND_IMMEDIATE
)
1161 /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
1162 operand_opcode
= ENCODE (8, 4, 25);
1164 return aarch64_emit_insn (buf
, opcode
| operand_opcode
| size
1165 | ENCODE (operand
.imm
, 12, 10)
1166 | ENCODE (rn
.num
, 5, 5)
1167 | ENCODE (rd
.num
, 5, 0));
1171 /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
1172 operand_opcode
= ENCODE (5, 4, 25);
1174 return emit_data_processing_reg (buf
, opcode
| operand_opcode
, rd
,
1179 /* Write an ADD instruction into *BUF.
1184 This function handles both an immediate and register add.
1186 RD is the destination register.
1187 RN is the input register.
1188 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1189 OPERAND_REGISTER. */
1192 emit_add (uint32_t *buf
, struct aarch64_register rd
,
1193 struct aarch64_register rn
, struct aarch64_operand operand
)
1195 return emit_data_processing (buf
, ADD
, rd
, rn
, operand
);
1198 /* Write a SUB instruction into *BUF.
1203 This function handles both an immediate and register sub.
1205 RD is the destination register.
1206 RN is the input register.
1207 IMM is the immediate to substract to RN. */
1210 emit_sub (uint32_t *buf
, struct aarch64_register rd
,
1211 struct aarch64_register rn
, struct aarch64_operand operand
)
1213 return emit_data_processing (buf
, SUB
, rd
, rn
, operand
);
1216 /* Write a MOV instruction into *BUF.
1221 This function handles both a wide immediate move and a register move,
1222 with the condition that the source register is not xzr. xzr and the
1223 stack pointer share the same encoding and this function only supports
1226 RD is the destination register.
1227 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1228 OPERAND_REGISTER. */
1231 emit_mov (uint32_t *buf
, struct aarch64_register rd
,
1232 struct aarch64_operand operand
)
1234 if (operand
.type
== OPERAND_IMMEDIATE
)
1236 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1237 /* Do not shift the immediate. */
1238 uint32_t shift
= ENCODE (0, 2, 21);
1240 return aarch64_emit_insn (buf
, MOV
| size
| shift
1241 | ENCODE (operand
.imm
, 16, 5)
1242 | ENCODE (rd
.num
, 5, 0));
1245 return emit_add (buf
, rd
, operand
.reg
, immediate_operand (0));
1248 /* Write a MOVK instruction into *BUF.
1250 MOVK rd, #imm, lsl #shift
1252 RD is the destination register.
1253 IMM is the immediate.
1254 SHIFT is the logical shift left to apply to IMM. */
1257 emit_movk (uint32_t *buf
, struct aarch64_register rd
, uint32_t imm
,
1260 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1262 return aarch64_emit_insn (buf
, MOVK
| size
| ENCODE (shift
, 2, 21) |
1263 ENCODE (imm
, 16, 5) | ENCODE (rd
.num
, 5, 0));
1266 /* Write instructions into *BUF in order to move ADDR into a register.
1267 ADDR can be a 64-bit value.
1269 This function will emit a series of MOV and MOVK instructions, such as:
1272 MOVK xd, #(addr >> 16), lsl #16
1273 MOVK xd, #(addr >> 32), lsl #32
1274 MOVK xd, #(addr >> 48), lsl #48 */
1277 emit_mov_addr (uint32_t *buf
, struct aarch64_register rd
, CORE_ADDR addr
)
1281 /* The MOV (wide immediate) instruction clears to top bits of the
1283 p
+= emit_mov (p
, rd
, immediate_operand (addr
& 0xffff));
1285 if ((addr
>> 16) != 0)
1286 p
+= emit_movk (p
, rd
, (addr
>> 16) & 0xffff, 1);
1290 if ((addr
>> 32) != 0)
1291 p
+= emit_movk (p
, rd
, (addr
>> 32) & 0xffff, 2);
1295 if ((addr
>> 48) != 0)
1296 p
+= emit_movk (p
, rd
, (addr
>> 48) & 0xffff, 3);
1301 /* Write a SUBS instruction into *BUF.
1305 This instruction update the condition flags.
1307 RD is the destination register.
1308 RN and RM are the source registers. */
1311 emit_subs (uint32_t *buf
, struct aarch64_register rd
,
1312 struct aarch64_register rn
, struct aarch64_operand operand
)
1314 return emit_data_processing (buf
, SUBS
, rd
, rn
, operand
);
1317 /* Write a CMP instruction into *BUF.
1321 This instruction is an alias of SUBS xzr, rn, rm.
1323 RN and RM are the registers to compare. */
1326 emit_cmp (uint32_t *buf
, struct aarch64_register rn
,
1327 struct aarch64_operand operand
)
1329 return emit_subs (buf
, xzr
, rn
, operand
);
1332 /* Write a AND instruction into *BUF.
1336 RD is the destination register.
1337 RN and RM are the source registers. */
1340 emit_and (uint32_t *buf
, struct aarch64_register rd
,
1341 struct aarch64_register rn
, struct aarch64_register rm
)
1343 return emit_data_processing_reg (buf
, AND
, rd
, rn
, rm
);
1346 /* Write a ORR instruction into *BUF.
1350 RD is the destination register.
1351 RN and RM are the source registers. */
1354 emit_orr (uint32_t *buf
, struct aarch64_register rd
,
1355 struct aarch64_register rn
, struct aarch64_register rm
)
1357 return emit_data_processing_reg (buf
, ORR
, rd
, rn
, rm
);
1360 /* Write a ORN instruction into *BUF.
1364 RD is the destination register.
1365 RN and RM are the source registers. */
1368 emit_orn (uint32_t *buf
, struct aarch64_register rd
,
1369 struct aarch64_register rn
, struct aarch64_register rm
)
1371 return emit_data_processing_reg (buf
, ORN
, rd
, rn
, rm
);
1374 /* Write a EOR instruction into *BUF.
1378 RD is the destination register.
1379 RN and RM are the source registers. */
1382 emit_eor (uint32_t *buf
, struct aarch64_register rd
,
1383 struct aarch64_register rn
, struct aarch64_register rm
)
1385 return emit_data_processing_reg (buf
, EOR
, rd
, rn
, rm
);
1388 /* Write a MVN instruction into *BUF.
1392 This is an alias for ORN rd, xzr, rm.
1394 RD is the destination register.
1395 RM is the source register. */
1398 emit_mvn (uint32_t *buf
, struct aarch64_register rd
,
1399 struct aarch64_register rm
)
1401 return emit_orn (buf
, rd
, xzr
, rm
);
1404 /* Write a LSLV instruction into *BUF.
1408 RD is the destination register.
1409 RN and RM are the source registers. */
1412 emit_lslv (uint32_t *buf
, struct aarch64_register rd
,
1413 struct aarch64_register rn
, struct aarch64_register rm
)
1415 return emit_data_processing_reg (buf
, LSLV
, rd
, rn
, rm
);
1418 /* Write a LSRV instruction into *BUF.
1422 RD is the destination register.
1423 RN and RM are the source registers. */
1426 emit_lsrv (uint32_t *buf
, struct aarch64_register rd
,
1427 struct aarch64_register rn
, struct aarch64_register rm
)
1429 return emit_data_processing_reg (buf
, LSRV
, rd
, rn
, rm
);
1432 /* Write a ASRV instruction into *BUF.
1436 RD is the destination register.
1437 RN and RM are the source registers. */
1440 emit_asrv (uint32_t *buf
, struct aarch64_register rd
,
1441 struct aarch64_register rn
, struct aarch64_register rm
)
1443 return emit_data_processing_reg (buf
, ASRV
, rd
, rn
, rm
);
1446 /* Write a MUL instruction into *BUF.
1450 RD is the destination register.
1451 RN and RM are the source registers. */
1454 emit_mul (uint32_t *buf
, struct aarch64_register rd
,
1455 struct aarch64_register rn
, struct aarch64_register rm
)
1457 return emit_data_processing_reg (buf
, MUL
, rd
, rn
, rm
);
1460 /* Write a MRS instruction into *BUF. The register size is 64-bit.
1464 RT is the destination register.
1465 SYSTEM_REG is special purpose register to read. */
1468 emit_mrs (uint32_t *buf
, struct aarch64_register rt
,
1469 enum aarch64_system_control_registers system_reg
)
1471 return aarch64_emit_insn (buf
, MRS
| ENCODE (system_reg
, 15, 5)
1472 | ENCODE (rt
.num
, 5, 0));
1475 /* Write a MSR instruction into *BUF. The register size is 64-bit.
1479 SYSTEM_REG is special purpose register to write.
1480 RT is the input register. */
1483 emit_msr (uint32_t *buf
, enum aarch64_system_control_registers system_reg
,
1484 struct aarch64_register rt
)
1486 return aarch64_emit_insn (buf
, MSR
| ENCODE (system_reg
, 15, 5)
1487 | ENCODE (rt
.num
, 5, 0));
1490 /* Write a SEVL instruction into *BUF.
1492 This is a hint instruction telling the hardware to trigger an event. */
1495 emit_sevl (uint32_t *buf
)
1497 return aarch64_emit_insn (buf
, SEVL
);
1500 /* Write a WFE instruction into *BUF.
1502 This is a hint instruction telling the hardware to wait for an event. */
1505 emit_wfe (uint32_t *buf
)
1507 return aarch64_emit_insn (buf
, WFE
);
1510 /* Write a SBFM instruction into *BUF.
1512 SBFM rd, rn, #immr, #imms
1514 This instruction moves the bits from #immr to #imms into the
1515 destination, sign extending the result.
1517 RD is the destination register.
1518 RN is the source register.
1519 IMMR is the bit number to start at (least significant bit).
1520 IMMS is the bit number to stop at (most significant bit). */
1523 emit_sbfm (uint32_t *buf
, struct aarch64_register rd
,
1524 struct aarch64_register rn
, uint32_t immr
, uint32_t imms
)
1526 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1527 uint32_t n
= ENCODE (rd
.is64
, 1, 22);
1529 return aarch64_emit_insn (buf
, SBFM
| size
| n
| ENCODE (immr
, 6, 16)
1530 | ENCODE (imms
, 6, 10) | ENCODE (rn
.num
, 5, 5)
1531 | ENCODE (rd
.num
, 5, 0));
1534 /* Write a SBFX instruction into *BUF.
1536 SBFX rd, rn, #lsb, #width
1538 This instruction moves #width bits from #lsb into the destination, sign
1539 extending the result. This is an alias for:
1541 SBFM rd, rn, #lsb, #(lsb + width - 1)
1543 RD is the destination register.
1544 RN is the source register.
1545 LSB is the bit number to start at (least significant bit).
1546 WIDTH is the number of bits to move. */
1549 emit_sbfx (uint32_t *buf
, struct aarch64_register rd
,
1550 struct aarch64_register rn
, uint32_t lsb
, uint32_t width
)
1552 return emit_sbfm (buf
, rd
, rn
, lsb
, lsb
+ width
- 1);
1555 /* Write a UBFM instruction into *BUF.
1557 UBFM rd, rn, #immr, #imms
1559 This instruction moves the bits from #immr to #imms into the
1560 destination, extending the result with zeros.
1562 RD is the destination register.
1563 RN is the source register.
1564 IMMR is the bit number to start at (least significant bit).
1565 IMMS is the bit number to stop at (most significant bit). */
1568 emit_ubfm (uint32_t *buf
, struct aarch64_register rd
,
1569 struct aarch64_register rn
, uint32_t immr
, uint32_t imms
)
1571 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1572 uint32_t n
= ENCODE (rd
.is64
, 1, 22);
1574 return aarch64_emit_insn (buf
, UBFM
| size
| n
| ENCODE (immr
, 6, 16)
1575 | ENCODE (imms
, 6, 10) | ENCODE (rn
.num
, 5, 5)
1576 | ENCODE (rd
.num
, 5, 0));
1579 /* Write a UBFX instruction into *BUF.
1581 UBFX rd, rn, #lsb, #width
1583 This instruction moves #width bits from #lsb into the destination,
1584 extending the result with zeros. This is an alias for:
1586 UBFM rd, rn, #lsb, #(lsb + width - 1)
1588 RD is the destination register.
1589 RN is the source register.
1590 LSB is the bit number to start at (least significant bit).
1591 WIDTH is the number of bits to move. */
1594 emit_ubfx (uint32_t *buf
, struct aarch64_register rd
,
1595 struct aarch64_register rn
, uint32_t lsb
, uint32_t width
)
1597 return emit_ubfm (buf
, rd
, rn
, lsb
, lsb
+ width
- 1);
1600 /* Write a CSINC instruction into *BUF.
1602 CSINC rd, rn, rm, cond
1604 This instruction conditionally increments rn or rm and places the result
1605 in rd. rn is chosen is the condition is true.
1607 RD is the destination register.
1608 RN and RM are the source registers.
1609 COND is the encoded condition. */
1612 emit_csinc (uint32_t *buf
, struct aarch64_register rd
,
1613 struct aarch64_register rn
, struct aarch64_register rm
,
1616 uint32_t size
= ENCODE (rd
.is64
, 1, 31);
1618 return aarch64_emit_insn (buf
, CSINC
| size
| ENCODE (rm
.num
, 5, 16)
1619 | ENCODE (cond
, 4, 12) | ENCODE (rn
.num
, 5, 5)
1620 | ENCODE (rd
.num
, 5, 0));
1623 /* Write a CSET instruction into *BUF.
1627 This instruction conditionally write 1 or 0 in the destination register.
1628 1 is written if the condition is true. This is an alias for:
1630 CSINC rd, xzr, xzr, !cond
1632 Note that the condition needs to be inverted.
1634 RD is the destination register.
1635 RN and RM are the source registers.
1636 COND is the encoded condition. */
1639 emit_cset (uint32_t *buf
, struct aarch64_register rd
, unsigned cond
)
1641 /* The least significant bit of the condition needs toggling in order to
1643 return emit_csinc (buf
, rd
, xzr
, xzr
, cond
^ 0x1);
1646 /* Write LEN instructions from BUF into the inferior memory at *TO.
1648 Note instructions are always little endian on AArch64, unlike data. */
1651 append_insns (CORE_ADDR
*to
, size_t len
, const uint32_t *buf
)
1653 size_t byte_len
= len
* sizeof (uint32_t);
1654 #if (__BYTE_ORDER == __BIG_ENDIAN)
1655 uint32_t *le_buf
= (uint32_t *) xmalloc (byte_len
);
1658 for (i
= 0; i
< len
; i
++)
1659 le_buf
[i
] = htole32 (buf
[i
]);
1661 target_write_memory (*to
, (const unsigned char *) le_buf
, byte_len
);
1665 target_write_memory (*to
, (const unsigned char *) buf
, byte_len
);
1671 /* Sub-class of struct aarch64_insn_data, store information of
1672 instruction relocation for fast tracepoint. Visitor can
1673 relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
1674 the relocated instructions in buffer pointed by INSN_PTR. */
1676 struct aarch64_insn_relocation_data
1678 struct aarch64_insn_data base
;
1680 /* The new address the instruction is relocated to. */
1682 /* Pointer to the buffer of relocated instruction(s). */
1686 /* Implementation of aarch64_insn_visitor method "b". */
1689 aarch64_ftrace_insn_reloc_b (const int is_bl
, const int32_t offset
,
1690 struct aarch64_insn_data
*data
)
1692 struct aarch64_insn_relocation_data
*insn_reloc
1693 = (struct aarch64_insn_relocation_data
*) data
;
1695 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
1697 if (can_encode_int32 (new_offset
, 28))
1698 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, is_bl
, new_offset
);
1701 /* Implementation of aarch64_insn_visitor method "b_cond". */
1704 aarch64_ftrace_insn_reloc_b_cond (const unsigned cond
, const int32_t offset
,
1705 struct aarch64_insn_data
*data
)
1707 struct aarch64_insn_relocation_data
*insn_reloc
1708 = (struct aarch64_insn_relocation_data
*) data
;
1710 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
1712 if (can_encode_int32 (new_offset
, 21))
1714 insn_reloc
->insn_ptr
+= emit_bcond (insn_reloc
->insn_ptr
, cond
,
1717 else if (can_encode_int32 (new_offset
, 28))
1719 /* The offset is out of range for a conditional branch
1720 instruction but not for a unconditional branch. We can use
1721 the following instructions instead:
1723 B.COND TAKEN ; If cond is true, then jump to TAKEN.
1724 B NOT_TAKEN ; Else jump over TAKEN and continue.
1731 insn_reloc
->insn_ptr
+= emit_bcond (insn_reloc
->insn_ptr
, cond
, 8);
1732 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, 8);
1733 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, new_offset
- 8);
1737 /* Implementation of aarch64_insn_visitor method "cb". */
1740 aarch64_ftrace_insn_reloc_cb (const int32_t offset
, const int is_cbnz
,
1741 const unsigned rn
, int is64
,
1742 struct aarch64_insn_data
*data
)
1744 struct aarch64_insn_relocation_data
*insn_reloc
1745 = (struct aarch64_insn_relocation_data
*) data
;
1747 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
1749 if (can_encode_int32 (new_offset
, 21))
1751 insn_reloc
->insn_ptr
+= emit_cb (insn_reloc
->insn_ptr
, is_cbnz
,
1752 aarch64_register (rn
, is64
), new_offset
);
1754 else if (can_encode_int32 (new_offset
, 28))
1756 /* The offset is out of range for a compare and branch
1757 instruction but not for a unconditional branch. We can use
1758 the following instructions instead:
1760 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
1761 B NOT_TAKEN ; Else jump over TAKEN and continue.
1767 insn_reloc
->insn_ptr
+= emit_cb (insn_reloc
->insn_ptr
, is_cbnz
,
1768 aarch64_register (rn
, is64
), 8);
1769 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, 8);
1770 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, new_offset
- 8);
1774 /* Implementation of aarch64_insn_visitor method "tb". */
1777 aarch64_ftrace_insn_reloc_tb (const int32_t offset
, int is_tbnz
,
1778 const unsigned rt
, unsigned bit
,
1779 struct aarch64_insn_data
*data
)
1781 struct aarch64_insn_relocation_data
*insn_reloc
1782 = (struct aarch64_insn_relocation_data
*) data
;
1784 = insn_reloc
->base
.insn_addr
- insn_reloc
->new_addr
+ offset
;
1786 if (can_encode_int32 (new_offset
, 16))
1788 insn_reloc
->insn_ptr
+= emit_tb (insn_reloc
->insn_ptr
, is_tbnz
, bit
,
1789 aarch64_register (rt
, 1), new_offset
);
1791 else if (can_encode_int32 (new_offset
, 28))
1793 /* The offset is out of range for a test bit and branch
1794 instruction but not for a unconditional branch. We can use
1795 the following instructions instead:
1797 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
1798 B NOT_TAKEN ; Else jump over TAKEN and continue.
1804 insn_reloc
->insn_ptr
+= emit_tb (insn_reloc
->insn_ptr
, is_tbnz
, bit
,
1805 aarch64_register (rt
, 1), 8);
1806 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0, 8);
1807 insn_reloc
->insn_ptr
+= emit_b (insn_reloc
->insn_ptr
, 0,
1812 /* Implementation of aarch64_insn_visitor method "adr". */
1815 aarch64_ftrace_insn_reloc_adr (const int32_t offset
, const unsigned rd
,
1817 struct aarch64_insn_data
*data
)
1819 struct aarch64_insn_relocation_data
*insn_reloc
1820 = (struct aarch64_insn_relocation_data
*) data
;
1821 /* We know exactly the address the ADR{P,} instruction will compute.
1822 We can just write it to the destination register. */
1823 CORE_ADDR address
= data
->insn_addr
+ offset
;
1827 /* Clear the lower 12 bits of the offset to get the 4K page. */
1828 insn_reloc
->insn_ptr
+= emit_mov_addr (insn_reloc
->insn_ptr
,
1829 aarch64_register (rd
, 1),
1833 insn_reloc
->insn_ptr
+= emit_mov_addr (insn_reloc
->insn_ptr
,
1834 aarch64_register (rd
, 1), address
);
1837 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
1840 aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset
, const int is_sw
,
1841 const unsigned rt
, const int is64
,
1842 struct aarch64_insn_data
*data
)
1844 struct aarch64_insn_relocation_data
*insn_reloc
1845 = (struct aarch64_insn_relocation_data
*) data
;
1846 CORE_ADDR address
= data
->insn_addr
+ offset
;
1848 insn_reloc
->insn_ptr
+= emit_mov_addr (insn_reloc
->insn_ptr
,
1849 aarch64_register (rt
, 1), address
);
1851 /* We know exactly what address to load from, and what register we
1854 MOV xd, #(oldloc + offset)
1855 MOVK xd, #((oldloc + offset) >> 16), lsl #16
1858 LDR xd, [xd] ; or LDRSW xd, [xd]
1863 insn_reloc
->insn_ptr
+= emit_ldrsw (insn_reloc
->insn_ptr
,
1864 aarch64_register (rt
, 1),
1865 aarch64_register (rt
, 1),
1866 offset_memory_operand (0));
1868 insn_reloc
->insn_ptr
+= emit_ldr (insn_reloc
->insn_ptr
,
1869 aarch64_register (rt
, is64
),
1870 aarch64_register (rt
, 1),
1871 offset_memory_operand (0));
1874 /* Implementation of aarch64_insn_visitor method "others". */
1877 aarch64_ftrace_insn_reloc_others (const uint32_t insn
,
1878 struct aarch64_insn_data
*data
)
1880 struct aarch64_insn_relocation_data
*insn_reloc
1881 = (struct aarch64_insn_relocation_data
*) data
;
1883 /* The instruction is not PC relative. Just re-emit it at the new
1885 insn_reloc
->insn_ptr
+= aarch64_emit_insn (insn_reloc
->insn_ptr
, insn
);
1888 static const struct aarch64_insn_visitor visitor
=
1890 aarch64_ftrace_insn_reloc_b
,
1891 aarch64_ftrace_insn_reloc_b_cond
,
1892 aarch64_ftrace_insn_reloc_cb
,
1893 aarch64_ftrace_insn_reloc_tb
,
1894 aarch64_ftrace_insn_reloc_adr
,
1895 aarch64_ftrace_insn_reloc_ldr_literal
,
1896 aarch64_ftrace_insn_reloc_others
,
1899 /* Implementation of linux_target_ops method
1900 "install_fast_tracepoint_jump_pad". */
1903 aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
,
1905 CORE_ADDR collector
,
1908 CORE_ADDR
*jump_entry
,
1909 CORE_ADDR
*trampoline
,
1910 ULONGEST
*trampoline_size
,
1911 unsigned char *jjump_pad_insn
,
1912 ULONGEST
*jjump_pad_insn_size
,
1913 CORE_ADDR
*adjusted_insn_addr
,
1914 CORE_ADDR
*adjusted_insn_addr_end
,
1922 CORE_ADDR buildaddr
= *jump_entry
;
1923 struct aarch64_insn_relocation_data insn_data
;
1925 /* We need to save the current state on the stack both to restore it
1926 later and to collect register values when the tracepoint is hit.
1928 The saved registers are pushed in a layout that needs to be in sync
1929 with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c). Later on
1930 the supply_fast_tracepoint_registers function will fill in the
1931 register cache from a pointer to saved registers on the stack we build
1934 For simplicity, we set the size of each cell on the stack to 16 bytes.
1935 This way one cell can hold any register type, from system registers
1936 to the 128 bit SIMD&FP registers. Furthermore, the stack pointer
1937 has to be 16 bytes aligned anyway.
1939 Note that the CPSR register does not exist on AArch64. Instead we
1940 can access system bits describing the process state with the
1941 MRS/MSR instructions, namely the condition flags. We save them as
1942 if they are part of a CPSR register because that's how GDB
1943 interprets these system bits. At the moment, only the condition
1944 flags are saved in CPSR (NZCV).
1946 Stack layout, each cell is 16 bytes (descending):
1948 High *-------- SIMD&FP registers from 31 down to 0. --------*
1954 *---- General purpose registers from 30 down to 0. ----*
1960 *------------- Special purpose registers. -------------*
1963 | CPSR (NZCV) | 5 cells
1966 *------------- collecting_t object --------------------*
1967 | TPIDR_EL0 | struct tracepoint * |
1968 Low *------------------------------------------------------*
1970 After this stack is set up, we issue a call to the collector, passing
1971 it the saved registers at (SP + 16). */
1973 /* Push SIMD&FP registers on the stack:
1975 SUB sp, sp, #(32 * 16)
1977 STP q30, q31, [sp, #(30 * 16)]
1982 p
+= emit_sub (p
, sp
, sp
, immediate_operand (32 * 16));
1983 for (i
= 30; i
>= 0; i
-= 2)
1984 p
+= emit_stp_q_offset (p
, i
, i
+ 1, sp
, i
* 16);
1986 /* Push general purpose registers on the stack. Note that we do not need
1987 to push x31 as it represents the xzr register and not the stack
1988 pointer in a STR instruction.
1990 SUB sp, sp, #(31 * 16)
1992 STR x30, [sp, #(30 * 16)]
1997 p
+= emit_sub (p
, sp
, sp
, immediate_operand (31 * 16));
1998 for (i
= 30; i
>= 0; i
-= 1)
1999 p
+= emit_str (p
, aarch64_register (i
, 1), sp
,
2000 offset_memory_operand (i
* 16));
2002 /* Make space for 5 more cells.
2004 SUB sp, sp, #(5 * 16)
2007 p
+= emit_sub (p
, sp
, sp
, immediate_operand (5 * 16));
2012 ADD x4, sp, #((32 + 31 + 5) * 16)
2013 STR x4, [sp, #(4 * 16)]
2016 p
+= emit_add (p
, x4
, sp
, immediate_operand ((32 + 31 + 5) * 16));
2017 p
+= emit_str (p
, x4
, sp
, offset_memory_operand (4 * 16));
2019 /* Save PC (tracepoint address):
2024 STR x3, [sp, #(3 * 16)]
2028 p
+= emit_mov_addr (p
, x3
, tpaddr
);
2029 p
+= emit_str (p
, x3
, sp
, offset_memory_operand (3 * 16));
2031 /* Save CPSR (NZCV), FPSR and FPCR:
2037 STR x2, [sp, #(2 * 16)]
2038 STR x1, [sp, #(1 * 16)]
2039 STR x0, [sp, #(0 * 16)]
2042 p
+= emit_mrs (p
, x2
, NZCV
);
2043 p
+= emit_mrs (p
, x1
, FPSR
);
2044 p
+= emit_mrs (p
, x0
, FPCR
);
2045 p
+= emit_str (p
, x2
, sp
, offset_memory_operand (2 * 16));
2046 p
+= emit_str (p
, x1
, sp
, offset_memory_operand (1 * 16));
2047 p
+= emit_str (p
, x0
, sp
, offset_memory_operand (0 * 16));
2049 /* Push the collecting_t object. It consist of the address of the
2050 tracepoint and an ID for the current thread. We get the latter by
2051 reading the tpidr_el0 system register. It corresponds to the
2052 NT_ARM_TLS register accessible with ptrace.
2059 STP x0, x1, [sp, #-16]!
2063 p
+= emit_mov_addr (p
, x0
, tpoint
);
2064 p
+= emit_mrs (p
, x1
, TPIDR_EL0
);
2065 p
+= emit_stp (p
, x0
, x1
, sp
, preindex_memory_operand (-16));
2069 The shared memory for the lock is at lockaddr. It will hold zero
2070 if no-one is holding the lock, otherwise it contains the address of
2071 the collecting_t object on the stack of the thread which acquired it.
2073 At this stage, the stack pointer points to this thread's collecting_t
2076 We use the following registers:
2077 - x0: Address of the lock.
2078 - x1: Pointer to collecting_t object.
2079 - x2: Scratch register.
2085 ; Trigger an event local to this core. So the following WFE
2086 ; instruction is ignored.
2089 ; Wait for an event. The event is triggered by either the SEVL
2090 ; or STLR instructions (store release).
2093 ; Atomically read at lockaddr. This marks the memory location as
2094 ; exclusive. This instruction also has memory constraints which
2095 ; make sure all previous data reads and writes are done before
2099 ; Try again if another thread holds the lock.
2102 ; We can lock it! Write the address of the collecting_t object.
2103 ; This instruction will fail if the memory location is not marked
2104 ; as exclusive anymore. If it succeeds, it will remove the
2105 ; exclusive mark on the memory location. This way, if another
2106 ; thread executes this instruction before us, we will fail and try
2113 p
+= emit_mov_addr (p
, x0
, lockaddr
);
2114 p
+= emit_mov (p
, x1
, register_operand (sp
));
2118 p
+= emit_ldaxr (p
, x2
, x0
);
2119 p
+= emit_cb (p
, 1, w2
, -2 * 4);
2120 p
+= emit_stxr (p
, w2
, x1
, x0
);
2121 p
+= emit_cb (p
, 1, x2
, -4 * 4);
2123 /* Call collector (struct tracepoint *, unsigned char *):
2128 ; Saved registers start after the collecting_t object.
2131 ; We use an intra-procedure-call scratch register.
2132 MOV ip0, #(collector)
2135 ; And call back to C!
2140 p
+= emit_mov_addr (p
, x0
, tpoint
);
2141 p
+= emit_add (p
, x1
, sp
, immediate_operand (16));
2143 p
+= emit_mov_addr (p
, ip0
, collector
);
2144 p
+= emit_blr (p
, ip0
);
2146 /* Release the lock.
2151 ; This instruction is a normal store with memory ordering
2152 ; constraints. Thanks to this we do not have to put a data
2153 ; barrier instruction to make sure all data read and writes are done
2154 ; before this instruction is executed. Furthermore, this instruction
2155 ; will trigger an event, letting other threads know they can grab
2160 p
+= emit_mov_addr (p
, x0
, lockaddr
);
2161 p
+= emit_stlr (p
, xzr
, x0
);
2163 /* Free collecting_t object:
2168 p
+= emit_add (p
, sp
, sp
, immediate_operand (16));
2170 /* Restore CPSR (NZCV), FPSR and FPCR. And free all special purpose
2171 registers from the stack.
2173 LDR x2, [sp, #(2 * 16)]
2174 LDR x1, [sp, #(1 * 16)]
2175 LDR x0, [sp, #(0 * 16)]
2181 ADD sp, sp #(5 * 16)
2184 p
+= emit_ldr (p
, x2
, sp
, offset_memory_operand (2 * 16));
2185 p
+= emit_ldr (p
, x1
, sp
, offset_memory_operand (1 * 16));
2186 p
+= emit_ldr (p
, x0
, sp
, offset_memory_operand (0 * 16));
2187 p
+= emit_msr (p
, NZCV
, x2
);
2188 p
+= emit_msr (p
, FPSR
, x1
);
2189 p
+= emit_msr (p
, FPCR
, x0
);
2191 p
+= emit_add (p
, sp
, sp
, immediate_operand (5 * 16));
2193 /* Pop general purpose registers:
2197 LDR x30, [sp, #(30 * 16)]
2199 ADD sp, sp, #(31 * 16)
2202 for (i
= 0; i
<= 30; i
+= 1)
2203 p
+= emit_ldr (p
, aarch64_register (i
, 1), sp
,
2204 offset_memory_operand (i
* 16));
2205 p
+= emit_add (p
, sp
, sp
, immediate_operand (31 * 16));
2207 /* Pop SIMD&FP registers:
2211 LDP q30, q31, [sp, #(30 * 16)]
2213 ADD sp, sp, #(32 * 16)
2216 for (i
= 0; i
<= 30; i
+= 2)
2217 p
+= emit_ldp_q_offset (p
, i
, i
+ 1, sp
, i
* 16);
2218 p
+= emit_add (p
, sp
, sp
, immediate_operand (32 * 16));
2220 /* Write the code into the inferior memory. */
2221 append_insns (&buildaddr
, p
- buf
, buf
);
2223 /* Now emit the relocated instruction. */
2224 *adjusted_insn_addr
= buildaddr
;
2225 target_read_uint32 (tpaddr
, &insn
);
2227 insn_data
.base
.insn_addr
= tpaddr
;
2228 insn_data
.new_addr
= buildaddr
;
2229 insn_data
.insn_ptr
= buf
;
2231 aarch64_relocate_instruction (insn
, &visitor
,
2232 (struct aarch64_insn_data
*) &insn_data
);
2234 /* We may not have been able to relocate the instruction. */
2235 if (insn_data
.insn_ptr
== buf
)
2238 "E.Could not relocate instruction from %s to %s.",
2239 core_addr_to_string_nz (tpaddr
),
2240 core_addr_to_string_nz (buildaddr
));
2244 append_insns (&buildaddr
, insn_data
.insn_ptr
- buf
, buf
);
2245 *adjusted_insn_addr_end
= buildaddr
;
2247 /* Go back to the start of the buffer. */
2250 /* Emit a branch back from the jump pad. */
2251 offset
= (tpaddr
+ orig_size
- buildaddr
);
2252 if (!can_encode_int32 (offset
, 28))
2255 "E.Jump back from jump pad too far from tracepoint "
2256 "(offset 0x%" PRIx64
" cannot be encoded in 28 bits).",
2261 p
+= emit_b (p
, 0, offset
);
2262 append_insns (&buildaddr
, p
- buf
, buf
);
2264 /* Give the caller a branch instruction into the jump pad. */
2265 offset
= (*jump_entry
- tpaddr
);
2266 if (!can_encode_int32 (offset
, 28))
2269 "E.Jump pad too far from tracepoint "
2270 "(offset 0x%" PRIx64
" cannot be encoded in 28 bits).",
2275 emit_b ((uint32_t *) jjump_pad_insn
, 0, offset
);
2276 *jjump_pad_insn_size
= 4;
2278 /* Return the end address of our pad. */
2279 *jump_entry
= buildaddr
;
2284 /* Helper function writing LEN instructions from START into
2285 current_insn_ptr. */
2288 emit_ops_insns (const uint32_t *start
, int len
)
2290 CORE_ADDR buildaddr
= current_insn_ptr
;
2293 debug_printf ("Adding %d instrucions at %s\n",
2294 len
, paddress (buildaddr
));
2296 append_insns (&buildaddr
, len
, start
);
2297 current_insn_ptr
= buildaddr
;
2300 /* Pop a register from the stack. */
2303 emit_pop (uint32_t *buf
, struct aarch64_register rt
)
2305 return emit_ldr (buf
, rt
, sp
, postindex_memory_operand (1 * 16));
2308 /* Push a register on the stack. */
2311 emit_push (uint32_t *buf
, struct aarch64_register rt
)
2313 return emit_str (buf
, rt
, sp
, preindex_memory_operand (-1 * 16));
2316 /* Implementation of emit_ops method "emit_prologue". */
2319 aarch64_emit_prologue (void)
2324 /* This function emit a prologue for the following function prototype:
2326 enum eval_result_type f (unsigned char *regs,
2329 The first argument is a buffer of raw registers. The second
2330 argument is the result of
2331 evaluating the expression, which will be set to whatever is on top of
2332 the stack at the end.
2334 The stack set up by the prologue is as such:
2336 High *------------------------------------------------------*
2339 | x1 (ULONGEST *value) |
2340 | x0 (unsigned char *regs) |
2341 Low *------------------------------------------------------*
2343 As we are implementing a stack machine, each opcode can expand the
2344 stack so we never know how far we are from the data saved by this
2345 prologue. In order to be able refer to value and regs later, we save
2346 the current stack pointer in the frame pointer. This way, it is not
2347 clobbered when calling C functions.
2349 Finally, throughout every operation, we are using register x0 as the
2350 top of the stack, and x1 as a scratch register. */
2352 p
+= emit_stp (p
, x0
, x1
, sp
, preindex_memory_operand (-2 * 16));
2353 p
+= emit_str (p
, lr
, sp
, offset_memory_operand (3 * 8));
2354 p
+= emit_str (p
, fp
, sp
, offset_memory_operand (2 * 8));
2356 p
+= emit_add (p
, fp
, sp
, immediate_operand (2 * 8));
2359 emit_ops_insns (buf
, p
- buf
);
2362 /* Implementation of emit_ops method "emit_epilogue". */
2365 aarch64_emit_epilogue (void)
2370 /* Store the result of the expression (x0) in *value. */
2371 p
+= emit_sub (p
, x1
, fp
, immediate_operand (1 * 8));
2372 p
+= emit_ldr (p
, x1
, x1
, offset_memory_operand (0));
2373 p
+= emit_str (p
, x0
, x1
, offset_memory_operand (0));
2375 /* Restore the previous state. */
2376 p
+= emit_add (p
, sp
, fp
, immediate_operand (2 * 8));
2377 p
+= emit_ldp (p
, fp
, lr
, fp
, offset_memory_operand (0));
2379 /* Return expr_eval_no_error. */
2380 p
+= emit_mov (p
, x0
, immediate_operand (expr_eval_no_error
));
2381 p
+= emit_ret (p
, lr
);
2383 emit_ops_insns (buf
, p
- buf
);
2386 /* Implementation of emit_ops method "emit_add". */
2389 aarch64_emit_add (void)
2394 p
+= emit_pop (p
, x1
);
2395 p
+= emit_add (p
, x0
, x1
, register_operand (x0
));
2397 emit_ops_insns (buf
, p
- buf
);
2400 /* Implementation of emit_ops method "emit_sub". */
2403 aarch64_emit_sub (void)
2408 p
+= emit_pop (p
, x1
);
2409 p
+= emit_sub (p
, x0
, x1
, register_operand (x0
));
2411 emit_ops_insns (buf
, p
- buf
);
2414 /* Implementation of emit_ops method "emit_mul". */
2417 aarch64_emit_mul (void)
2422 p
+= emit_pop (p
, x1
);
2423 p
+= emit_mul (p
, x0
, x1
, x0
);
2425 emit_ops_insns (buf
, p
- buf
);
2428 /* Implementation of emit_ops method "emit_lsh". */
2431 aarch64_emit_lsh (void)
2436 p
+= emit_pop (p
, x1
);
2437 p
+= emit_lslv (p
, x0
, x1
, x0
);
2439 emit_ops_insns (buf
, p
- buf
);
2442 /* Implementation of emit_ops method "emit_rsh_signed". */
2445 aarch64_emit_rsh_signed (void)
2450 p
+= emit_pop (p
, x1
);
2451 p
+= emit_asrv (p
, x0
, x1
, x0
);
2453 emit_ops_insns (buf
, p
- buf
);
2456 /* Implementation of emit_ops method "emit_rsh_unsigned". */
2459 aarch64_emit_rsh_unsigned (void)
2464 p
+= emit_pop (p
, x1
);
2465 p
+= emit_lsrv (p
, x0
, x1
, x0
);
2467 emit_ops_insns (buf
, p
- buf
);
2470 /* Implementation of emit_ops method "emit_ext". */
2473 aarch64_emit_ext (int arg
)
2478 p
+= emit_sbfx (p
, x0
, x0
, 0, arg
);
2480 emit_ops_insns (buf
, p
- buf
);
2483 /* Implementation of emit_ops method "emit_log_not". */
2486 aarch64_emit_log_not (void)
2491 /* If the top of the stack is 0, replace it with 1. Else replace it with
2494 p
+= emit_cmp (p
, x0
, immediate_operand (0));
2495 p
+= emit_cset (p
, x0
, EQ
);
2497 emit_ops_insns (buf
, p
- buf
);
2500 /* Implementation of emit_ops method "emit_bit_and". */
2503 aarch64_emit_bit_and (void)
2508 p
+= emit_pop (p
, x1
);
2509 p
+= emit_and (p
, x0
, x0
, x1
);
2511 emit_ops_insns (buf
, p
- buf
);
2514 /* Implementation of emit_ops method "emit_bit_or". */
2517 aarch64_emit_bit_or (void)
2522 p
+= emit_pop (p
, x1
);
2523 p
+= emit_orr (p
, x0
, x0
, x1
);
2525 emit_ops_insns (buf
, p
- buf
);
2528 /* Implementation of emit_ops method "emit_bit_xor". */
2531 aarch64_emit_bit_xor (void)
2536 p
+= emit_pop (p
, x1
);
2537 p
+= emit_eor (p
, x0
, x0
, x1
);
2539 emit_ops_insns (buf
, p
- buf
);
2542 /* Implementation of emit_ops method "emit_bit_not". */
2545 aarch64_emit_bit_not (void)
2550 p
+= emit_mvn (p
, x0
, x0
);
2552 emit_ops_insns (buf
, p
- buf
);
2555 /* Implementation of emit_ops method "emit_equal". */
2558 aarch64_emit_equal (void)
2563 p
+= emit_pop (p
, x1
);
2564 p
+= emit_cmp (p
, x0
, register_operand (x1
));
2565 p
+= emit_cset (p
, x0
, EQ
);
2567 emit_ops_insns (buf
, p
- buf
);
2570 /* Implementation of emit_ops method "emit_less_signed". */
2573 aarch64_emit_less_signed (void)
2578 p
+= emit_pop (p
, x1
);
2579 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2580 p
+= emit_cset (p
, x0
, LT
);
2582 emit_ops_insns (buf
, p
- buf
);
2585 /* Implementation of emit_ops method "emit_less_unsigned". */
2588 aarch64_emit_less_unsigned (void)
2593 p
+= emit_pop (p
, x1
);
2594 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2595 p
+= emit_cset (p
, x0
, LO
);
2597 emit_ops_insns (buf
, p
- buf
);
2600 /* Implementation of emit_ops method "emit_ref". */
2603 aarch64_emit_ref (int size
)
2611 p
+= emit_ldrb (p
, w0
, x0
, offset_memory_operand (0));
2614 p
+= emit_ldrh (p
, w0
, x0
, offset_memory_operand (0));
2617 p
+= emit_ldr (p
, w0
, x0
, offset_memory_operand (0));
2620 p
+= emit_ldr (p
, x0
, x0
, offset_memory_operand (0));
2623 /* Unknown size, bail on compilation. */
2628 emit_ops_insns (buf
, p
- buf
);
2631 /* Implementation of emit_ops method "emit_if_goto". */
2634 aarch64_emit_if_goto (int *offset_p
, int *size_p
)
2639 /* The Z flag is set or cleared here. */
2640 p
+= emit_cmp (p
, x0
, immediate_operand (0));
2641 /* This instruction must not change the Z flag. */
2642 p
+= emit_pop (p
, x0
);
2643 /* Branch over the next instruction if x0 == 0. */
2644 p
+= emit_bcond (p
, EQ
, 8);
2646 /* The NOP instruction will be patched with an unconditional branch. */
2648 *offset_p
= (p
- buf
) * 4;
2653 emit_ops_insns (buf
, p
- buf
);
2656 /* Implementation of emit_ops method "emit_goto". */
2659 aarch64_emit_goto (int *offset_p
, int *size_p
)
2664 /* The NOP instruction will be patched with an unconditional branch. */
2671 emit_ops_insns (buf
, p
- buf
);
2674 /* Implementation of emit_ops method "write_goto_address". */
2677 aarch64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2681 emit_b (&insn
, 0, to
- from
);
2682 append_insns (&from
, 1, &insn
);
2685 /* Implementation of emit_ops method "emit_const". */
2688 aarch64_emit_const (LONGEST num
)
2693 p
+= emit_mov_addr (p
, x0
, num
);
2695 emit_ops_insns (buf
, p
- buf
);
2698 /* Implementation of emit_ops method "emit_call". */
2701 aarch64_emit_call (CORE_ADDR fn
)
2706 p
+= emit_mov_addr (p
, ip0
, fn
);
2707 p
+= emit_blr (p
, ip0
);
2709 emit_ops_insns (buf
, p
- buf
);
2712 /* Implementation of emit_ops method "emit_reg". */
2715 aarch64_emit_reg (int reg
)
2720 /* Set x0 to unsigned char *regs. */
2721 p
+= emit_sub (p
, x0
, fp
, immediate_operand (2 * 8));
2722 p
+= emit_ldr (p
, x0
, x0
, offset_memory_operand (0));
2723 p
+= emit_mov (p
, x1
, immediate_operand (reg
));
2725 emit_ops_insns (buf
, p
- buf
);
2727 aarch64_emit_call (get_raw_reg_func_addr ());
2730 /* Implementation of emit_ops method "emit_pop". */
2733 aarch64_emit_pop (void)
2738 p
+= emit_pop (p
, x0
);
2740 emit_ops_insns (buf
, p
- buf
);
2743 /* Implementation of emit_ops method "emit_stack_flush". */
2746 aarch64_emit_stack_flush (void)
2751 p
+= emit_push (p
, x0
);
2753 emit_ops_insns (buf
, p
- buf
);
2756 /* Implementation of emit_ops method "emit_zero_ext". */
2759 aarch64_emit_zero_ext (int arg
)
2764 p
+= emit_ubfx (p
, x0
, x0
, 0, arg
);
2766 emit_ops_insns (buf
, p
- buf
);
2769 /* Implementation of emit_ops method "emit_swap". */
2772 aarch64_emit_swap (void)
2777 p
+= emit_ldr (p
, x1
, sp
, offset_memory_operand (0 * 16));
2778 p
+= emit_str (p
, x0
, sp
, offset_memory_operand (0 * 16));
2779 p
+= emit_mov (p
, x0
, register_operand (x1
));
2781 emit_ops_insns (buf
, p
- buf
);
2784 /* Implementation of emit_ops method "emit_stack_adjust". */
2787 aarch64_emit_stack_adjust (int n
)
2789 /* This is not needed with our design. */
2793 p
+= emit_add (p
, sp
, sp
, immediate_operand (n
* 16));
2795 emit_ops_insns (buf
, p
- buf
);
2798 /* Implementation of emit_ops method "emit_int_call_1". */
2801 aarch64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2806 p
+= emit_mov (p
, x0
, immediate_operand (arg1
));
2808 emit_ops_insns (buf
, p
- buf
);
2810 aarch64_emit_call (fn
);
2813 /* Implementation of emit_ops method "emit_void_call_2". */
2816 aarch64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2821 /* Push x0 on the stack. */
2822 aarch64_emit_stack_flush ();
2824 /* Setup arguments for the function call:
2827 x1: top of the stack
2832 p
+= emit_mov (p
, x1
, register_operand (x0
));
2833 p
+= emit_mov (p
, x0
, immediate_operand (arg1
));
2835 emit_ops_insns (buf
, p
- buf
);
2837 aarch64_emit_call (fn
);
2840 aarch64_emit_pop ();
2843 /* Implementation of emit_ops method "emit_eq_goto". */
2846 aarch64_emit_eq_goto (int *offset_p
, int *size_p
)
2851 p
+= emit_pop (p
, x1
);
2852 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2853 /* Branch over the next instruction if x0 != x1. */
2854 p
+= emit_bcond (p
, NE
, 8);
2855 /* The NOP instruction will be patched with an unconditional branch. */
2857 *offset_p
= (p
- buf
) * 4;
2862 emit_ops_insns (buf
, p
- buf
);
2865 /* Implementation of emit_ops method "emit_ne_goto". */
2868 aarch64_emit_ne_goto (int *offset_p
, int *size_p
)
2873 p
+= emit_pop (p
, x1
);
2874 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2875 /* Branch over the next instruction if x0 == x1. */
2876 p
+= emit_bcond (p
, EQ
, 8);
2877 /* The NOP instruction will be patched with an unconditional branch. */
2879 *offset_p
= (p
- buf
) * 4;
2884 emit_ops_insns (buf
, p
- buf
);
2887 /* Implementation of emit_ops method "emit_lt_goto". */
2890 aarch64_emit_lt_goto (int *offset_p
, int *size_p
)
2895 p
+= emit_pop (p
, x1
);
2896 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2897 /* Branch over the next instruction if x0 >= x1. */
2898 p
+= emit_bcond (p
, GE
, 8);
2899 /* The NOP instruction will be patched with an unconditional branch. */
2901 *offset_p
= (p
- buf
) * 4;
2906 emit_ops_insns (buf
, p
- buf
);
2909 /* Implementation of emit_ops method "emit_le_goto". */
2912 aarch64_emit_le_goto (int *offset_p
, int *size_p
)
2917 p
+= emit_pop (p
, x1
);
2918 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2919 /* Branch over the next instruction if x0 > x1. */
2920 p
+= emit_bcond (p
, GT
, 8);
2921 /* The NOP instruction will be patched with an unconditional branch. */
2923 *offset_p
= (p
- buf
) * 4;
2928 emit_ops_insns (buf
, p
- buf
);
2931 /* Implementation of emit_ops method "emit_gt_goto". */
2934 aarch64_emit_gt_goto (int *offset_p
, int *size_p
)
2939 p
+= emit_pop (p
, x1
);
2940 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2941 /* Branch over the next instruction if x0 <= x1. */
2942 p
+= emit_bcond (p
, LE
, 8);
2943 /* The NOP instruction will be patched with an unconditional branch. */
2945 *offset_p
= (p
- buf
) * 4;
2950 emit_ops_insns (buf
, p
- buf
);
2953 /* Implementation of emit_ops method "emit_ge_got". */
2956 aarch64_emit_ge_got (int *offset_p
, int *size_p
)
2961 p
+= emit_pop (p
, x1
);
2962 p
+= emit_cmp (p
, x1
, register_operand (x0
));
2963 /* Branch over the next instruction if x0 <= x1. */
2964 p
+= emit_bcond (p
, LT
, 8);
2965 /* The NOP instruction will be patched with an unconditional branch. */
2967 *offset_p
= (p
- buf
) * 4;
2972 emit_ops_insns (buf
, p
- buf
);
2975 static struct emit_ops aarch64_emit_ops_impl
=
2977 aarch64_emit_prologue
,
2978 aarch64_emit_epilogue
,
2983 aarch64_emit_rsh_signed
,
2984 aarch64_emit_rsh_unsigned
,
2986 aarch64_emit_log_not
,
2987 aarch64_emit_bit_and
,
2988 aarch64_emit_bit_or
,
2989 aarch64_emit_bit_xor
,
2990 aarch64_emit_bit_not
,
2992 aarch64_emit_less_signed
,
2993 aarch64_emit_less_unsigned
,
2995 aarch64_emit_if_goto
,
2997 aarch64_write_goto_address
,
3002 aarch64_emit_stack_flush
,
3003 aarch64_emit_zero_ext
,
3005 aarch64_emit_stack_adjust
,
3006 aarch64_emit_int_call_1
,
3007 aarch64_emit_void_call_2
,
3008 aarch64_emit_eq_goto
,
3009 aarch64_emit_ne_goto
,
3010 aarch64_emit_lt_goto
,
3011 aarch64_emit_le_goto
,
3012 aarch64_emit_gt_goto
,
3013 aarch64_emit_ge_got
,
3016 /* Implementation of linux_target_ops method "emit_ops". */
3018 static struct emit_ops
*
3019 aarch64_emit_ops (void)
3021 return &aarch64_emit_ops_impl
;
/* Implementation of linux_target_ops method
   "get_min_fast_tracepoint_insn_len".

   Every A64 instruction is 4 bytes, so a fast tracepoint can replace
   any single instruction.  */

static int
aarch64_get_min_fast_tracepoint_insn_len (void)
{
  return 4;
}
/* Implementation of linux_target_ops method "supports_range_stepping".  */

static int
aarch64_supports_range_stepping (void)
{
  return 1;
}
3041 /* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
3043 static const gdb_byte
*
3044 aarch64_sw_breakpoint_from_kind (int kind
, int *size
)
3046 if (is_64bit_tdesc ())
3048 *size
= aarch64_breakpoint_len
;
3049 return aarch64_breakpoint
;
3052 return arm_sw_breakpoint_from_kind (kind
, size
);
3055 /* Implementation of linux_target_ops method "breakpoint_kind_from_pc". */
3058 aarch64_breakpoint_kind_from_pc (CORE_ADDR
*pcptr
)
3060 if (is_64bit_tdesc ())
3061 return aarch64_breakpoint_len
;
3063 return arm_breakpoint_kind_from_pc (pcptr
);
3066 /* Implementation of the linux_target_ops method
3067 "breakpoint_kind_from_current_state". */
3070 aarch64_breakpoint_kind_from_current_state (CORE_ADDR
*pcptr
)
3072 if (is_64bit_tdesc ())
3073 return aarch64_breakpoint_len
;
3075 return arm_breakpoint_kind_from_current_state (pcptr
);
/* Support for hardware single step.  */

static int
aarch64_supports_hardware_single_step (void)
{
  return 1;
}
3086 struct linux_target_ops the_low_target
=
3090 aarch64_breakpoint_kind_from_pc
,
3091 aarch64_sw_breakpoint_from_kind
,
3092 NULL
, /* get_next_pcs */
3093 0, /* decr_pc_after_break */
3094 aarch64_breakpoint_at
,
3095 aarch64_supports_z_point_type
,
3096 aarch64_insert_point
,
3097 aarch64_remove_point
,
3098 aarch64_stopped_by_watchpoint
,
3099 aarch64_stopped_data_address
,
3100 NULL
, /* collect_ptrace_register */
3101 NULL
, /* supply_ptrace_register */
3102 aarch64_linux_siginfo_fixup
,
3103 aarch64_linux_new_process
,
3104 aarch64_linux_delete_process
,
3105 aarch64_linux_new_thread
,
3106 aarch64_linux_delete_thread
,
3107 aarch64_linux_new_fork
,
3108 aarch64_linux_prepare_to_resume
,
3109 NULL
, /* process_qsupported */
3110 aarch64_supports_tracepoints
,
3111 aarch64_get_thread_area
,
3112 aarch64_install_fast_tracepoint_jump_pad
,
3114 aarch64_get_min_fast_tracepoint_insn_len
,
3115 aarch64_supports_range_stepping
,
3116 aarch64_breakpoint_kind_from_current_state
,
3117 aarch64_supports_hardware_single_step
,
3118 aarch64_get_syscall_trapinfo
,
3121 /* The linux target ops object. */
3123 linux_process_target
*the_linux_target
= &the_aarch64_target
;
3126 initialize_low_arch (void)
3128 initialize_low_arch_aarch32 ();
3130 initialize_regsets_info (&aarch64_regsets_info
);
3131 initialize_regsets_info (&aarch64_sve_regsets_info
);