/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2019 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "ax.h"
#include "tracepoint.h"

#include <signal.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>
#include <inttypes.h>
#include <endian.h>
#include <sys/uio.h>

#include "gdb_proc_service.h"
#include "arch/aarch64.h"
#include "linux-aarch32-tdesc.h"
#include "linux-aarch64-tdesc.h"
#include "nat/aarch64-sve-linux-ptrace.h"
#include "tdesc.h"

#ifdef HAVE_SYS_REG_H
#include <sys/reg.h>
#endif

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};

/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

/* Return true if the regcache contains the number of SVE registers.  */

static bool
is_sve_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return regcache->tdesc->reg_defs.size () == AARCH64_SVE_NUM_REGS;
}

static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
}

static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
}

/* Store the pauth registers to regcache.  */

static void
aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
{
  uint64_t *pauth_regset = (uint64_t *) buf;
  int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");

  if (pauth_base == 0)
    return;

  supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
		   &pauth_regset[0]);
  supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
		   &pauth_regset[1]);
}

/* Enable miscellaneous debugging output.  The name is historical - it
   was originally used to debug LinuxThreads support.  */
extern int debug_threads;

/* Implementation of linux_target_ops method "get_pc".  */

static CORE_ADDR
aarch64_get_pc (struct regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    return linux_get_pc_64bit (regcache);
  else
    return linux_get_pc_32bit (regcache);
}

/* Implementation of linux_target_ops method "set_pc".  */

static void
aarch64_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    linux_set_pc_64bit (regcache, pc);
  else
    linux_set_pc_32bit (regcache, pc);
}

#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

/* Implementation of linux_target_ops method "breakpoint_at".  */

static int
aarch64_breakpoint_at (CORE_ADDR where)
{
  if (is_64bit_tdesc ())
    {
      gdb_byte insn[aarch64_breakpoint_len];

      (*the_target->read_memory) (where, (unsigned char *) &insn,
				  aarch64_breakpoint_len);
      if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
	return 1;

      return 0;
    }
  else
    return arm_breakpoint_at (where);
}

static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}

/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* Implementation of linux_target_ops method "supports_z_point_type".  */

static int
aarch64_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}

/* Implementation of linux_target_ops method "insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

static int
aarch64_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
	ret = aarch64_handle_watchpoint (targ_type, addr, len,
					 1 /* is_insert */, state);
      else
	ret = -1;
    }
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware breakpoint/watchpoint control
	     register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "remove_point".

   It actually only records the info of the to-be-removed bp/wp,
   the actual removal will be done when threads are resumed.  */

static int
aarch64_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret =
      aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
				 state);
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware breakpoint/watchpoint control
	     register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "stopped_data_address".  */

static CORE_ADDR
aarch64_stopped_data_address (void)
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int offset
	= aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
      const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
      const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
      const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
      const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];

      if (state->dr_ref_count_wp[i]
	  && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
	  && addr_trap >= addr_watch_aligned
	  && addr_trap < addr_watch + len)
	{
	  /* ADDR_TRAP reports the first address of the memory range
	     accessed by the CPU, regardless of what was the memory
	     range watched.  Thus, a large CPU access that straddles
	     the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
	     ADDR_TRAP that is lower than the
	     ADDR_WATCH..ADDR_WATCH+LEN range.  E.g.:

	     addr: |   4   |   5   |   6   |   7   |   8   |
		           |---- range watched ----|
		   |----------- range accessed ------------|

	     In this case, ADDR_TRAP will be 4.

	     To match a watchpoint known to GDB core, we must never
	     report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
	     range.  ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
	     positive on kernels older than 4.10.  See PR
	     external/20207.  */
	  return addr_orig;
	}
    }

  return (CORE_ADDR) 0;
}

/* Implementation of linux_target_ops method "stopped_by_watchpoint".  */

static int
aarch64_stopped_by_watchpoint (void)
{
  if (aarch64_stopped_data_address () != 0)
    return 1;
  else
    return 0;
}

/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
				     is_64bit_tdesc ());
}

/* Implementation of linux_target_ops method "siginfo_fixup".  */

static int
aarch64_linux_siginfo_fixup (siginfo_t *native, gdb_byte *inf, int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
	aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
					     native);
      else
	aarch64_siginfo_from_compat_siginfo (native,
					     (struct compat_siginfo *) inf);

      return 1;
    }

  return 0;
}

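/* A note on DIRECTION, inferred from how linux-low.c drives this hook
   (worth double-checking against the caller): 0 converts the native
   siginfo into the inferior's compat layout (remote reads of siginfo),
   while any other value converts the compat layout back to native
   (remote writes).  */
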
/* Implementation of linux_target_ops method "new_process".  */

static struct arch_process_info *
aarch64_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux_target_ops method "delete_process".  */

static void
aarch64_linux_delete_process (struct arch_process_info *info)
{
  xfree (info);
}

/* Implementation of linux_target_ops method "linux_new_fork".  */

static void
aarch64_linux_new_fork (struct process_info *parent,
			struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernels before the 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     inherit hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked-off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will be zeroed
     in the end before detaching the forked-off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h.  */
#define AARCH64_HWCAP_PACA (1 << 30)

/* Implementation of linux_target_ops method "arch_setup".  */

static void
aarch64_arch_setup (void)
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    {
      uint64_t vq = aarch64_sve_get_vq (tid);
      unsigned long hwcap = linux_get_hwcap (8);
      bool pauth_p = hwcap & AARCH64_HWCAP_PACA;

      current_process ()->tdesc = aarch64_linux_read_description (vq, pauth_p);
    }
  else
    current_process ()->tdesc = aarch32_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}

/* Wrapper for aarch64_sve_regs_copy_to_reg_buf.  */

static void
aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
{
  return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
}

/* Wrapper for aarch64_sve_regs_copy_from_reg_buf.  */

static void
aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
{
  return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
}

static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };

static struct regset_info aarch64_sve_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
    SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
    aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache
  },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
    AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
    NULL, aarch64_store_pauthregset },
  NULL_REGSET
};

static struct regsets_info aarch64_sve_regsets_info =
  {
    aarch64_sve_regsets, /* regsets.  */
    0, /* num_regsets.  */
    NULL, /* disabled_regsets.  */
  };

static struct regs_info regs_info_aarch64_sve =
  {
    NULL, /* regset_bitmap.  */
    NULL, /* usrregs.  */
    &aarch64_sve_regsets_info,
  };

/* Implementation of linux_target_ops method "regs_info".  */

static const struct regs_info *
aarch64_regs_info (void)
{
  if (!is_64bit_tdesc ())
    return &regs_info_aarch32;

  if (is_sve_tdesc ())
    return &regs_info_aarch64_sve;

  return &regs_info_aarch64;
}

/* Implementation of linux_target_ops method "supports_tracepoints".  */

static int
aarch64_supports_tracepoints (void)
{
  if (current_thread == NULL)
    return 1;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}

/* Implementation of linux_target_ops method "get_thread_area".  */

static int
aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}

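/* Illustrative note: NT_ARM_TLS exposes the TPIDR_EL0 software thread
   ID register, so the value fetched above is the same one in-process
   code would obtain with "MRS xt, tpidr_el0" (see emit_mrs and
   TPIDR_EL0 below).  */
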
/* Implementation of linux_target_ops method "get_syscall_trapinfo".  */

static void
aarch64_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "x8", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "r7", sysno);
}

/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

enum aarch64_operand_type
{
  OPERAND_IMMEDIATE,
  OPERAND_REGISTER,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum aarch64_operand_type type;

  /* Value of the operand according to the type.  */
  union
  {
    uint32_t imm;
    struct aarch64_register reg;
  };
};

/* List of registers that we are currently using, we can add more here as
   we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };

/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   helper function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}

/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}

/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}

/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
   name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control register.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};

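/* A worked example of the encoding scheme above (for illustration
   only): TPIDR_EL0 is op0=3, op1=3, CRn=0xd, CRm=0x0, op2=2 in the
   ARMv8 system register space.  Only the low bit of op0 is encoded
   (op0 = 2 + o0, so o0 = 1 here), giving

     (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2 = 0x5e82,

   the 15-bit field that emit_mrs/emit_msr below place at bits [5,20)
   of the opcode.  */
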
/* Write a BLR instruction into *BUF.

   BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}

/* Write a RET instruction into *BUF.

   RET xn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}

static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rt,
		      struct aarch64_register rt2,
		      struct aarch64_register rn,
		      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (0, 1, 23);
	break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
	pre_index = ENCODE (0, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
			    | ENCODE (operand.index >> 3, 7, 15)
			    | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

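/* To make the field packing above concrete, a sketch of one call (the
   operand values are hypothetical): emit_stp (p, x0, x1, sp,
   offset_memory_operand (16)) uses a 64-bit RT, so opc = 2 at bit 30;
   the plain offset form sets pre_index = 1 and write_back = 0; and the
   byte offset is scaled to 16 >> 3 = 2 before landing in the 7-bit
   immediate field at bit 15.  The result is the A64 encoding of
   "STP x0, x1, [sp, #16]".  */
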
/* Write a STP instruction into *BUF.

   STP rt, rt2, [rn, #offset]
   STP rt, rt2, [rn, #index]!
   STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}

/* Write a LDP instruction into *BUF.

   LDP rt, rt2, [rn, #offset]
   LDP rt, rt2, [rn, #index]!
   LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}

/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

   LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

   STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a LDRH instruction into *BUF.

   LDRH wt, [xn, #offset]
   LDRH wt, [xn, #index]!
   LDRH wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}

/* Write a LDRB instruction into *BUF.

   LDRB wt, [xn, #offset]
   LDRB wt, [xn, #index]!
   LDRB wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}


/* Write a STR instruction into *BUF.

   STR rt, [rn, #offset]
   STR rt, [rn, #index]!
   STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}

/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
			   enum aarch64_opcodes opcode,
			   struct aarch64_register rs,
			   struct aarch64_register rt,
			   struct aarch64_register rt2,
			   struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
			    | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a LDAXR instruction into *BUF.

   LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
	    struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
				    xzr, rn);
}

/* Write a STXR instruction into *BUF.

   STXR ws, rt, [xn]

   RS is the result register; it indicates if the store succeeded or not.
   RT is the destination register.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
	   struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
				    xzr, rn);
}

/* Write a STLR instruction into *BUF.

   STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
				    xzr, rn);
}

/* Helper function for data processing instructions with register sources.  */

static int
emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
			  struct aarch64_register rd,
			  struct aarch64_register rn,
			  struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}

/* Helper function for data processing instructions taking either a register
   or an immediate.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rd,
		      struct aarch64_register rn,
		      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
				| ENCODE (operand.imm, 12, 10)
				| ENCODE (rn.num, 5, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
				       rn, operand.reg);
    }
}

/* Write an ADD instruction into *BUF.

   ADD rd, rn, #imm
   ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}

/* Write a SUB instruction into *BUF.

   SUB rd, rn, #imm
   SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   IMM is the immediate to subtract from RN.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}

/* Write a MOV instruction into *BUF.

   MOV rd, #imm
   MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
				| ENCODE (operand.imm, 16, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}

/* Write a MOVK instruction into *BUF.

   MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
	   unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21) |
			    ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}

/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}

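/* For illustration, with the hypothetical address 0x0000007fb7e41234,
   emit_mov_addr produces

     MOV  xd, #0x1234
     MOVK xd, #0xb7e4, lsl #16
     MOVK xd, #0x7f, lsl #32

   and stops there, since bits [48,64) of the address are all zero.  */
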
/* Write a SUBS instruction into *BUF.

   SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}

/* Write a CMP instruction into *BUF.

   CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
	  struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}

/* Write an AND instruction into *BUF.

   AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}

/* Write an ORR instruction into *BUF.

   ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}

/* Write an ORN instruction into *BUF.

   ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}

/* Write an EOR instruction into *BUF.

   EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}

/* Write a MVN instruction into *BUF.

   MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}

/* Write a LSLV instruction into *BUF.

   LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}

/* Write a LSRV instruction into *BUF.

   LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}

/* Write an ASRV instruction into *BUF.

   ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}

/* Write a MUL instruction into *BUF.

   MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}

/* Write a MRS instruction into *BUF.  The register size is 64-bit.

   MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
	  enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write a MSR instruction into *BUF.  The register size is 64-bit.

   MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
	  struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}

/* Write a SBFM instruction into *BUF.

   SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a SBFX instruction into *BUF.

   SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a UBFM instruction into *BUF.

   UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a UBFX instruction into *BUF.

   UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}

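/* Concretely (illustration only): emit_ubfx (p, w0, w2, 8, 4) expands
   to UBFM w0, w2, #8, #11, copying bits [8,12) of w2 into the low four
   bits of w0 and zeroing the rest.  */
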
/* Write a CSINC instruction into *BUF.

   CSINC rd, rn, rm, cond

   This instruction conditionally increments rn or rm and places the result
   in rd.  rn is chosen if the condition is true.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
	    struct aarch64_register rn, struct aarch64_register rm,
	    unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a CSET instruction into *BUF.

   CSET rd, cond

   This instruction conditionally writes 1 or 0 in the destination register.
   1 is written if the condition is true.  This is an alias for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}

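/* For example (illustration only): emit_cset (p, x0, EQ) emits
   CSINC x0, xzr, xzr, NE.  With the condition inverted, the CSINC
   selects xzr + 1 = 1 exactly when the original EQ condition holds,
   and xzr = 0 otherwise.  */
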
/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note instructions are always little endian on AArch64, unlike data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  target_write_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  target_write_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}

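/* A minimal usage sketch, mirroring how the jump pad builder below
   drives these emitters (the values are hypothetical):

     uint32_t buf[16];
     uint32_t *p = buf;
     CORE_ADDR buildaddr = *jump_entry;

     p += emit_mov_addr (p, x0, lockaddr);
     p += emit_blr (p, x0);
     append_insns (&buildaddr, p - buf, buf);

   BUILDADDR advances past the bytes just written, ready for the next
   batch of instructions.  */
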
/* Sub-class of struct aarch64_insn_data, store information of
   instruction relocation for fast tracepoint.  Visitor can
   relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
   the relocated instructions in buffer pointed by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;
  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};

/* Implementation of aarch64_insn_visitor method "b".  */

static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
			     struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
}

/* Implementation of aarch64_insn_visitor method "b_cond".  */

static void
aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
				  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
					  new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a conditional branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	 B.COND TAKEN    ; If cond is true, then jump to TAKEN.
	 B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	 B #(offset - 8)
	 NOT_TAKEN:

	 */

      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "cb".  */

static void
aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
			      const unsigned rn, int is64,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a compare and branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	 CBZ xn, TAKEN   ; xn == 0, then jump to TAKEN.
	 B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	 B #(offset - 8)
	 NOT_TAKEN:

	 */
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "tb".  */

static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
			      const unsigned rt, unsigned bit,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int64_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
    {
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a test bit and branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
	 B NOT_TAKEN         ; Else jump over TAKEN and continue.
	 TAKEN:
	 B #(offset - 8)
	 NOT_TAKEN:

	 */
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
				      new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "adr".  */

static void
aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
			       const int is_adrp,
			       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					     aarch64_register (rd, 1),
					     address & ~0xfff);
    }
  else
    insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					   aarch64_register (rd, 1), address);
}

0badd99f 1805/* Implementation of aarch64_insn_visitor method "ldr_literal". */
bb903df0 1806
0badd99f
YQ
1807static void
1808aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
1809 const unsigned rt, const int is64,
1810 struct aarch64_insn_data *data)
1811{
1812 struct aarch64_insn_relocation_data *insn_reloc
1813 = (struct aarch64_insn_relocation_data *) data;
1814 CORE_ADDR address = data->insn_addr + offset;
1815
1816 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1817 aarch64_register (rt, 1), address);
1818
1819 /* We know exactly what address to load from, and what register we
1820 can use:
1821
1822 MOV xd, #(oldloc + offset)
1823 MOVK xd, #((oldloc + offset) >> 16), lsl #16
1824 ...
1825
1826 LDR xd, [xd] ; or LDRSW xd, [xd]
1827
1828 */
1829
1830 if (is_sw)
1831 insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
1832 aarch64_register (rt, 1),
1833 aarch64_register (rt, 1),
1834 offset_memory_operand (0));
bb903df0 1835 else
0badd99f
YQ
1836 insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
1837 aarch64_register (rt, is64),
1838 aarch64_register (rt, 1),
1839 offset_memory_operand (0));
1840}
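/* Note: emit_mov_addr, defined earlier in this file, has to build a
   64-bit constant out of 16-bit pieces, as no single AArch64
   instruction carries a full 64-bit immediate.  A sketch of the
   standard MOVZ/MOVK sequence such a helper expands to (an assumption
   about its shape, not a quote of its implementation):

     MOVZ xd, #(addr & 0xffff)
     MOVK xd, #((addr >> 16) & 0xffff), LSL #16
     MOVK xd, #((addr >> 32) & 0xffff), LSL #32
     MOVK xd, #((addr >> 48) & 0xffff), LSL #48  */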
1841
1842/* Implementation of aarch64_insn_visitor method "others". */
1843
1844static void
1845aarch64_ftrace_insn_reloc_others (const uint32_t insn,
1846 struct aarch64_insn_data *data)
1847{
1848 struct aarch64_insn_relocation_data *insn_reloc
1849 = (struct aarch64_insn_relocation_data *) data;
bb903df0 1850
0badd99f
YQ
1851 /* The instruction is not PC relative. Just re-emit it at the new
1852 location. */
e1c587c3 1853 insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
0badd99f
YQ
1854}
1855
1856static const struct aarch64_insn_visitor visitor =
1857{
1858 aarch64_ftrace_insn_reloc_b,
1859 aarch64_ftrace_insn_reloc_b_cond,
1860 aarch64_ftrace_insn_reloc_cb,
1861 aarch64_ftrace_insn_reloc_tb,
1862 aarch64_ftrace_insn_reloc_adr,
1863 aarch64_ftrace_insn_reloc_ldr_literal,
1864 aarch64_ftrace_insn_reloc_others,
1865};
1866
bb903df0
PL
1867/* Implementation of linux_target_ops method
1868 "install_fast_tracepoint_jump_pad". */
1869
1870static int
1871aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
1872 CORE_ADDR tpaddr,
1873 CORE_ADDR collector,
1874 CORE_ADDR lockaddr,
1875 ULONGEST orig_size,
1876 CORE_ADDR *jump_entry,
1877 CORE_ADDR *trampoline,
1878 ULONGEST *trampoline_size,
1879 unsigned char *jjump_pad_insn,
1880 ULONGEST *jjump_pad_insn_size,
1881 CORE_ADDR *adjusted_insn_addr,
1882 CORE_ADDR *adjusted_insn_addr_end,
1883 char *err)
1884{
1885 uint32_t buf[256];
1886 uint32_t *p = buf;
2ac09a5b 1887 int64_t offset;
bb903df0 1888 int i;
70b439f0 1889 uint32_t insn;
bb903df0 1890 CORE_ADDR buildaddr = *jump_entry;
0badd99f 1891 struct aarch64_insn_relocation_data insn_data;
bb903df0
PL
1892
1893 /* We need to save the current state on the stack both to restore it
1894 later and to collect register values when the tracepoint is hit.
1895
1896 The saved registers are pushed in a layout that needs to be in sync
1897 with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c). Later on
1898 the supply_fast_tracepoint_registers function will fill in the
1899 register cache from a pointer to saved registers on the stack we build
1900 here.
1901
1902 For simplicity, we set the size of each cell on the stack to 16 bytes.
1903 This way one cell can hold any register type, from system registers
 1904	     to the 128-bit SIMD&FP registers.  Furthermore, the stack pointer
 1905	     has to be 16-byte aligned anyway.
1906
1907 Note that the CPSR register does not exist on AArch64. Instead we
1908 can access system bits describing the process state with the
1909 MRS/MSR instructions, namely the condition flags. We save them as
1910 if they are part of a CPSR register because that's how GDB
1911 interprets these system bits. At the moment, only the condition
1912 flags are saved in CPSR (NZCV).
1913
1914 Stack layout, each cell is 16 bytes (descending):
1915
1916 High *-------- SIMD&FP registers from 31 down to 0. --------*
1917 | q31 |
1918 . .
1919 . . 32 cells
1920 . .
1921 | q0 |
1922 *---- General purpose registers from 30 down to 0. ----*
1923 | x30 |
1924 . .
1925 . . 31 cells
1926 . .
1927 | x0 |
1928 *------------- Special purpose registers. -------------*
1929 | SP |
1930 | PC |
1931 | CPSR (NZCV) | 5 cells
1932 | FPSR |
1933 | FPCR | <- SP + 16
1934 *------------- collecting_t object --------------------*
1935 | TPIDR_EL0 | struct tracepoint * |
1936 Low *------------------------------------------------------*
1937
1938 After this stack is set up, we issue a call to the collector, passing
1939 it the saved registers at (SP + 16). */
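  /* Sketch: viewed from C, the register block handed to the collector
     at (SP + 16) can be pictured as the layout below.  The struct is
     purely illustrative; the real consumer, aarch64_ft_collect_regmap
     in linux-aarch64-ipa.c, indexes raw 16-byte cells instead.

     struct ft_saved_regs_sketch
     {
       uint64_t fpcr[2];       // cell 0, at SP + 16
       uint64_t fpsr[2];       // cell 1
       uint64_t cpsr_nzcv[2];  // cell 2
       uint64_t pc[2];         // cell 3
       uint64_t sp[2];         // cell 4
       uint64_t x[31][2];      // x0 .. x30, one 16-byte cell each
       uint64_t q[32][2];      // q0 .. q31, 128 bits each
     };  */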
1940
1941 /* Push SIMD&FP registers on the stack:
1942
1943 SUB sp, sp, #(32 * 16)
1944
1945 STP q30, q31, [sp, #(30 * 16)]
1946 ...
1947 STP q0, q1, [sp]
1948
1949 */
1950 p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
1951 for (i = 30; i >= 0; i -= 2)
1952 p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);
1953
 1954	  /* Push general purpose registers on the stack.  Note that we do not need
1955 to push x31 as it represents the xzr register and not the stack
1956 pointer in a STR instruction.
1957
1958 SUB sp, sp, #(31 * 16)
1959
1960 STR x30, [sp, #(30 * 16)]
1961 ...
1962 STR x0, [sp]
1963
1964 */
1965 p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
1966 for (i = 30; i >= 0; i -= 1)
1967 p += emit_str (p, aarch64_register (i, 1), sp,
1968 offset_memory_operand (i * 16));
1969
1970 /* Make space for 5 more cells.
1971
1972 SUB sp, sp, #(5 * 16)
1973
1974 */
1975 p += emit_sub (p, sp, sp, immediate_operand (5 * 16));
1976
1977
1978 /* Save SP:
1979
1980 ADD x4, sp, #((32 + 31 + 5) * 16)
1981 STR x4, [sp, #(4 * 16)]
1982
1983 */
1984 p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
1985 p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));
1986
1987 /* Save PC (tracepoint address):
1988
1989 MOV x3, #(tpaddr)
1990 ...
1991
1992 STR x3, [sp, #(3 * 16)]
1993
1994 */
1995
1996 p += emit_mov_addr (p, x3, tpaddr);
1997 p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));
1998
1999 /* Save CPSR (NZCV), FPSR and FPCR:
2000
2001 MRS x2, nzcv
2002 MRS x1, fpsr
2003 MRS x0, fpcr
2004
2005 STR x2, [sp, #(2 * 16)]
2006 STR x1, [sp, #(1 * 16)]
2007 STR x0, [sp, #(0 * 16)]
2008
2009 */
2010 p += emit_mrs (p, x2, NZCV);
2011 p += emit_mrs (p, x1, FPSR);
2012 p += emit_mrs (p, x0, FPCR);
2013 p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
2014 p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
2015 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2016
 2017	  /* Push the collecting_t object.  It consists of the address of the
2018 tracepoint and an ID for the current thread. We get the latter by
2019 reading the tpidr_el0 system register. It corresponds to the
2020 NT_ARM_TLS register accessible with ptrace.
2021
2022 MOV x0, #(tpoint)
2023 ...
2024
2025 MRS x1, tpidr_el0
2026
2027 STP x0, x1, [sp, #-16]!
2028
2029 */
2030
2031 p += emit_mov_addr (p, x0, tpoint);
2032 p += emit_mrs (p, x1, TPIDR_EL0);
2033 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
2034
2035 /* Spin-lock:
2036
2037 The shared memory for the lock is at lockaddr. It will hold zero
2038 if no-one is holding the lock, otherwise it contains the address of
2039 the collecting_t object on the stack of the thread which acquired it.
2040
2041 At this stage, the stack pointer points to this thread's collecting_t
2042 object.
2043
2044 We use the following registers:
2045 - x0: Address of the lock.
2046 - x1: Pointer to collecting_t object.
2047 - x2: Scratch register.
2048
2049 MOV x0, #(lockaddr)
2050 ...
2051 MOV x1, sp
2052
 2053	     ; Trigger an event local to this core, so the first WFE below
 2054	     ; completes immediately instead of waiting.
2055 SEVL
2056 again:
2057 ; Wait for an event. The event is triggered by either the SEVL
2058 ; or STLR instructions (store release).
2059 WFE
2060
2061 ; Atomically read at lockaddr. This marks the memory location as
 2062	     ; exclusive.  The load also has acquire semantics: no data read
 2063	     ; or write that follows it can be reordered to happen before
 2064	     ; it.
2065 LDAXR x2, [x0]
2066
2067 ; Try again if another thread holds the lock.
2068 CBNZ x2, again
2069
2070 ; We can lock it! Write the address of the collecting_t object.
2071 ; This instruction will fail if the memory location is not marked
2072 ; as exclusive anymore. If it succeeds, it will remove the
2073 ; exclusive mark on the memory location. This way, if another
2074 ; thread executes this instruction before us, we will fail and try
2075 ; all over again.
2076 STXR w2, x1, [x0]
2077 CBNZ w2, again
2078
2079 */
2080
2081 p += emit_mov_addr (p, x0, lockaddr);
2082 p += emit_mov (p, x1, register_operand (sp));
2083
2084 p += emit_sevl (p);
2085 p += emit_wfe (p);
2086 p += emit_ldaxr (p, x2, x0);
 2087	  p += emit_cb (p, 1, x2, -2 * 4);
 2088	  p += emit_stxr (p, w2, x1, x0);
 2089	  p += emit_cb (p, 1, w2, -4 * 4);
2090
2091 /* Call collector (struct tracepoint *, unsigned char *):
2092
2093 MOV x0, #(tpoint)
2094 ...
2095
2096 ; Saved registers start after the collecting_t object.
2097 ADD x1, sp, #16
2098
2099 ; We use an intra-procedure-call scratch register.
2100 MOV ip0, #(collector)
2101 ...
2102
2103 ; And call back to C!
2104 BLR ip0
2105
2106 */
2107
2108 p += emit_mov_addr (p, x0, tpoint);
2109 p += emit_add (p, x1, sp, immediate_operand (16));
2110
2111 p += emit_mov_addr (p, ip0, collector);
2112 p += emit_blr (p, ip0);
2113
2114 /* Release the lock.
2115
2116 MOV x0, #(lockaddr)
2117 ...
2118
2119 ; This instruction is a normal store with memory ordering
2120 ; constraints. Thanks to this we do not have to put a data
 2121	     ; barrier instruction to make sure all data reads and writes are done
 2122	     ; before this instruction is executed.  Furthermore, this instruction
2123 ; will trigger an event, letting other threads know they can grab
2124 ; the lock.
2125 STLR xzr, [x0]
2126
2127 */
2128 p += emit_mov_addr (p, x0, lockaddr);
2129 p += emit_stlr (p, xzr, x0);
2130
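  /* Sketch: the LDAXR/STXR/WFE loop and the STLR release above are a
     hand-rolled acquire/release spin lock; the jump pad cannot call
     into C, so it is emitted as raw instructions.  A minimal C11
     rendering of the same protocol, with hypothetical names (needs
     <stdatomic.h>):

     static void
     collect_lock_sketch (atomic_uintptr_t *lock, uintptr_t self)
     {
       uintptr_t expected = 0;

       // LDAXR + CBNZ + STXR: retry until we swap the address of our
       // collecting_t object into a lock word currently holding zero.
       while (!atomic_compare_exchange_weak_explicit
	      (lock, &expected, self,
	       memory_order_acquire, memory_order_relaxed))
	 expected = 0;
     }

     static void
     collect_unlock_sketch (atomic_uintptr_t *lock)
     {
       // STLR xzr, [x0]: the store-release frees the lock; on AArch64
       // it also generates the event that wakes up WFE waiters.
       atomic_store_explicit (lock, (uintptr_t) 0,
			      memory_order_release);
     }  */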
2131 /* Free collecting_t object:
2132
2133 ADD sp, sp, #16
2134
2135 */
2136 p += emit_add (p, sp, sp, immediate_operand (16));
2137
 2138	  /* Restore CPSR (NZCV), FPSR and FPCR, and pop the special purpose
 2139	     register cells from the stack.
2140
2141 LDR x2, [sp, #(2 * 16)]
2142 LDR x1, [sp, #(1 * 16)]
2143 LDR x0, [sp, #(0 * 16)]
2144
2145 MSR NZCV, x2
2146 MSR FPSR, x1
2147 MSR FPCR, x0
2148
 2149	     ADD sp, sp, #(5 * 16)
2150
2151 */
2152 p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
2153 p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
2154 p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
2155 p += emit_msr (p, NZCV, x2);
2156 p += emit_msr (p, FPSR, x1);
2157 p += emit_msr (p, FPCR, x0);
2158
2159 p += emit_add (p, sp, sp, immediate_operand (5 * 16));
2160
2161 /* Pop general purpose registers:
2162
2163 LDR x0, [sp]
2164 ...
2165 LDR x30, [sp, #(30 * 16)]
2166
2167 ADD sp, sp, #(31 * 16)
2168
2169 */
2170 for (i = 0; i <= 30; i += 1)
2171 p += emit_ldr (p, aarch64_register (i, 1), sp,
2172 offset_memory_operand (i * 16));
2173 p += emit_add (p, sp, sp, immediate_operand (31 * 16));
2174
2175 /* Pop SIMD&FP registers:
2176
2177 LDP q0, q1, [sp]
2178 ...
2179 LDP q30, q31, [sp, #(30 * 16)]
2180
2181 ADD sp, sp, #(32 * 16)
2182
2183 */
2184 for (i = 0; i <= 30; i += 2)
2185 p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
2186 p += emit_add (p, sp, sp, immediate_operand (32 * 16));
2187
2188 /* Write the code into the inferior memory. */
2189 append_insns (&buildaddr, p - buf, buf);
2190
2191 /* Now emit the relocated instruction. */
2192 *adjusted_insn_addr = buildaddr;
70b439f0 2193 target_read_uint32 (tpaddr, &insn);
0badd99f
YQ
2194
2195 insn_data.base.insn_addr = tpaddr;
2196 insn_data.new_addr = buildaddr;
2197 insn_data.insn_ptr = buf;
2198
2199 aarch64_relocate_instruction (insn, &visitor,
2200 (struct aarch64_insn_data *) &insn_data);
2201
bb903df0 2202 /* We may not have been able to relocate the instruction. */
0badd99f 2203 if (insn_data.insn_ptr == buf)
bb903df0
PL
2204 {
2205 sprintf (err,
2206 "E.Could not relocate instruction from %s to %s.",
2207 core_addr_to_string_nz (tpaddr),
2208 core_addr_to_string_nz (buildaddr));
2209 return 1;
2210 }
dfaffe9d 2211 else
0badd99f 2212 append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
dfaffe9d 2213 *adjusted_insn_addr_end = buildaddr;
bb903df0
PL
2214
2215 /* Go back to the start of the buffer. */
2216 p = buf;
2217
2218 /* Emit a branch back from the jump pad. */
2219 offset = (tpaddr + orig_size - buildaddr);
2220 if (!can_encode_int32 (offset, 28))
2221 {
2222 sprintf (err,
2223 "E.Jump back from jump pad too far from tracepoint "
2ac09a5b 2224 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
bb903df0
PL
2225 offset);
2226 return 1;
2227 }
2228
2229 p += emit_b (p, 0, offset);
2230 append_insns (&buildaddr, p - buf, buf);
2231
2232 /* Give the caller a branch instruction into the jump pad. */
2233 offset = (*jump_entry - tpaddr);
2234 if (!can_encode_int32 (offset, 28))
2235 {
2236 sprintf (err,
2237 "E.Jump pad too far from tracepoint "
2ac09a5b 2238 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
bb903df0
PL
2239 offset);
2240 return 1;
2241 }
2242
2243 emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
2244 *jjump_pad_insn_size = 4;
2245
2246 /* Return the end address of our pad. */
2247 *jump_entry = buildaddr;
2248
2249 return 0;
2250}
2251
afbe19f8
PL
2252/* Helper function writing LEN instructions from START into
2253 current_insn_ptr. */
2254
2255static void
2256emit_ops_insns (const uint32_t *start, int len)
2257{
2258 CORE_ADDR buildaddr = current_insn_ptr;
2259
2260 if (debug_threads)
 2261	    debug_printf ("Adding %d instructions at %s\n",
2262 len, paddress (buildaddr));
2263
2264 append_insns (&buildaddr, len, start);
2265 current_insn_ptr = buildaddr;
2266}
2267
2268/* Pop a register from the stack. */
2269
2270static int
2271emit_pop (uint32_t *buf, struct aarch64_register rt)
2272{
2273 return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
2274}
2275
2276/* Push a register on the stack. */
2277
2278static int
2279emit_push (uint32_t *buf, struct aarch64_register rt)
2280{
2281 return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
2282}
2283
2284/* Implementation of emit_ops method "emit_prologue". */
2285
2286static void
2287aarch64_emit_prologue (void)
2288{
2289 uint32_t buf[16];
2290 uint32_t *p = buf;
2291
 2292	  /* This function emits a prologue for the following function prototype:
2293
2294 enum eval_result_type f (unsigned char *regs,
2295 ULONGEST *value);
2296
 2297	     The first argument is a buffer of raw registers.  The second
 2298	     argument points to where the result of evaluating the
 2299	     expression will be stored; it is set to whatever is on top of
 2300	     the stack at the end.
2301
2302 The stack set up by the prologue is as such:
2303
2304 High *------------------------------------------------------*
2305 | LR |
2306 | FP | <- FP
2307 | x1 (ULONGEST *value) |
2308 | x0 (unsigned char *regs) |
2309 Low *------------------------------------------------------*
2310
2311 As we are implementing a stack machine, each opcode can expand the
2312 stack so we never know how far we are from the data saved by this
 2313	     prologue.  In order to be able to refer to value and regs later, we save
2314 the current stack pointer in the frame pointer. This way, it is not
2315 clobbered when calling C functions.
2316
 2317	     Finally, throughout every operation, we are using register x0 as the
2318 top of the stack, and x1 as a scratch register. */
2319
2320 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
2321 p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
2322 p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));
2323
2324 p += emit_add (p, fp, sp, immediate_operand (2 * 8));
2325
2326
2327 emit_ops_insns (buf, p - buf);
2328}
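/* Sketch: how these emit_ops compose.  The bytecode compiler in
   gdbserver walks an agent expression and calls one emitter per
   opcode; for a hypothetical expression "reg 0 + 5" the sequence
   would look roughly like this (the real driver lives in ax.c, this
   listing is illustrative only):

     aarch64_emit_prologue ();     // set up the fp-anchored frame
     aarch64_emit_reg (0);         // x0 = raw value of register 0
     aarch64_emit_stack_flush ();  // push x0 before loading a new top
     aarch64_emit_const (5);       // x0 = 5
     aarch64_emit_add ();          // pop x1; x0 = x1 + x0
     aarch64_emit_epilogue ();     // *value = x0, return no_error
  */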
2329
2330/* Implementation of emit_ops method "emit_epilogue". */
2331
2332static void
2333aarch64_emit_epilogue (void)
2334{
2335 uint32_t buf[16];
2336 uint32_t *p = buf;
2337
2338 /* Store the result of the expression (x0) in *value. */
2339 p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
2340 p += emit_ldr (p, x1, x1, offset_memory_operand (0));
2341 p += emit_str (p, x0, x1, offset_memory_operand (0));
2342
2343 /* Restore the previous state. */
2344 p += emit_add (p, sp, fp, immediate_operand (2 * 8));
2345 p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));
2346
2347 /* Return expr_eval_no_error. */
2348 p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
2349 p += emit_ret (p, lr);
2350
2351 emit_ops_insns (buf, p - buf);
2352}
2353
2354/* Implementation of emit_ops method "emit_add". */
2355
2356static void
2357aarch64_emit_add (void)
2358{
2359 uint32_t buf[16];
2360 uint32_t *p = buf;
2361
2362 p += emit_pop (p, x1);
45e3745e 2363 p += emit_add (p, x0, x1, register_operand (x0));
afbe19f8
PL
2364
2365 emit_ops_insns (buf, p - buf);
2366}
2367
2368/* Implementation of emit_ops method "emit_sub". */
2369
2370static void
2371aarch64_emit_sub (void)
2372{
2373 uint32_t buf[16];
2374 uint32_t *p = buf;
2375
2376 p += emit_pop (p, x1);
45e3745e 2377 p += emit_sub (p, x0, x1, register_operand (x0));
afbe19f8
PL
2378
2379 emit_ops_insns (buf, p - buf);
2380}
2381
2382/* Implementation of emit_ops method "emit_mul". */
2383
2384static void
2385aarch64_emit_mul (void)
2386{
2387 uint32_t buf[16];
2388 uint32_t *p = buf;
2389
2390 p += emit_pop (p, x1);
2391 p += emit_mul (p, x0, x1, x0);
2392
2393 emit_ops_insns (buf, p - buf);
2394}
2395
2396/* Implementation of emit_ops method "emit_lsh". */
2397
2398static void
2399aarch64_emit_lsh (void)
2400{
2401 uint32_t buf[16];
2402 uint32_t *p = buf;
2403
2404 p += emit_pop (p, x1);
2405 p += emit_lslv (p, x0, x1, x0);
2406
2407 emit_ops_insns (buf, p - buf);
2408}
2409
2410/* Implementation of emit_ops method "emit_rsh_signed". */
2411
2412static void
2413aarch64_emit_rsh_signed (void)
2414{
2415 uint32_t buf[16];
2416 uint32_t *p = buf;
2417
2418 p += emit_pop (p, x1);
2419 p += emit_asrv (p, x0, x1, x0);
2420
2421 emit_ops_insns (buf, p - buf);
2422}
2423
2424/* Implementation of emit_ops method "emit_rsh_unsigned". */
2425
2426static void
2427aarch64_emit_rsh_unsigned (void)
2428{
2429 uint32_t buf[16];
2430 uint32_t *p = buf;
2431
2432 p += emit_pop (p, x1);
2433 p += emit_lsrv (p, x0, x1, x0);
2434
2435 emit_ops_insns (buf, p - buf);
2436}
2437
2438/* Implementation of emit_ops method "emit_ext". */
2439
2440static void
2441aarch64_emit_ext (int arg)
2442{
2443 uint32_t buf[16];
2444 uint32_t *p = buf;
2445
2446 p += emit_sbfx (p, x0, x0, 0, arg);
2447
2448 emit_ops_insns (buf, p - buf);
2449}
2450
2451/* Implementation of emit_ops method "emit_log_not". */
2452
2453static void
2454aarch64_emit_log_not (void)
2455{
2456 uint32_t buf[16];
2457 uint32_t *p = buf;
2458
2459 /* If the top of the stack is 0, replace it with 1. Else replace it with
2460 0. */
2461
2462 p += emit_cmp (p, x0, immediate_operand (0));
2463 p += emit_cset (p, x0, EQ);
2464
2465 emit_ops_insns (buf, p - buf);
2466}
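/* Note: in C terms the CMP/CSET pair above is simply
   "x0 = (x0 == 0) ? 1 : 0;" -- CSET writes 1 to its destination when
   the condition (EQ, i.e. the Z flag from the compare) holds, and 0
   otherwise.  */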
2467
2468/* Implementation of emit_ops method "emit_bit_and". */
2469
2470static void
2471aarch64_emit_bit_and (void)
2472{
2473 uint32_t buf[16];
2474 uint32_t *p = buf;
2475
2476 p += emit_pop (p, x1);
2477 p += emit_and (p, x0, x0, x1);
2478
2479 emit_ops_insns (buf, p - buf);
2480}
2481
2482/* Implementation of emit_ops method "emit_bit_or". */
2483
2484static void
2485aarch64_emit_bit_or (void)
2486{
2487 uint32_t buf[16];
2488 uint32_t *p = buf;
2489
2490 p += emit_pop (p, x1);
2491 p += emit_orr (p, x0, x0, x1);
2492
2493 emit_ops_insns (buf, p - buf);
2494}
2495
2496/* Implementation of emit_ops method "emit_bit_xor". */
2497
2498static void
2499aarch64_emit_bit_xor (void)
2500{
2501 uint32_t buf[16];
2502 uint32_t *p = buf;
2503
2504 p += emit_pop (p, x1);
2505 p += emit_eor (p, x0, x0, x1);
2506
2507 emit_ops_insns (buf, p - buf);
2508}
2509
2510/* Implementation of emit_ops method "emit_bit_not". */
2511
2512static void
2513aarch64_emit_bit_not (void)
2514{
2515 uint32_t buf[16];
2516 uint32_t *p = buf;
2517
2518 p += emit_mvn (p, x0, x0);
2519
2520 emit_ops_insns (buf, p - buf);
2521}
2522
2523/* Implementation of emit_ops method "emit_equal". */
2524
2525static void
2526aarch64_emit_equal (void)
2527{
2528 uint32_t buf[16];
2529 uint32_t *p = buf;
2530
2531 p += emit_pop (p, x1);
2532 p += emit_cmp (p, x0, register_operand (x1));
2533 p += emit_cset (p, x0, EQ);
2534
2535 emit_ops_insns (buf, p - buf);
2536}
2537
2538/* Implementation of emit_ops method "emit_less_signed". */
2539
2540static void
2541aarch64_emit_less_signed (void)
2542{
2543 uint32_t buf[16];
2544 uint32_t *p = buf;
2545
2546 p += emit_pop (p, x1);
2547 p += emit_cmp (p, x1, register_operand (x0));
2548 p += emit_cset (p, x0, LT);
2549
2550 emit_ops_insns (buf, p - buf);
2551}
2552
2553/* Implementation of emit_ops method "emit_less_unsigned". */
2554
2555static void
2556aarch64_emit_less_unsigned (void)
2557{
2558 uint32_t buf[16];
2559 uint32_t *p = buf;
2560
2561 p += emit_pop (p, x1);
2562 p += emit_cmp (p, x1, register_operand (x0));
2563 p += emit_cset (p, x0, LO);
2564
2565 emit_ops_insns (buf, p - buf);
2566}
2567
2568/* Implementation of emit_ops method "emit_ref". */
2569
2570static void
2571aarch64_emit_ref (int size)
2572{
2573 uint32_t buf[16];
2574 uint32_t *p = buf;
2575
2576 switch (size)
2577 {
2578 case 1:
2579 p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
2580 break;
2581 case 2:
2582 p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
2583 break;
2584 case 4:
2585 p += emit_ldr (p, w0, x0, offset_memory_operand (0));
2586 break;
2587 case 8:
2588 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2589 break;
2590 default:
2591 /* Unknown size, bail on compilation. */
2592 emit_error = 1;
2593 break;
2594 }
2595
2596 emit_ops_insns (buf, p - buf);
2597}
2598
2599/* Implementation of emit_ops method "emit_if_goto". */
2600
2601static void
2602aarch64_emit_if_goto (int *offset_p, int *size_p)
2603{
2604 uint32_t buf[16];
2605 uint32_t *p = buf;
2606
2607 /* The Z flag is set or cleared here. */
2608 p += emit_cmp (p, x0, immediate_operand (0));
2609 /* This instruction must not change the Z flag. */
2610 p += emit_pop (p, x0);
2611 /* Branch over the next instruction if x0 == 0. */
2612 p += emit_bcond (p, EQ, 8);
2613
2614 /* The NOP instruction will be patched with an unconditional branch. */
2615 if (offset_p)
2616 *offset_p = (p - buf) * 4;
2617 if (size_p)
2618 *size_p = 4;
2619 p += emit_nop (p);
2620
2621 emit_ops_insns (buf, p - buf);
2622}
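/* Sketch: the offset/size pair recorded above lets the caller patch
   the placeholder NOP once the forward target is known.  The
   two-phase flow looks roughly like this (variable names
   hypothetical; the real bookkeeping is done by the bytecode compiler
   in gdbserver's ax.c):

     int off, size;
     CORE_ADDR start = current_insn_ptr;

     aarch64_emit_if_goto (&off, &size);
     // ... emit the instructions skipped when the test fails ...
     aarch64_write_goto_address (start + off, current_insn_ptr, size);
  */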
2623
2624/* Implementation of emit_ops method "emit_goto". */
2625
2626static void
2627aarch64_emit_goto (int *offset_p, int *size_p)
2628{
2629 uint32_t buf[16];
2630 uint32_t *p = buf;
2631
2632 /* The NOP instruction will be patched with an unconditional branch. */
2633 if (offset_p)
2634 *offset_p = 0;
2635 if (size_p)
2636 *size_p = 4;
2637 p += emit_nop (p);
2638
2639 emit_ops_insns (buf, p - buf);
2640}
2641
2642/* Implementation of emit_ops method "write_goto_address". */
2643
 2644static void
2645aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2646{
2647 uint32_t insn;
2648
2649 emit_b (&insn, 0, to - from);
2650 append_insns (&from, 1, &insn);
2651}
2652
2653/* Implementation of emit_ops method "emit_const". */
2654
2655static void
2656aarch64_emit_const (LONGEST num)
2657{
2658 uint32_t buf[16];
2659 uint32_t *p = buf;
2660
2661 p += emit_mov_addr (p, x0, num);
2662
2663 emit_ops_insns (buf, p - buf);
2664}
2665
2666/* Implementation of emit_ops method "emit_call". */
2667
2668static void
2669aarch64_emit_call (CORE_ADDR fn)
2670{
2671 uint32_t buf[16];
2672 uint32_t *p = buf;
2673
2674 p += emit_mov_addr (p, ip0, fn);
2675 p += emit_blr (p, ip0);
2676
2677 emit_ops_insns (buf, p - buf);
2678}
2679
2680/* Implementation of emit_ops method "emit_reg". */
2681
2682static void
2683aarch64_emit_reg (int reg)
2684{
2685 uint32_t buf[16];
2686 uint32_t *p = buf;
2687
2688 /* Set x0 to unsigned char *regs. */
2689 p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
2690 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2691 p += emit_mov (p, x1, immediate_operand (reg));
2692
2693 emit_ops_insns (buf, p - buf);
2694
2695 aarch64_emit_call (get_raw_reg_func_addr ());
2696}
2697
2698/* Implementation of emit_ops method "emit_pop". */
2699
2700static void
2701aarch64_emit_pop (void)
2702{
2703 uint32_t buf[16];
2704 uint32_t *p = buf;
2705
2706 p += emit_pop (p, x0);
2707
2708 emit_ops_insns (buf, p - buf);
2709}
2710
2711/* Implementation of emit_ops method "emit_stack_flush". */
2712
2713static void
2714aarch64_emit_stack_flush (void)
2715{
2716 uint32_t buf[16];
2717 uint32_t *p = buf;
2718
2719 p += emit_push (p, x0);
2720
2721 emit_ops_insns (buf, p - buf);
2722}
2723
2724/* Implementation of emit_ops method "emit_zero_ext". */
2725
2726static void
2727aarch64_emit_zero_ext (int arg)
2728{
2729 uint32_t buf[16];
2730 uint32_t *p = buf;
2731
2732 p += emit_ubfx (p, x0, x0, 0, arg);
2733
2734 emit_ops_insns (buf, p - buf);
2735}
2736
2737/* Implementation of emit_ops method "emit_swap". */
2738
2739static void
2740aarch64_emit_swap (void)
2741{
2742 uint32_t buf[16];
2743 uint32_t *p = buf;
2744
2745 p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
2746 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2747 p += emit_mov (p, x0, register_operand (x1));
2748
2749 emit_ops_insns (buf, p - buf);
2750}
2751
2752/* Implementation of emit_ops method "emit_stack_adjust". */
2753
2754static void
2755aarch64_emit_stack_adjust (int n)
2756{
2757 /* This is not needed with our design. */
2758 uint32_t buf[16];
2759 uint32_t *p = buf;
2760
2761 p += emit_add (p, sp, sp, immediate_operand (n * 16));
2762
2763 emit_ops_insns (buf, p - buf);
2764}
2765
2766/* Implementation of emit_ops method "emit_int_call_1". */
2767
2768static void
2769aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2770{
2771 uint32_t buf[16];
2772 uint32_t *p = buf;
2773
2774 p += emit_mov (p, x0, immediate_operand (arg1));
2775
2776 emit_ops_insns (buf, p - buf);
2777
2778 aarch64_emit_call (fn);
2779}
2780
2781/* Implementation of emit_ops method "emit_void_call_2". */
2782
2783static void
2784aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2785{
2786 uint32_t buf[16];
2787 uint32_t *p = buf;
2788
2789 /* Push x0 on the stack. */
2790 aarch64_emit_stack_flush ();
2791
2792 /* Setup arguments for the function call:
2793
2794 x0: arg1
2795 x1: top of the stack
2796
2797 MOV x1, x0
2798 MOV x0, #arg1 */
2799
2800 p += emit_mov (p, x1, register_operand (x0));
2801 p += emit_mov (p, x0, immediate_operand (arg1));
2802
2803 emit_ops_insns (buf, p - buf);
2804
2805 aarch64_emit_call (fn);
2806
2807 /* Restore x0. */
2808 aarch64_emit_pop ();
2809}
2810
2811/* Implementation of emit_ops method "emit_eq_goto". */
2812
2813static void
2814aarch64_emit_eq_goto (int *offset_p, int *size_p)
2815{
2816 uint32_t buf[16];
2817 uint32_t *p = buf;
2818
2819 p += emit_pop (p, x1);
2820 p += emit_cmp (p, x1, register_operand (x0));
2821 /* Branch over the next instruction if x0 != x1. */
2822 p += emit_bcond (p, NE, 8);
2823 /* The NOP instruction will be patched with an unconditional branch. */
2824 if (offset_p)
2825 *offset_p = (p - buf) * 4;
2826 if (size_p)
2827 *size_p = 4;
2828 p += emit_nop (p);
2829
2830 emit_ops_insns (buf, p - buf);
2831}
2832
2833/* Implementation of emit_ops method "emit_ne_goto". */
2834
2835static void
2836aarch64_emit_ne_goto (int *offset_p, int *size_p)
2837{
2838 uint32_t buf[16];
2839 uint32_t *p = buf;
2840
2841 p += emit_pop (p, x1);
2842 p += emit_cmp (p, x1, register_operand (x0));
2843 /* Branch over the next instruction if x0 == x1. */
2844 p += emit_bcond (p, EQ, 8);
2845 /* The NOP instruction will be patched with an unconditional branch. */
2846 if (offset_p)
2847 *offset_p = (p - buf) * 4;
2848 if (size_p)
2849 *size_p = 4;
2850 p += emit_nop (p);
2851
2852 emit_ops_insns (buf, p - buf);
2853}
2854
2855/* Implementation of emit_ops method "emit_lt_goto". */
2856
2857static void
2858aarch64_emit_lt_goto (int *offset_p, int *size_p)
2859{
2860 uint32_t buf[16];
2861 uint32_t *p = buf;
2862
2863 p += emit_pop (p, x1);
2864 p += emit_cmp (p, x1, register_operand (x0));
2865 /* Branch over the next instruction if x0 >= x1. */
2866 p += emit_bcond (p, GE, 8);
2867 /* The NOP instruction will be patched with an unconditional branch. */
2868 if (offset_p)
2869 *offset_p = (p - buf) * 4;
2870 if (size_p)
2871 *size_p = 4;
2872 p += emit_nop (p);
2873
2874 emit_ops_insns (buf, p - buf);
2875}
2876
2877/* Implementation of emit_ops method "emit_le_goto". */
2878
2879static void
2880aarch64_emit_le_goto (int *offset_p, int *size_p)
2881{
2882 uint32_t buf[16];
2883 uint32_t *p = buf;
2884
2885 p += emit_pop (p, x1);
2886 p += emit_cmp (p, x1, register_operand (x0));
2887 /* Branch over the next instruction if x0 > x1. */
2888 p += emit_bcond (p, GT, 8);
2889 /* The NOP instruction will be patched with an unconditional branch. */
2890 if (offset_p)
2891 *offset_p = (p - buf) * 4;
2892 if (size_p)
2893 *size_p = 4;
2894 p += emit_nop (p);
2895
2896 emit_ops_insns (buf, p - buf);
2897}
2898
2899/* Implementation of emit_ops method "emit_gt_goto". */
2900
2901static void
2902aarch64_emit_gt_goto (int *offset_p, int *size_p)
2903{
2904 uint32_t buf[16];
2905 uint32_t *p = buf;
2906
2907 p += emit_pop (p, x1);
2908 p += emit_cmp (p, x1, register_operand (x0));
2909 /* Branch over the next instruction if x0 <= x1. */
2910 p += emit_bcond (p, LE, 8);
2911 /* The NOP instruction will be patched with an unconditional branch. */
2912 if (offset_p)
2913 *offset_p = (p - buf) * 4;
2914 if (size_p)
2915 *size_p = 4;
2916 p += emit_nop (p);
2917
2918 emit_ops_insns (buf, p - buf);
2919}
2920
 2921/* Implementation of emit_ops method "emit_ge_goto".  */
2922
2923static void
2924aarch64_emit_ge_got (int *offset_p, int *size_p)
2925{
2926 uint32_t buf[16];
2927 uint32_t *p = buf;
2928
2929 p += emit_pop (p, x1);
2930 p += emit_cmp (p, x1, register_operand (x0));
 2931	  /* Branch over the next instruction if x0 < x1.  */
2932 p += emit_bcond (p, LT, 8);
2933 /* The NOP instruction will be patched with an unconditional branch. */
2934 if (offset_p)
2935 *offset_p = (p - buf) * 4;
2936 if (size_p)
2937 *size_p = 4;
2938 p += emit_nop (p);
2939
2940 emit_ops_insns (buf, p - buf);
2941}
2942
2943static struct emit_ops aarch64_emit_ops_impl =
2944{
2945 aarch64_emit_prologue,
2946 aarch64_emit_epilogue,
2947 aarch64_emit_add,
2948 aarch64_emit_sub,
2949 aarch64_emit_mul,
2950 aarch64_emit_lsh,
2951 aarch64_emit_rsh_signed,
2952 aarch64_emit_rsh_unsigned,
2953 aarch64_emit_ext,
2954 aarch64_emit_log_not,
2955 aarch64_emit_bit_and,
2956 aarch64_emit_bit_or,
2957 aarch64_emit_bit_xor,
2958 aarch64_emit_bit_not,
2959 aarch64_emit_equal,
2960 aarch64_emit_less_signed,
2961 aarch64_emit_less_unsigned,
2962 aarch64_emit_ref,
2963 aarch64_emit_if_goto,
2964 aarch64_emit_goto,
2965 aarch64_write_goto_address,
2966 aarch64_emit_const,
2967 aarch64_emit_call,
2968 aarch64_emit_reg,
2969 aarch64_emit_pop,
2970 aarch64_emit_stack_flush,
2971 aarch64_emit_zero_ext,
2972 aarch64_emit_swap,
2973 aarch64_emit_stack_adjust,
2974 aarch64_emit_int_call_1,
2975 aarch64_emit_void_call_2,
2976 aarch64_emit_eq_goto,
2977 aarch64_emit_ne_goto,
2978 aarch64_emit_lt_goto,
2979 aarch64_emit_le_goto,
2980 aarch64_emit_gt_goto,
2981 aarch64_emit_ge_got,
2982};
2983
2984/* Implementation of linux_target_ops method "emit_ops". */
2985
2986static struct emit_ops *
2987aarch64_emit_ops (void)
2988{
2989 return &aarch64_emit_ops_impl;
2990}
2991
bb903df0
PL
2992/* Implementation of linux_target_ops method
2993 "get_min_fast_tracepoint_insn_len". */
2994
2995static int
2996aarch64_get_min_fast_tracepoint_insn_len (void)
2997{
2998 return 4;
2999}
3000
d1d0aea1
PL
3001/* Implementation of linux_target_ops method "supports_range_stepping". */
3002
3003static int
3004aarch64_supports_range_stepping (void)
3005{
3006 return 1;
3007}
3008
dd373349
AT
3009/* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
3010
3011static const gdb_byte *
3012aarch64_sw_breakpoint_from_kind (int kind, int *size)
3013{
17b1509a
YQ
3014 if (is_64bit_tdesc ())
3015 {
3016 *size = aarch64_breakpoint_len;
3017 return aarch64_breakpoint;
3018 }
3019 else
3020 return arm_sw_breakpoint_from_kind (kind, size);
3021}
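/* Note: in the 64-bit branch above, aarch64_breakpoint is the 4-byte
   BRK #0 instruction (0xd4200000, stored little-endian as
   00 00 20 d4); executing it raises the SIGTRAP that gdbserver
   recognizes as a software breakpoint.  */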
3022
3023/* Implementation of linux_target_ops method "breakpoint_kind_from_pc". */
3024
3025static int
3026aarch64_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
3027{
3028 if (is_64bit_tdesc ())
3029 return aarch64_breakpoint_len;
3030 else
3031 return arm_breakpoint_kind_from_pc (pcptr);
3032}
3033
3034/* Implementation of the linux_target_ops method
3035 "breakpoint_kind_from_current_state". */
3036
3037static int
3038aarch64_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
3039{
3040 if (is_64bit_tdesc ())
3041 return aarch64_breakpoint_len;
3042 else
3043 return arm_breakpoint_kind_from_current_state (pcptr);
dd373349
AT
3044}
3045
7d00775e
AT
3046/* Support for hardware single step. */
3047
3048static int
3049aarch64_supports_hardware_single_step (void)
3050{
3051 return 1;
3052}
3053
176eb98c
MS
3054struct linux_target_ops the_low_target =
3055{
3056 aarch64_arch_setup,
3aee8918 3057 aarch64_regs_info,
50138245
AH
3058 NULL, /* cannot_fetch_register */
3059 NULL, /* cannot_store_register */
421530db 3060 NULL, /* fetch_register */
176eb98c
MS
3061 aarch64_get_pc,
3062 aarch64_set_pc,
17b1509a 3063 aarch64_breakpoint_kind_from_pc,
dd373349 3064 aarch64_sw_breakpoint_from_kind,
fa5308bd 3065 NULL, /* get_next_pcs */
421530db 3066 0, /* decr_pc_after_break */
176eb98c 3067 aarch64_breakpoint_at,
802e8e6d 3068 aarch64_supports_z_point_type,
176eb98c
MS
3069 aarch64_insert_point,
3070 aarch64_remove_point,
3071 aarch64_stopped_by_watchpoint,
3072 aarch64_stopped_data_address,
421530db
PL
3073 NULL, /* collect_ptrace_register */
3074 NULL, /* supply_ptrace_register */
ade90bde 3075 aarch64_linux_siginfo_fixup,
176eb98c 3076 aarch64_linux_new_process,
04ec7890 3077 aarch64_linux_delete_process,
176eb98c 3078 aarch64_linux_new_thread,
466eecee 3079 aarch64_linux_delete_thread,
3a8a0396 3080 aarch64_linux_new_fork,
176eb98c 3081 aarch64_linux_prepare_to_resume,
421530db 3082 NULL, /* process_qsupported */
7671bf47 3083 aarch64_supports_tracepoints,
bb903df0
PL
3084 aarch64_get_thread_area,
3085 aarch64_install_fast_tracepoint_jump_pad,
afbe19f8 3086 aarch64_emit_ops,
bb903df0 3087 aarch64_get_min_fast_tracepoint_insn_len,
d1d0aea1 3088 aarch64_supports_range_stepping,
17b1509a 3089 aarch64_breakpoint_kind_from_current_state,
7d00775e 3090 aarch64_supports_hardware_single_step,
061fc021 3091 aarch64_get_syscall_trapinfo,
176eb98c 3092};
3aee8918
PA
3093
3094void
3095initialize_low_arch (void)
3096{
3b53ae99
YQ
3097 initialize_low_arch_aarch32 ();
3098
3aee8918 3099 initialize_regsets_info (&aarch64_regsets_info);
02895270 3100 initialize_regsets_info (&aarch64_sve_regsets_info);
3aee8918 3101}