1 /* GNU/Linux/AArch64 specific low level interface, for the remote server for
2 GDB.
3
4 Copyright (C) 2009-2020 Free Software Foundation, Inc.
5 Contributed by ARM Ltd.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "server.h"
23 #include "linux-low.h"
24 #include "nat/aarch64-cap-linux.h" /* For Morello */
25 #include "nat/aarch64-linux.h"
26 #include "nat/aarch64-linux-hw-point.h"
27 #include "arch/aarch64-cap-linux.h"
28 #include "arch/aarch64-insn.h"
29 #include "linux-aarch32-low.h"
30 #include "elf/common.h"
31 #include "ax.h"
32 #include "tracepoint.h"
33 #include "debug.h"
34
35 #include <signal.h>
36 #include <sys/user.h>
37 #include "nat/gdb_ptrace.h"
38 #include <asm/ptrace.h>
39 #include <inttypes.h>
40 #include <endian.h>
41 #include <sys/uio.h>
42
43 #include "gdb_proc_service.h"
44 #include "arch/aarch64.h"
45 #include "linux-aarch32-tdesc.h"
46 #include "linux-aarch64-tdesc.h"
47 #include "nat/aarch64-sve-linux-ptrace.h"
48 #include "tdesc.h"
49
50 #ifdef HAVE_SYS_REG_H
51 #include <sys/reg.h>
52 #endif
53
54 /* Linux target op definitions for the AArch64 architecture. */
55
56 class aarch64_target : public linux_process_target
57 {
58 public:
59
60 const regs_info *get_regs_info () override;
61
62 int breakpoint_kind_from_pc (CORE_ADDR *pcptr) override;
63
64 int breakpoint_kind_from_current_state (CORE_ADDR *pcptr) override;
65
66 const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;
67
68 bool supports_z_point_type (char z_type) override;
69
70 bool supports_tracepoints () override;
71
72 bool supports_fast_tracepoints () override;
73
74 int install_fast_tracepoint_jump_pad
75 (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
76 CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
77 CORE_ADDR *trampoline, ULONGEST *trampoline_size,
78 unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
79 CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
80 char *err) override;
81
82 int get_min_fast_tracepoint_insn_len () override;
83
84 struct emit_ops *emit_ops () override;
85
86 bool supports_qxfer_capability () override;
87
88 int qxfer_capability (const CORE_ADDR address, unsigned char *readbuf,
89 unsigned const char *writebuf,
90 CORE_ADDR offset, int len) override;
91
92 protected:
93
94 void low_arch_setup () override;
95
96 bool low_cannot_fetch_register (int regno) override;
97
98 bool low_cannot_store_register (int regno) override;
99
100 bool low_supports_breakpoints () override;
101
102 CORE_ADDR low_get_pc (regcache *regcache) override;
103
104 void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
105
106 bool low_breakpoint_at (CORE_ADDR pc) override;
107
108 int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
109 int size, raw_breakpoint *bp) override;
110
111 int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
112 int size, raw_breakpoint *bp) override;
113
114 bool low_stopped_by_watchpoint () override;
115
116 CORE_ADDR low_stopped_data_address () override;
117
118 bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
119 int direction) override;
120
121 arch_process_info *low_new_process () override;
122
123 void low_delete_process (arch_process_info *info) override;
124
125 void low_new_thread (lwp_info *) override;
126
127 void low_delete_thread (arch_lwp_info *) override;
128
129 void low_new_fork (process_info *parent, process_info *child) override;
130
131 void low_prepare_to_resume (lwp_info *lwp) override;
132
133 int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;
134
135 bool low_supports_range_stepping () override;
136
137 bool low_supports_catch_syscall () override;
138
139 void low_get_syscall_trapinfo (regcache *regcache, int *sysno) override;
140
141 const struct link_map_offsets *low_fetch_linkmap_offsets (int is_elf64) override;
142 };
143
144 /* The singleton target ops object. */
145
146 static aarch64_target the_aarch64_target;
147
148 bool
149 aarch64_target::low_cannot_fetch_register (int regno)
150 {
151 gdb_assert_not_reached ("linux target op low_cannot_fetch_register "
152 "is not implemented by the target");
153 }
154
155 bool
156 aarch64_target::low_cannot_store_register (int regno)
157 {
158 gdb_assert_not_reached ("linux target op low_cannot_store_register "
159 "is not implemented by the target");
160 }
161
162 void
163 aarch64_target::low_prepare_to_resume (lwp_info *lwp)
164 {
165 aarch64_linux_prepare_to_resume (lwp);
166 }
167
168 /* Per-process arch-specific data we want to keep. */
169
170 struct arch_process_info
171 {
172 /* Hardware breakpoint/watchpoint data.
173 The reason for them to be per-process rather than per-thread is
174 due to the lack of information in the gdbserver environment;
175 gdbserver is not told whether a requested hardware
176 breakpoint/watchpoint is thread specific or not, so it has to set
177 each hw bp/wp for every thread in the current process. The
178 higher level bp/wp management in gdb will resume a thread if a hw
179 bp/wp trap is not expected for it. Since the hw bp/wp setting is
180 the same for each thread, it is reasonable for the data to live here.
181 */
182 struct aarch64_debug_reg_state debug_reg_state;
183 };
184
185 /* Return true if the size of register 0 is 8 bytes. */
186
187 static int
188 is_64bit_tdesc (void)
189 {
190 struct regcache *regcache = get_thread_regcache (current_thread, 0);
191
192 return register_size (regcache->tdesc, 0) == 8;
193 }
194
195 /* Return true if the regcache's target description contains the SVE feature. */
196
197 static bool
198 is_sve_tdesc (void)
199 {
200 struct regcache *regcache = get_thread_regcache (current_thread, 0);
201
202 return tdesc_contains_feature (regcache->tdesc, "org.gnu.gdb.aarch64.sve");
203 }
204
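/* Set by aarch64_fill_gregset when the general purpose registers in the
   regcache differ from the ones in the ptrace buffer, and consumed (and
   reset) by aarch64_fill_cregset below.  This relies on the NT_PRSTATUS
   regset being processed before the NT_ARM_MORELLO regset, which matches
   the order they appear in the regset tables further down. */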
205 static bool gpr_changed = false;
206
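/* Return true if the general purpose registers in REGCACHE differ from the
   contents of BUF.  The compared size assumes the usual AArch64 register
   layout at the start of the regcache: 31 X registers, SP and PC (8 bytes
   each), followed by the 4-byte CPSR. */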
207 static bool
208 gpr_set_changed (struct regcache *regcache, void *buf)
209 {
210 size_t gpr_size = (AARCH64_X_REGS_NUM + 2) * 8 + 4;
211 bool changed
212 = memcmp (regcache->registers, buf, gpr_size) != 0;
213
214 return changed;
215 }
216
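/* Regset fill hook: collect the general purpose registers from REGCACHE
   into the ptrace NT_PRSTATUS buffer BUF. */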
217 static void
218 aarch64_fill_gregset (struct regcache *regcache, void *buf)
219 {
220 struct user_pt_regs *regset = (struct user_pt_regs *) buf;
221 int i;
222
223 /* Right now, regcache contains the updated contents of the registers.
224 Record whether anything has changed in the GPRs; aarch64_fill_cregset
225 uses this to decide whether the capability registers should be written.
226
227 Then copy the register contents into the regset buffer. */
228
229 gpr_changed = gpr_set_changed (regcache, buf);
230
231 for (i = 0; i < AARCH64_X_REGS_NUM; i++)
232 collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
233 collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
234 collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
235 collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
236 }
237
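/* Regset store hook: supply the general purpose registers from the ptrace
   NT_PRSTATUS buffer BUF to REGCACHE. */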
238 static void
239 aarch64_store_gregset (struct regcache *regcache, const void *buf)
240 {
241 const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
242 int i;
243
244 for (i = 0; i < AARCH64_X_REGS_NUM; i++)
245 supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
246 supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
247 supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
248 supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
249 }
250
251 static void
252 aarch64_fill_fpregset (struct regcache *regcache, void *buf)
253 {
254 struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
255 int i;
256
257 for (i = 0; i < AARCH64_V_REGS_NUM; i++)
258 collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
259 collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
260 collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
261 }
262
263 static void
264 aarch64_store_fpregset (struct regcache *regcache, const void *buf)
265 {
266 const struct user_fpsimd_state *regset
267 = (const struct user_fpsimd_state *) buf;
268 int i;
269
270 for (i = 0; i < AARCH64_V_REGS_NUM; i++)
271 supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
272 supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
273 supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
274 }
275
276 /* Store the pauth registers to regcache. */
277
278 static void
279 aarch64_store_pauthregset (struct regcache *regcache, const void *buf)
280 {
281 uint64_t *pauth_regset = (uint64_t *) buf;
282 int pauth_base = find_regno (regcache->tdesc, "pauth_dmask");
283
284 if (pauth_base == 0)
285 return;
286
287 supply_register (regcache, AARCH64_PAUTH_DMASK_REGNUM (pauth_base),
288 &pauth_regset[0]);
289 supply_register (regcache, AARCH64_PAUTH_CMASK_REGNUM (pauth_base),
290 &pauth_regset[1]);
291 }
292
293 /* Capability registers fill hook implementation. */
294
295 static void
296 aarch64_fill_cregset (struct regcache *regcache, void *buf)
297 {
298 /* If the GPRs have changed, don't attempt to change the C registers. */
299 if (gpr_changed)
300 {
301 /* Reset the flag. */
302 gpr_changed = false;
303 return;
304 }
305
306 struct user_morello_state *cregset
307 = (struct user_morello_state *) buf;
308
309 int cregs_base = find_regno (regcache->tdesc, "c0");
310
311 /* Store the C registers to the buffer. */
312 int i, regno;
313 for (regno = cregs_base, i = 0;
314 regno < cregs_base + AARCH64_C_REGS_NUM;
315 regno++, i++)
316 collect_register (regcache, regno, &cregset->cregs[i]);
317
318 /* Store the other registers to the buffer. */
319 collect_register (regcache, regno++, &cregset->csp);
320 collect_register (regcache, regno++, &cregset->pcc);
321 collect_register (regcache, regno++, &cregset->ddc);
322 collect_register (regcache, regno++, &cregset->ctpidr);
323 collect_register (regcache, regno++, &cregset->rcsp);
324 collect_register (regcache, regno++, &cregset->rddc);
325 collect_register (regcache, regno++, &cregset->rctpidr);
326 collect_register (regcache, regno++, &cregset->cid);
327 collect_register (regcache, regno++, &cregset->tag_map);
328 collect_register (regcache, regno++, &cregset->cctlr);
329 }
330
331 /* Capability registers store hook implementation. */
332
333 static void
334 aarch64_store_cregset (struct regcache *regcache, const void *buf)
335 {
336 const struct user_morello_state *cregset
337 = (const struct user_morello_state *) buf;
338
339 int cregs_base = find_regno (regcache->tdesc, "c0");
340
341 /* Fetch the C registers. */
342 int i, regno;
343 for (regno = cregs_base, i = 0;
344 regno < cregs_base + AARCH64_C_REGS_NUM;
345 regno++, i++)
346 supply_register (regcache, regno, &cregset->cregs[i]);
347
348 /* Fetch the other registers. */
349 supply_register (regcache, regno++, &cregset->csp);
350 supply_register (regcache, regno++, &cregset->pcc);
351 supply_register (regcache, regno++, &cregset->ddc);
352 supply_register (regcache, regno++, &cregset->ctpidr);
353 supply_register (regcache, regno++, &cregset->rcsp);
354 supply_register (regcache, regno++, &cregset->rddc);
355 supply_register (regcache, regno++, &cregset->rctpidr);
356 supply_register (regcache, regno++, &cregset->cid);
357 supply_register (regcache, regno++, &cregset->tag_map);
358 supply_register (regcache, regno++, &cregset->cctlr);
359 }
360
361 bool
362 aarch64_target::low_supports_breakpoints ()
363 {
364 return true;
365 }
366
367 /* Implementation of linux_target_ops method "get_pc". */
368
369 CORE_ADDR
370 aarch64_target::low_get_pc (regcache *regcache)
371 {
372 if (register_size (regcache->tdesc, 0) == 8)
373 return linux_get_pc_64bit (regcache);
374 else
375 return linux_get_pc_32bit (regcache);
376 }
377
378 /* Implementation of linux target ops method "low_set_pc". */
379
380 void
381 aarch64_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
382 {
383 if (register_size (regcache->tdesc, 0) == 8)
384 linux_set_pc_64bit (regcache, pc);
385 else
386 linux_set_pc_32bit (regcache, pc);
387 }
388
389 #define aarch64_breakpoint_len 4
390
391 /* AArch64 BRK software debug mode instruction.
392 This instruction needs to match gdb/aarch64-tdep.c
393 (aarch64_default_breakpoint). */
394 static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
395
396 /* Implementation of linux target ops method "low_breakpoint_at". */
397
398 bool
399 aarch64_target::low_breakpoint_at (CORE_ADDR where)
400 {
401 if (is_64bit_tdesc ())
402 {
403 gdb_byte insn[aarch64_breakpoint_len];
404
405 read_memory (where, (unsigned char *) &insn, aarch64_breakpoint_len);
406 if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
407 return true;
408
409 return false;
410 }
411 else
412 return arm_breakpoint_at (where);
413 }
414
415 static void
416 aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
417 {
418 int i;
419
420 for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
421 {
422 state->dr_addr_bp[i] = 0;
423 state->dr_ctrl_bp[i] = 0;
424 state->dr_ref_count_bp[i] = 0;
425 }
426
427 for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
428 {
429 state->dr_addr_wp[i] = 0;
430 state->dr_ctrl_wp[i] = 0;
431 state->dr_ref_count_wp[i] = 0;
432 }
433 }
434
435 /* Return the pointer to the debug register state structure in the
436 current process' arch-specific data area. */
437
438 struct aarch64_debug_reg_state *
439 aarch64_get_debug_reg_state (pid_t pid)
440 {
441 struct process_info *proc = find_process_pid (pid);
442
443 return &proc->priv->arch_private->debug_reg_state;
444 }
445
446 /* Implementation of target ops method "supports_z_point_type". */
447
448 bool
449 aarch64_target::supports_z_point_type (char z_type)
450 {
451 switch (z_type)
452 {
453 case Z_PACKET_SW_BP:
454 case Z_PACKET_HW_BP:
455 case Z_PACKET_WRITE_WP:
456 case Z_PACKET_READ_WP:
457 case Z_PACKET_ACCESS_WP:
458 return true;
459 default:
460 return false;
461 }
462 }
463
464 /* Implementation of linux target ops method "low_insert_point".
465
466 It actually only records the info of the to-be-inserted bp/wp;
467 the actual insertion will happen when threads are resumed. */
468
469 int
470 aarch64_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
471 int len, raw_breakpoint *bp)
472 {
473 int ret;
474 enum target_hw_bp_type targ_type;
475 struct aarch64_debug_reg_state *state
476 = aarch64_get_debug_reg_state (pid_of (current_thread));
477
478 if (show_debug_regs)
479 fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
480 (unsigned long) addr, len);
481
482 /* Determine the type from the raw breakpoint type. */
483 targ_type = raw_bkpt_type_to_target_hw_bp_type (type);
484
485 if (targ_type != hw_execute)
486 {
487 if (aarch64_linux_region_ok_for_watchpoint (addr, len))
488 ret = aarch64_handle_watchpoint (targ_type, addr, len,
489 1 /* is_insert */, state);
490 else
491 ret = -1;
492 }
493 else
494 {
495 if (len == 3)
496 {
497 /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
498 instruction. Set it to 2 to correctly encode the length bit
499 mask in the hardware breakpoint/watchpoint control register. */
500 len = 2;
501 }
502 ret = aarch64_handle_breakpoint (targ_type, addr, len,
503 1 /* is_insert */, state);
504 }
505
506 if (show_debug_regs)
507 aarch64_show_debug_reg_state (state, "insert_point", addr, len,
508 targ_type);
509
510 return ret;
511 }
512
513 /* Implementation of linux target ops method "low_remove_point".
514
515 It actually only records the info of the to-be-removed bp/wp,
516 the actual removal will be done when threads are resumed. */
517
518 int
519 aarch64_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
520 int len, raw_breakpoint *bp)
521 {
522 int ret;
523 enum target_hw_bp_type targ_type;
524 struct aarch64_debug_reg_state *state
525 = aarch64_get_debug_reg_state (pid_of (current_thread));
526
527 if (show_debug_regs)
528 fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
529 (unsigned long) addr, len);
530
531 /* Determine the type from the raw breakpoint type. */
532 targ_type = raw_bkpt_type_to_target_hw_bp_type (type);
533
534 /* Set up state pointers. */
535 if (targ_type != hw_execute)
536 ret =
537 aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
538 state);
539 else
540 {
541 if (len == 3)
542 {
543 /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
544 instruction. Set it to 2 to correctly encode the length bit
545 mask in the hardware breakpoint/watchpoint control register. */
546 len = 2;
547 }
548 ret = aarch64_handle_breakpoint (targ_type, addr, len,
549 0 /* is_insert */, state);
550 }
551
552 if (show_debug_regs)
553 aarch64_show_debug_reg_state (state, "remove_point", addr, len,
554 targ_type);
555
556 return ret;
557 }
558
559 /* Implementation of linux target ops method "low_stopped_data_address". */
560
561 CORE_ADDR
562 aarch64_target::low_stopped_data_address ()
563 {
564 siginfo_t siginfo;
565 int pid, i;
566 struct aarch64_debug_reg_state *state;
567
568 pid = lwpid_of (current_thread);
569
570 /* Get the siginfo. */
571 if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
572 return (CORE_ADDR) 0;
573
574 /* Need to be a hardware breakpoint/watchpoint trap. */
575 if (siginfo.si_signo != SIGTRAP
576 || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
577 return (CORE_ADDR) 0;
578
579 /* Check if the address matches any watched address. */
580 state = aarch64_get_debug_reg_state (pid_of (current_thread));
581 for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
582 {
583 const unsigned int offset
584 = aarch64_watchpoint_offset (state->dr_ctrl_wp[i]);
585 const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
586 const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
587 const CORE_ADDR addr_watch = state->dr_addr_wp[i] + offset;
588 const CORE_ADDR addr_watch_aligned = align_down (state->dr_addr_wp[i], 8);
589 const CORE_ADDR addr_orig = state->dr_addr_orig_wp[i];
590
591 if (state->dr_ref_count_wp[i]
592 && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
593 && addr_trap >= addr_watch_aligned
594 && addr_trap < addr_watch + len)
595 {
596 /* ADDR_TRAP reports the first address of the memory range
597 accessed by the CPU, regardless of what memory range was
598 watched. Thus, a large CPU access that straddles
599 the ADDR_WATCH..ADDR_WATCH+LEN range may result in an
600 ADDR_TRAP that is lower than the
601 ADDR_WATCH..ADDR_WATCH+LEN range. E.g.:
602
603 addr: | 4 | 5 | 6 | 7 | 8 |
604 |---- range watched ----|
605 |----------- range accessed ------------|
606
607 In this case, ADDR_TRAP will be 4.
608
609 To match a watchpoint known to GDB core, we must never
610 report *ADDR_P outside of any ADDR_WATCH..ADDR_WATCH+LEN
611 range. ADDR_WATCH <= ADDR_TRAP < ADDR_ORIG is a false
612 positive on kernels older than 4.10. See PR
613 external/20207. */
614 return addr_orig;
615 }
616 }
617
618 return (CORE_ADDR) 0;
619 }
620
621 /* Implementation of linux target ops method "low_stopped_by_watchpoint". */
622
623 bool
624 aarch64_target::low_stopped_by_watchpoint ()
625 {
626 return (low_stopped_data_address () != 0);
627 }
628
629 /* Fetch the thread-local storage pointer for libthread_db. */
630
631 ps_err_e
632 ps_get_thread_area (struct ps_prochandle *ph,
633 lwpid_t lwpid, int idx, void **base)
634 {
635 return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
636 is_64bit_tdesc ());
637 }
638
639 /* Implementation of linux target ops method "low_siginfo_fixup". */
640
641 bool
642 aarch64_target::low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
643 int direction)
644 {
645 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
646 if (!is_64bit_tdesc ())
647 {
648 if (direction == 0)
649 aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
650 native);
651 else
652 aarch64_siginfo_from_compat_siginfo (native,
653 (struct compat_siginfo *) inf);
654
655 return true;
656 }
657
658 return false;
659 }
660
661 /* Implementation of linux target ops method "low_new_process". */
662
663 arch_process_info *
664 aarch64_target::low_new_process ()
665 {
666 struct arch_process_info *info = XCNEW (struct arch_process_info);
667
668 aarch64_init_debug_reg_state (&info->debug_reg_state);
669
670 return info;
671 }
672
673 /* Implementation of linux target ops method "low_delete_process". */
674
675 void
676 aarch64_target::low_delete_process (arch_process_info *info)
677 {
678 xfree (info);
679 }
680
681 void
682 aarch64_target::low_new_thread (lwp_info *lwp)
683 {
684 aarch64_linux_new_thread (lwp);
685 }
686
687 void
688 aarch64_target::low_delete_thread (arch_lwp_info *arch_lwp)
689 {
690 aarch64_linux_delete_thread (arch_lwp);
691 }
692
693 /* Implementation of linux target ops method "low_new_fork". */
694
695 void
696 aarch64_target::low_new_fork (process_info *parent,
697 process_info *child)
698 {
699 /* These are allocated by linux_add_process. */
700 gdb_assert (parent->priv != NULL
701 && parent->priv->arch_private != NULL);
702 gdb_assert (child->priv != NULL
703 && child->priv->arch_private != NULL);
704
705 /* Linux kernels before the 2.6.33 commit
706 72f674d203cd230426437cdcf7dd6f681dad8b0d
707 inherit hardware debug registers from the parent
708 on fork/vfork/clone. Newer Linux kernels create such tasks with
709 zeroed debug registers.
710
711 GDB core assumes the child inherits the watchpoints/hw
712 breakpoints of the parent, and will remove them all from the
713 forked-off process. Copy the debug register mirrors into the
714 new process so that all breakpoints and watchpoints can be
715 removed together. The debug register mirrors will be zeroed
716 before detaching from the forked-off process, which keeps this
717 compatible with older Linux kernels too. */
718
719 *child->priv->arch_private = *parent->priv->arch_private;
720 }
721
722 /* Wrapper for aarch64_sve_regs_copy_to_reg_buf. */
723
724 static void
725 aarch64_sve_regs_copy_to_regcache (struct regcache *regcache, const void *buf)
726 {
727 return aarch64_sve_regs_copy_to_reg_buf (regcache, buf);
728 }
729
730 /* Wrapper for aarch64_sve_regs_copy_from_reg_buf. */
731
732 static void
733 aarch64_sve_regs_copy_from_regcache (struct regcache *regcache, void *buf)
734 {
735 return aarch64_sve_regs_copy_from_reg_buf (regcache, buf);
736 }
737
738 static struct regset_info aarch64_regsets[] =
739 {
740 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
741 sizeof (struct user_pt_regs), GENERAL_REGS,
742 aarch64_fill_gregset, aarch64_store_gregset },
743 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
744 sizeof (struct user_fpsimd_state), FP_REGS,
745 aarch64_fill_fpregset, aarch64_store_fpregset
746 },
747 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
748 AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
749 NULL, aarch64_store_pauthregset },
750 /* FIXME-Morello: Fixup the register set size. */
751 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_MORELLO,
752 AARCH64_LINUX_CREGS_SIZE, OPTIONAL_REGS,
753 aarch64_fill_cregset, aarch64_store_cregset, nullptr,
754 "cheri.ptrace_forge_cap", "capability"
755 },
756 NULL_REGSET
757 };
758
759 static struct regsets_info aarch64_regsets_info =
760 {
761 aarch64_regsets, /* regsets */
762 0, /* num_regsets */
763 NULL, /* disabled_regsets */
764 };
765
766 static struct regs_info regs_info_aarch64 =
767 {
768 NULL, /* regset_bitmap */
769 NULL, /* usrregs */
770 &aarch64_regsets_info,
771 };
772
773 static struct regset_info aarch64_sve_regsets[] =
774 {
775 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
776 sizeof (struct user_pt_regs), GENERAL_REGS,
777 aarch64_fill_gregset, aarch64_store_gregset },
778 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_SVE,
779 SVE_PT_SIZE (AARCH64_MAX_SVE_VQ, SVE_PT_REGS_SVE), EXTENDED_REGS,
780 aarch64_sve_regs_copy_from_regcache, aarch64_sve_regs_copy_to_regcache
781 },
782 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_PAC_MASK,
783 AARCH64_PAUTH_REGS_SIZE, OPTIONAL_REGS,
784 NULL, aarch64_store_pauthregset },
785 /* FIXME-Morello: Fixup the register set size. */
786 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_ARM_MORELLO,
787 AARCH64_LINUX_CREGS_SIZE, OPTIONAL_REGS,
788 aarch64_fill_cregset, aarch64_store_cregset, nullptr,
789 "cheri.ptrace_forge_cap", "capability"
790 },
791 NULL_REGSET
792 };
793
794 static struct regsets_info aarch64_sve_regsets_info =
795 {
796 aarch64_sve_regsets, /* regsets. */
797 0, /* num_regsets. */
798 NULL, /* disabled_regsets. */
799 };
800
801 static struct regs_info regs_info_aarch64_sve =
802 {
803 NULL, /* regset_bitmap. */
804 NULL, /* usrregs. */
805 &aarch64_sve_regsets_info,
806 };
807
808 /* Matches HWCAP_PACA in kernel header arch/arm64/include/uapi/asm/hwcap.h. */
809 #define AARCH64_HWCAP_PACA (1 << 30)
810
811 /* Implementation of linux target ops method "low_arch_setup". */
812
813 void
814 aarch64_target::low_arch_setup ()
815 {
816 unsigned int machine;
817 int is_elf64;
818 int tid;
819
820 tid = lwpid_of (current_thread);
821
822 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
823
824 if (is_elf64)
825 {
826 uint64_t vq = aarch64_sve_get_vq (tid);
827 unsigned long hwcap = linux_get_hwcap (8);
828 unsigned long hwcap2 = linux_get_hwcap2 (8);
829 bool pauth_p = hwcap & AARCH64_HWCAP_PACA;
830 bool capability_p = hwcap2 & HWCAP2_MORELLO;
831
832 current_process ()->tdesc = aarch64_linux_read_description (vq, pauth_p,
833 capability_p);
834
835 /* Re-enable warnings for register sets with sysctl settings. */
836 aarch64_regsets[3].sysctl_write_should_warn = true;
837 aarch64_sve_regsets[3].sysctl_write_should_warn = true;
838 }
839 else
840 current_process ()->tdesc = aarch32_linux_read_description ();
841
842 aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
843 }
844
845
846
847 /* Implementation of linux target ops method "get_regs_info". */
848
849 const regs_info *
850 aarch64_target::get_regs_info ()
851 {
852 if (!is_64bit_tdesc ())
853 return &regs_info_aarch32;
854
855 if (is_sve_tdesc ())
856 return &regs_info_aarch64_sve;
857
858 return &regs_info_aarch64;
859 }
860
861 /* Implementation of target ops method "supports_tracepoints". */
862
863 bool
864 aarch64_target::supports_tracepoints ()
865 {
866 if (current_thread == NULL)
867 return true;
868 else
869 {
870 /* We don't support tracepoints on aarch32 now. */
871 return is_64bit_tdesc ();
872 }
873 }
874
875 /* Implementation of linux target ops method "low_get_thread_area". */
876
877 int
878 aarch64_target::low_get_thread_area (int lwpid, CORE_ADDR *addrp)
879 {
880 struct iovec iovec;
881 uint64_t reg;
882
883 iovec.iov_base = &reg;
884 iovec.iov_len = sizeof (reg);
885
886 if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
887 return -1;
888
889 *addrp = reg;
890
891 return 0;
892 }
893
894 bool
895 aarch64_target::low_supports_catch_syscall ()
896 {
897 return true;
898 }
899
900 /* Implementation of linux target ops method "low_get_syscall_trapinfo". */
901
902 void
903 aarch64_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
904 {
905 int use_64bit = register_size (regcache->tdesc, 0) == 8;
906
907 if (use_64bit)
908 {
909 long l_sysno;
910
911 collect_register_by_name (regcache, "x8", &l_sysno);
912 *sysno = (int) l_sysno;
913 }
914 else
915 collect_register_by_name (regcache, "r7", sysno);
916 }
917
918 static const struct link_map_offsets lmo_64bit_morello_offsets =
919 {
920 0, /* r_version offset. */
921 16, /* r_debug.r_map offset. */
922 0, /* l_addr offset in link_map. */
923 16, /* l_name offset in link_map. */
924 32, /* l_ld offset in link_map. */
925 48, /* l_next offset in link_map. */
926 64 /* l_prev offset in link_map. */
927 };
928
929 const struct link_map_offsets *
930 aarch64_target::low_fetch_linkmap_offsets (int is_elf64)
931 {
932 if (is_elf64)
933 {
934 CORE_ADDR entry_addr = linux_get_at_entry (8);
935
936 /* If the LSB of AT_ENTRY is 1, then we have a pure capability Morello
937 ELF. */
938 if (entry_addr & 1)
939 return &lmo_64bit_morello_offsets;
940 }
941
942 return linux_process_target::low_fetch_linkmap_offsets (is_elf64);
943 }
944
945 /* List of condition codes that we need. */
946
947 enum aarch64_condition_codes
948 {
949 EQ = 0x0,
950 NE = 0x1,
951 LO = 0x3,
952 GE = 0xa,
953 LT = 0xb,
954 GT = 0xc,
955 LE = 0xd,
956 };
957
958 enum aarch64_operand_type
959 {
960 OPERAND_IMMEDIATE,
961 OPERAND_REGISTER,
962 };
963
964 /* Representation of an operand. At this time, it only supports register
965 and immediate types. */
966
967 struct aarch64_operand
968 {
969 /* Type of the operand. */
970 enum aarch64_operand_type type;
971
972 /* Value of the operand according to the type. */
973 union
974 {
975 uint32_t imm;
976 struct aarch64_register reg;
977 };
978 };
979
980 /* List of registers that we are currently using; we can add more here as
981 we need them. */
982
983 /* General purpose scratch registers (64 bit). */
984 static const struct aarch64_register x0 = { 0, 1 };
985 static const struct aarch64_register x1 = { 1, 1 };
986 static const struct aarch64_register x2 = { 2, 1 };
987 static const struct aarch64_register x3 = { 3, 1 };
988 static const struct aarch64_register x4 = { 4, 1 };
989
990 /* General purpose scratch registers (32 bit). */
991 static const struct aarch64_register w0 = { 0, 0 };
992 static const struct aarch64_register w2 = { 2, 0 };
993
994 /* Intra-procedure scratch registers. */
995 static const struct aarch64_register ip0 = { 16, 1 };
996
997 /* Special purpose registers. */
998 static const struct aarch64_register fp = { 29, 1 };
999 static const struct aarch64_register lr = { 30, 1 };
1000 static const struct aarch64_register sp = { 31, 1 };
1001 static const struct aarch64_register xzr = { 31, 1 };
1002
1003 /* Construct a register descriptor at run time. If we know the register
1004 statically, we should make it a global as above instead of using this
1005 helper function. */
1006
1007 static struct aarch64_register
1008 aarch64_register (unsigned num, int is64)
1009 {
1010 return (struct aarch64_register) { num, is64 };
1011 }
1012
1013 /* Helper function to create a register operand, for instructions with
1014 different types of operands.
1015
1016 For example:
1017 p += emit_mov (p, x0, register_operand (x1)); */
1018
1019 static struct aarch64_operand
1020 register_operand (struct aarch64_register reg)
1021 {
1022 struct aarch64_operand operand;
1023
1024 operand.type = OPERAND_REGISTER;
1025 operand.reg = reg;
1026
1027 return operand;
1028 }
1029
1030 /* Helper function to create an immediate operand, for instructions with
1031 different types of operands.
1032
1033 For example:
1034 p += emit_mov (p, x0, immediate_operand (12)); */
1035
1036 static struct aarch64_operand
1037 immediate_operand (uint32_t imm)
1038 {
1039 struct aarch64_operand operand;
1040
1041 operand.type = OPERAND_IMMEDIATE;
1042 operand.imm = imm;
1043
1044 return operand;
1045 }
1046
1047 /* Helper function to create an offset memory operand.
1048
1049 For example:
1050 p += emit_ldr (p, x0, sp, offset_memory_operand (16)); */
1051
1052 static struct aarch64_memory_operand
1053 offset_memory_operand (int32_t offset)
1054 {
1055 return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
1056 }
1057
1058 /* Helper function to create a pre-index memory operand.
1059
1060 For example:
1061 p += emit_ldr (p, x0, sp, preindex_memory_operand (16)); */
1062
1063 static struct aarch64_memory_operand
1064 preindex_memory_operand (int32_t index)
1065 {
1066 return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
1067 }
1068
1069 /* Helper function to create a post-index memory operand.
1070
1071 For example:
1072 p += emit_ldr (p, x0, sp, postindex_memory_operand (16)); */
1073
1074 static struct aarch64_memory_operand
1075 postindex_memory_operand (int32_t index)
1076 {
1077 return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
1078 }
1079
1080 /* System control registers. These special registers can be written and
1081 read with the MRS and MSR instructions.
1082
1083 - NZCV: Condition flags. GDB refers to this register under the CPSR
1084 name.
1085 - FPSR: Floating-point status register.
1086 - FPCR: Floating-point control register.
1087 - TPIDR_EL0: Software thread ID register. */
1088
1089 enum aarch64_system_control_registers
1090 {
1091 /* op0 op1 crn crm op2 */
1092 NZCV = (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
1093 FPSR = (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
1094 FPCR = (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
1095 TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
1096 };
1097
1098 /* Write a BLR instruction into *BUF.
1099
1100 BLR rn
1101
1102 RN is the register to branch to. */
1103
1104 static int
1105 emit_blr (uint32_t *buf, struct aarch64_register rn)
1106 {
1107 return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
1108 }
1109
1110 /* Write a RET instruction into *BUF.
1111
1112 RET xn
1113
1114 RN is the register to branch to. */
1115
1116 static int
1117 emit_ret (uint32_t *buf, struct aarch64_register rn)
1118 {
1119 return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
1120 }
1121
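/* Helper function emitting a load/store pair instruction (LDP/STP).
   OPERAND.INDEX is a byte offset scaled by 8 into the 7-bit immediate
   field, so it must be a multiple of 8 in the -512 .. 504 range. */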
1122 static int
1123 emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
1124 struct aarch64_register rt,
1125 struct aarch64_register rt2,
1126 struct aarch64_register rn,
1127 struct aarch64_memory_operand operand)
1128 {
1129 uint32_t opc;
1130 uint32_t pre_index;
1131 uint32_t write_back;
1132
1133 if (rt.is64)
1134 opc = ENCODE (2, 2, 30);
1135 else
1136 opc = ENCODE (0, 2, 30);
1137
1138 switch (operand.type)
1139 {
1140 case MEMORY_OPERAND_OFFSET:
1141 {
1142 pre_index = ENCODE (1, 1, 24);
1143 write_back = ENCODE (0, 1, 23);
1144 break;
1145 }
1146 case MEMORY_OPERAND_POSTINDEX:
1147 {
1148 pre_index = ENCODE (0, 1, 24);
1149 write_back = ENCODE (1, 1, 23);
1150 break;
1151 }
1152 case MEMORY_OPERAND_PREINDEX:
1153 {
1154 pre_index = ENCODE (1, 1, 24);
1155 write_back = ENCODE (1, 1, 23);
1156 break;
1157 }
1158 default:
1159 return 0;
1160 }
1161
1162 return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
1163 | ENCODE (operand.index >> 3, 7, 15)
1164 | ENCODE (rt2.num, 5, 10)
1165 | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
1166 }
1167
1168 /* Write a STP instruction into *BUF.
1169
1170 STP rt, rt2, [rn, #offset]
1171 STP rt, rt2, [rn, #index]!
1172 STP rt, rt2, [rn], #index
1173
1174 RT and RT2 are the registers to store.
1175 RN is the base address register.
1176 OFFSET is the immediate to add to the base address. It is limited to a
1177 -512 .. 504 range (7 bits << 3). */
1178
1179 static int
1180 emit_stp (uint32_t *buf, struct aarch64_register rt,
1181 struct aarch64_register rt2, struct aarch64_register rn,
1182 struct aarch64_memory_operand operand)
1183 {
1184 return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
1185 }
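
/* For example, a prologue-style push of the frame pointer and link register
   could be emitted as (a sketch; not a call that appears verbatim here):

     p += emit_stp (p, fp, lr, sp, preindex_memory_operand (-16));

   which assembles to "stp x29, x30, [sp, #-16]!". */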
1186
1187 /* Write a LDP instruction into *BUF.
1188
1189 LDP rt, rt2, [rn, #offset]
1190 LDP rt, rt2, [rn, #index]!
1191 LDP rt, rt2, [rn], #index
1192
1193 RT and RT2 are the registers to load.
1194 RN is the base address register.
1195 OFFSET is the immediate to add to the base address. It is limited to a
1196 -512 .. 504 range (7 bits << 3). */
1197
1198 static int
1199 emit_ldp (uint32_t *buf, struct aarch64_register rt,
1200 struct aarch64_register rt2, struct aarch64_register rn,
1201 struct aarch64_memory_operand operand)
1202 {
1203 return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
1204 }
1205
1206 /* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.
1207
1208 LDP qt, qt2, [rn, #offset]
1209
1210 RT and RT2 are the Q registers to load.
1211 RN is the base address register.
1212 OFFSET is the immediate to add to the base address. It is limited to
1213 -1024 .. 1008 range (7 bits << 4). */
1214
1215 static int
1216 emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
1217 struct aarch64_register rn, int32_t offset)
1218 {
1219 uint32_t opc = ENCODE (2, 2, 30);
1220 uint32_t pre_index = ENCODE (1, 1, 24);
1221
1222 return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
1223 | ENCODE (offset >> 4, 7, 15)
1224 | ENCODE (rt2, 5, 10)
1225 | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
1226 }
1227
1228 /* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.
1229
1230 STP qt, qt2, [rn, #offset]
1231
1232 RT and RT2 are the Q registers to store.
1233 RN is the base address register.
1234 OFFSET is the immediate to add to the base address. It is limited to
1235 -1024 .. 1008 range (7 bits << 4). */
1236
1237 static int
1238 emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
1239 struct aarch64_register rn, int32_t offset)
1240 {
1241 uint32_t opc = ENCODE (2, 2, 30);
1242 uint32_t pre_index = ENCODE (1, 1, 24);
1243
1244 return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
1245 | ENCODE (offset >> 4, 7, 15)
1246 | ENCODE (rt2, 5, 10)
1247 | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
1248 }
1249
1250 /* Write a LDRH instruction into *BUF.
1251
1252 LDRH wt, [xn, #offset]
1253 LDRH wt, [xn, #index]!
1254 LDRH wt, [xn], #index
1255
1256 RT is the register to load into.
1257 RN is the base address register.
1258 OFFSET is the immediate to add to the base address. It is limited to
1259 0 .. 32760 range (12 bits << 3). */
1260
1261 static int
1262 emit_ldrh (uint32_t *buf, struct aarch64_register rt,
1263 struct aarch64_register rn,
1264 struct aarch64_memory_operand operand)
1265 {
1266 return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
1267 }
1268
1269 /* Write a LDRB instruction into *BUF.
1270
1271 LDRB wt, [xn, #offset]
1272 LDRB wt, [xn, #index]!
1273 LDRB wt, [xn], #index
1274
1275 RT is the register to load into.
1276 RN is the base address register.
1277 OFFSET is the immediate to add to the base address. It is limited to
1278 0 .. 32760 range (12 bits << 3). */
1279
1280 static int
1281 emit_ldrb (uint32_t *buf, struct aarch64_register rt,
1282 struct aarch64_register rn,
1283 struct aarch64_memory_operand operand)
1284 {
1285 return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
1286 }
1287
1288
1289
1290 /* Write a STR instruction into *BUF.
1291
1292 STR rt, [rn, #offset]
1293 STR rt, [rn, #index]!
1294 STR rt, [rn], #index
1295
1296 RT is the register to store.
1297 RN is the base address register.
1298 OFFSET is the immediate to add to the base address. It is limited to
1299 0 .. 32760 range (12 bits << 3). */
1300
1301 static int
1302 emit_str (uint32_t *buf, struct aarch64_register rt,
1303 struct aarch64_register rn,
1304 struct aarch64_memory_operand operand)
1305 {
1306 return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
1307 }
1308
1309 /* Helper function emitting an exclusive load or store instruction. */
1310
1311 static int
1312 emit_load_store_exclusive (uint32_t *buf, uint32_t size,
1313 enum aarch64_opcodes opcode,
1314 struct aarch64_register rs,
1315 struct aarch64_register rt,
1316 struct aarch64_register rt2,
1317 struct aarch64_register rn)
1318 {
1319 return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
1320 | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
1321 | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
1322 }
1323
1324 /* Write a LDAXR instruction into *BUF.
1325
1326 LDAXR rt, [xn]
1327
1328 RT is the destination register.
1329 RN is the base address register. */
1330
1331 static int
1332 emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
1333 struct aarch64_register rn)
1334 {
1335 return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
1336 xzr, rn);
1337 }
1338
1339 /* Write a STXR instruction into *BUF.
1340
1341 STXR ws, rt, [xn]
1342
1343 RS is the result register; it indicates whether the store succeeded.
1344 RT is the destination register.
1345 RN is the base address register. */
1346
1347 static int
1348 emit_stxr (uint32_t *buf, struct aarch64_register rs,
1349 struct aarch64_register rt, struct aarch64_register rn)
1350 {
1351 return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
1352 xzr, rn);
1353 }
1354
1355 /* Write a STLR instruction into *BUF.
1356
1357 STLR rt, [xn]
1358
1359 RT is the register to store.
1360 RN is the base address register. */
1361
1362 static int
1363 emit_stlr (uint32_t *buf, struct aarch64_register rt,
1364 struct aarch64_register rn)
1365 {
1366 return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
1367 xzr, rn);
1368 }
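
/* LDAXR/STXR together form the usual load-exclusive/store-exclusive retry
   loop. A sketch using the helpers above (hypothetical labels; this exact
   sequence is not emitted verbatim here):

     again:
       ldaxr x1, [x0]        ; p += emit_ldaxr (p, x1, x0);
       ...modify x1...
       stxr  w2, x1, [x0]    ; p += emit_stxr (p, w2, x1, x0);
       cbnz  w2, again       ; retry if the store-exclusive failed
   */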
1369
1370 /* Helper function for data processing instructions with register sources. */
1371
1372 static int
1373 emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
1374 struct aarch64_register rd,
1375 struct aarch64_register rn,
1376 struct aarch64_register rm)
1377 {
1378 uint32_t size = ENCODE (rd.is64, 1, 31);
1379
1380 return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
1381 | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
1382 }
1383
1384 /* Helper function for data processing instructions taking either a register
1385 or an immediate. */
1386
1387 static int
1388 emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
1389 struct aarch64_register rd,
1390 struct aarch64_register rn,
1391 struct aarch64_operand operand)
1392 {
1393 uint32_t size = ENCODE (rd.is64, 1, 31);
1394 /* The opcode is different for register and immediate source operands. */
1395 uint32_t operand_opcode;
1396
1397 if (operand.type == OPERAND_IMMEDIATE)
1398 {
1399 /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
1400 operand_opcode = ENCODE (8, 4, 25);
1401
1402 return aarch64_emit_insn (buf, opcode | operand_opcode | size
1403 | ENCODE (operand.imm, 12, 10)
1404 | ENCODE (rn.num, 5, 5)
1405 | ENCODE (rd.num, 5, 0));
1406 }
1407 else
1408 {
1409 /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
1410 operand_opcode = ENCODE (5, 4, 25);
1411
1412 return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
1413 rn, operand.reg);
1414 }
1415 }
1416
1417 /* Write an ADD instruction into *BUF.
1418
1419 ADD rd, rn, #imm
1420 ADD rd, rn, rm
1421
1422 This function handles both an immediate and register add.
1423
1424 RD is the destination register.
1425 RN is the input register.
1426 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1427 OPERAND_REGISTER. */
1428
1429 static int
1430 emit_add (uint32_t *buf, struct aarch64_register rd,
1431 struct aarch64_register rn, struct aarch64_operand operand)
1432 {
1433 return emit_data_processing (buf, ADD, rd, rn, operand);
1434 }
1435
1436 /* Write a SUB instruction into *BUF.
1437
1438 SUB rd, rn, #imm
1439 SUB rd, rn, rm
1440
1441 This function handles both an immediate and register sub.
1442
1443 RD is the destination register.
1444 RN is the input register.
1445 OPERAND is the immediate or register to subtract from RN. */
1446
1447 static int
1448 emit_sub (uint32_t *buf, struct aarch64_register rd,
1449 struct aarch64_register rn, struct aarch64_operand operand)
1450 {
1451 return emit_data_processing (buf, SUB, rd, rn, operand);
1452 }
1453
1454 /* Write a MOV instruction into *BUF.
1455
1456 MOV rd, #imm
1457 MOV rd, rm
1458
1459 This function handles both a wide immediate move and a register move,
1460 with the condition that the source register is not xzr. xzr and the
1461 stack pointer share the same encoding and this function only supports
1462 the stack pointer.
1463
1464 RD is the destination register.
1465 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1466 OPERAND_REGISTER. */
1467
1468 static int
1469 emit_mov (uint32_t *buf, struct aarch64_register rd,
1470 struct aarch64_operand operand)
1471 {
1472 if (operand.type == OPERAND_IMMEDIATE)
1473 {
1474 uint32_t size = ENCODE (rd.is64, 1, 31);
1475 /* Do not shift the immediate. */
1476 uint32_t shift = ENCODE (0, 2, 21);
1477
1478 return aarch64_emit_insn (buf, MOV | size | shift
1479 | ENCODE (operand.imm, 16, 5)
1480 | ENCODE (rd.num, 5, 0));
1481 }
1482 else
1483 return emit_add (buf, rd, operand.reg, immediate_operand (0));
1484 }
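
/* For example, emit_mov (p, x0, register_operand (x1)) is encoded as
   "add x0, x1, #0", the MOV (to/from SP) alias, which is why the source
   register must not be xzr (see the comment above). */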
1485
1486 /* Write a MOVK instruction into *BUF.
1487
1488 MOVK rd, #imm, lsl #shift
1489
1490 RD is the destination register.
1491 IMM is the immediate.
1492 SHIFT is the logical shift left to apply to IMM. */
1493
1494 static int
1495 emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
1496 unsigned shift)
1497 {
1498 uint32_t size = ENCODE (rd.is64, 1, 31);
1499
1500 return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21) |
1501 ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
1502 }
1503
1504 /* Write instructions into *BUF in order to move ADDR into a register.
1505 ADDR can be a 64-bit value.
1506
1507 This function will emit a series of MOV and MOVK instructions, such as:
1508
1509 MOV xd, #(addr)
1510 MOVK xd, #(addr >> 16), lsl #16
1511 MOVK xd, #(addr >> 32), lsl #32
1512 MOVK xd, #(addr >> 48), lsl #48 */
1513
1514 static int
1515 emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
1516 {
1517 uint32_t *p = buf;
1518
1519 /* The MOV (wide immediate) instruction clears the top bits of the
1520 register. */
1521 p += emit_mov (p, rd, immediate_operand (addr & 0xffff));
1522
1523 if ((addr >> 16) != 0)
1524 p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
1525 else
1526 return p - buf;
1527
1528 if ((addr >> 32) != 0)
1529 p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
1530 else
1531 return p - buf;
1532
1533 if ((addr >> 48) != 0)
1534 p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);
1535
1536 return p - buf;
1537 }
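
/* For example, for ADDR == 0x0000007fb7ff1000 (a made-up address, purely
   for illustration) the sequence emitted above would be:

     MOV  xd, #0x1000
     MOVK xd, #0xb7ff, lsl #16
     MOVK xd, #0x7f, lsl #32

   stopping there because bits 48..63 of the address are zero. */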
1538
1539 /* Write a SUBS instruction into *BUF.
1540
1541 SUBS rd, rn, rm
1542
1543 This instruction updates the condition flags.
1544
1545 RD is the destination register.
1546 RN and RM are the source registers. */
1547
1548 static int
1549 emit_subs (uint32_t *buf, struct aarch64_register rd,
1550 struct aarch64_register rn, struct aarch64_operand operand)
1551 {
1552 return emit_data_processing (buf, SUBS, rd, rn, operand);
1553 }
1554
1555 /* Write a CMP instruction into *BUF.
1556
1557 CMP rn, rm
1558
1559 This instruction is an alias of SUBS xzr, rn, rm.
1560
1561 RN and RM are the registers to compare. */
1562
1563 static int
1564 emit_cmp (uint32_t *buf, struct aarch64_register rn,
1565 struct aarch64_operand operand)
1566 {
1567 return emit_subs (buf, xzr, rn, operand);
1568 }
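
/* For example, emit_cmp (p, x0, immediate_operand (0)) emits
   "subs xzr, x0, #0", i.e. "cmp x0, #0", setting the condition flags for a
   following conditional branch or CSET. */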
1569
1570 /* Write an AND instruction into *BUF.
1571
1572 AND rd, rn, rm
1573
1574 RD is the destination register.
1575 RN and RM are the source registers. */
1576
1577 static int
1578 emit_and (uint32_t *buf, struct aarch64_register rd,
1579 struct aarch64_register rn, struct aarch64_register rm)
1580 {
1581 return emit_data_processing_reg (buf, AND, rd, rn, rm);
1582 }
1583
1584 /* Write an ORR instruction into *BUF.
1585
1586 ORR rd, rn, rm
1587
1588 RD is the destination register.
1589 RN and RM are the source registers. */
1590
1591 static int
1592 emit_orr (uint32_t *buf, struct aarch64_register rd,
1593 struct aarch64_register rn, struct aarch64_register rm)
1594 {
1595 return emit_data_processing_reg (buf, ORR, rd, rn, rm);
1596 }
1597
1598 /* Write an ORN instruction into *BUF.
1599
1600 ORN rd, rn, rm
1601
1602 RD is the destination register.
1603 RN and RM are the source registers. */
1604
1605 static int
1606 emit_orn (uint32_t *buf, struct aarch64_register rd,
1607 struct aarch64_register rn, struct aarch64_register rm)
1608 {
1609 return emit_data_processing_reg (buf, ORN, rd, rn, rm);
1610 }
1611
1612 /* Write an EOR instruction into *BUF.
1613
1614 EOR rd, rn, rm
1615
1616 RD is the destination register.
1617 RN and RM are the source registers. */
1618
1619 static int
1620 emit_eor (uint32_t *buf, struct aarch64_register rd,
1621 struct aarch64_register rn, struct aarch64_register rm)
1622 {
1623 return emit_data_processing_reg (buf, EOR, rd, rn, rm);
1624 }
1625
1626 /* Write a MVN instruction into *BUF.
1627
1628 MVN rd, rm
1629
1630 This is an alias for ORN rd, xzr, rm.
1631
1632 RD is the destination register.
1633 RM is the source register. */
1634
1635 static int
1636 emit_mvn (uint32_t *buf, struct aarch64_register rd,
1637 struct aarch64_register rm)
1638 {
1639 return emit_orn (buf, rd, xzr, rm);
1640 }
1641
1642 /* Write a LSLV instruction into *BUF.
1643
1644 LSLV rd, rn, rm
1645
1646 RD is the destination register.
1647 RN and RM are the source registers. */
1648
1649 static int
1650 emit_lslv (uint32_t *buf, struct aarch64_register rd,
1651 struct aarch64_register rn, struct aarch64_register rm)
1652 {
1653 return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
1654 }
1655
1656 /* Write a LSRV instruction into *BUF.
1657
1658 LSRV rd, rn, rm
1659
1660 RD is the destination register.
1661 RN and RM are the source registers. */
1662
1663 static int
1664 emit_lsrv (uint32_t *buf, struct aarch64_register rd,
1665 struct aarch64_register rn, struct aarch64_register rm)
1666 {
1667 return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
1668 }
1669
1670 /* Write an ASRV instruction into *BUF.
1671
1672 ASRV rd, rn, rm
1673
1674 RD is the destination register.
1675 RN and RM are the source registers. */
1676
1677 static int
1678 emit_asrv (uint32_t *buf, struct aarch64_register rd,
1679 struct aarch64_register rn, struct aarch64_register rm)
1680 {
1681 return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
1682 }
1683
1684 /* Write a MUL instruction into *BUF.
1685
1686 MUL rd, rn, rm
1687
1688 RD is the destination register.
1689 RN and RM are the source registers. */
1690
1691 static int
1692 emit_mul (uint32_t *buf, struct aarch64_register rd,
1693 struct aarch64_register rn, struct aarch64_register rm)
1694 {
1695 return emit_data_processing_reg (buf, MUL, rd, rn, rm);
1696 }
1697
1698 /* Write a MRS instruction into *BUF. The register size is 64-bit.
1699
1700 MRS xt, system_reg
1701
1702 RT is the destination register.
1703 SYSTEM_REG is the special purpose register to read. */
1704
1705 static int
1706 emit_mrs (uint32_t *buf, struct aarch64_register rt,
1707 enum aarch64_system_control_registers system_reg)
1708 {
1709 return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
1710 | ENCODE (rt.num, 5, 0));
1711 }
1712
1713 /* Write a MSR instruction into *BUF. The register size is 64-bit.
1714
1715 MSR system_reg, xt
1716
1717 SYSTEM_REG is the special purpose register to write.
1718 RT is the input register. */
1719
1720 static int
1721 emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
1722 struct aarch64_register rt)
1723 {
1724 return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
1725 | ENCODE (rt.num, 5, 0));
1726 }
1727
1728 /* Write a SEVL instruction into *BUF.
1729
1730 This is a hint instruction telling the hardware to trigger an event. */
1731
1732 static int
1733 emit_sevl (uint32_t *buf)
1734 {
1735 return aarch64_emit_insn (buf, SEVL);
1736 }
1737
1738 /* Write a WFE instruction into *BUF.
1739
1740 This is a hint instruction telling the hardware to wait for an event. */
1741
1742 static int
1743 emit_wfe (uint32_t *buf)
1744 {
1745 return aarch64_emit_insn (buf, WFE);
1746 }
1747
1748 /* Write a SBFM instruction into *BUF.
1749
1750 SBFM rd, rn, #immr, #imms
1751
1752 This instruction moves the bits from #immr to #imms into the
1753 destination, sign extending the result.
1754
1755 RD is the destination register.
1756 RN is the source register.
1757 IMMR is the bit number to start at (least significant bit).
1758 IMMS is the bit number to stop at (most significant bit). */
1759
1760 static int
1761 emit_sbfm (uint32_t *buf, struct aarch64_register rd,
1762 struct aarch64_register rn, uint32_t immr, uint32_t imms)
1763 {
1764 uint32_t size = ENCODE (rd.is64, 1, 31);
1765 uint32_t n = ENCODE (rd.is64, 1, 22);
1766
1767 return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
1768 | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
1769 | ENCODE (rd.num, 5, 0));
1770 }
1771
1772 /* Write a SBFX instruction into *BUF.
1773
1774 SBFX rd, rn, #lsb, #width
1775
1776 This instruction moves #width bits from #lsb into the destination, sign
1777 extending the result. This is an alias for:
1778
1779 SBFM rd, rn, #lsb, #(lsb + width - 1)
1780
1781 RD is the destination register.
1782 RN is the source register.
1783 LSB is the bit number to start at (least significant bit).
1784 WIDTH is the number of bits to move. */
1785
1786 static int
1787 emit_sbfx (uint32_t *buf, struct aarch64_register rd,
1788 struct aarch64_register rn, uint32_t lsb, uint32_t width)
1789 {
1790 return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
1791 }
1792
1793 /* Write a UBFM instruction into *BUF.
1794
1795 UBFM rd, rn, #immr, #imms
1796
1797 This instruction moves the bits from #immr to #imms into the
1798 destination, extending the result with zeros.
1799
1800 RD is the destination register.
1801 RN is the source register.
1802 IMMR is the bit number to start at (least significant bit).
1803 IMMS is the bit number to stop at (most significant bit). */
1804
1805 static int
1806 emit_ubfm (uint32_t *buf, struct aarch64_register rd,
1807 struct aarch64_register rn, uint32_t immr, uint32_t imms)
1808 {
1809 uint32_t size = ENCODE (rd.is64, 1, 31);
1810 uint32_t n = ENCODE (rd.is64, 1, 22);
1811
1812 return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
1813 | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
1814 | ENCODE (rd.num, 5, 0));
1815 }
1816
1817 /* Write a UBFX instruction into *BUF.
1818
1819 UBFX rd, rn, #lsb, #width
1820
1821 This instruction moves #width bits from #lsb into the destination,
1822 extending the result with zeros. This is an alias for:
1823
1824 UBFM rd, rn, #lsb, #(lsb + width - 1)
1825
1826 RD is the destination register.
1827 RN is the source register.
1828 LSB is the bit number to start at (least significant bit).
1829 WIDTH is the number of bits to move. */
1830
1831 static int
1832 emit_ubfx (uint32_t *buf, struct aarch64_register rd,
1833 struct aarch64_register rn, uint32_t lsb, uint32_t width)
1834 {
1835 return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
1836 }
1837
1838 /* Write a CSINC instruction into *BUF.
1839
1840 CSINC rd, rn, rm, cond
1841
1842 This instruction writes RN to RD if the condition is true, and RM + 1
1843 to RD otherwise.
1844
1845 RD is the destination register.
1846 RN and RM are the source registers.
1847 COND is the encoded condition. */
1848
1849 static int
1850 emit_csinc (uint32_t *buf, struct aarch64_register rd,
1851 struct aarch64_register rn, struct aarch64_register rm,
1852 unsigned cond)
1853 {
1854 uint32_t size = ENCODE (rd.is64, 1, 31);
1855
1856 return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
1857 | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
1858 | ENCODE (rd.num, 5, 0));
1859 }
1860
1861 /* Write a CSET instruction into *BUF.
1862
1863 CSET rd, cond
1864
1865 This instruction conditionally writes 1 or 0 to the destination register.
1866 1 is written if the condition is true. This is an alias for:
1867
1868 CSINC rd, xzr, xzr, !cond
1869
1870 Note that the condition needs to be inverted.
1871
1872 RD is the destination register.
1873 COND is the encoded condition. */
1875
1876 static int
1877 emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
1878 {
1879 /* The least significant bit of the condition needs toggling in order to
1880 invert it. */
1881 return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
1882 }
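
/* For example, emit_cset (p, x0, EQ) emits "csinc x0, xzr, xzr, ne" (note
   the inverted condition), so x0 becomes 1 if the previous comparison was
   equal and 0 otherwise. */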
1883
1884 /* Write LEN instructions from BUF into the inferior memory at *TO.
1885
1886 Note instructions are always little endian on AArch64, unlike data. */
1887
1888 static void
1889 append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
1890 {
1891 size_t byte_len = len * sizeof (uint32_t);
1892 #if (__BYTE_ORDER == __BIG_ENDIAN)
1893 uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
1894 size_t i;
1895
1896 for (i = 0; i < len; i++)
1897 le_buf[i] = htole32 (buf[i]);
1898
1899 target_write_memory (*to, (const unsigned char *) le_buf, byte_len);
1900
1901 xfree (le_buf);
1902 #else
1903 target_write_memory (*to, (const unsigned char *) buf, byte_len);
1904 #endif
1905
1906 *to += byte_len;
1907 }
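
/* A typical caller builds instructions in a local buffer and then copies
   them out in one go. A sketch (jump_pad_addr and target_addr are
   hypothetical, for illustration only):

     uint32_t buf[16];
     uint32_t *p = buf;
     CORE_ADDR to = jump_pad_addr;

     p += emit_mov_addr (p, x0, target_addr);
     p += emit_blr (p, x0);
     append_insns (&to, p - buf, buf);
   */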
1908
1909 /* Sub-class of struct aarch64_insn_data; stores information about
1910 instruction relocation for fast tracepoints. The visitor can
1911 relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
1912 the relocated instructions in the buffer pointed to by INSN_PTR. */
1913
1914 struct aarch64_insn_relocation_data
1915 {
1916 struct aarch64_insn_data base;
1917
1918 /* The new address the instruction is relocated to. */
1919 CORE_ADDR new_addr;
1920 /* Pointer to the buffer of relocated instruction(s). */
1921 uint32_t *insn_ptr;
1922 };
1923
1924 /* Implementation of aarch64_insn_visitor method "b". */
1925
1926 static void
1927 aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
1928 struct aarch64_insn_data *data)
1929 {
1930 struct aarch64_insn_relocation_data *insn_reloc
1931 = (struct aarch64_insn_relocation_data *) data;
1932 int64_t new_offset
1933 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1934
1935 if (can_encode_int32 (new_offset, 28))
1936 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
1937 }
1938
1939 /* Implementation of aarch64_insn_visitor method "b_cond". */
1940
1941 static void
1942 aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
1943 struct aarch64_insn_data *data)
1944 {
1945 struct aarch64_insn_relocation_data *insn_reloc
1946 = (struct aarch64_insn_relocation_data *) data;
1947 int64_t new_offset
1948 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1949
1950 if (can_encode_int32 (new_offset, 21))
1951 {
1952 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
1953 new_offset);
1954 }
1955 else if (can_encode_int32 (new_offset, 28))
1956 {
1957 /* The offset is out of range for a conditional branch
1958 instruction but not for an unconditional branch. We can use
1959 the following instructions instead:
1960
1961 B.COND TAKEN ; If cond is true, then jump to TAKEN.
1962 B NOT_TAKEN ; Else jump over TAKEN and continue.
1963 TAKEN:
1964 B #(offset - 8)
1965 NOT_TAKEN:
1966
1967 */
1968
1969 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
1970 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1971 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
1972 }
1973 }
1974
1975 /* Implementation of aarch64_insn_visitor method "cb". */
1976
1977 static void
1978 aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
1979 const unsigned rn, int is64,
1980 struct aarch64_insn_data *data)
1981 {
1982 struct aarch64_insn_relocation_data *insn_reloc
1983 = (struct aarch64_insn_relocation_data *) data;
1984 int64_t new_offset
1985 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1986
1987 if (can_encode_int32 (new_offset, 21))
1988 {
1989 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1990 aarch64_register (rn, is64), new_offset);
1991 }
1992 else if (can_encode_int32 (new_offset, 28))
1993 {
1994 /* The offset is out of range for a compare and branch
1995 instruction but not for an unconditional branch. We can use
1996 the following instructions instead:
1997
1998 CBZ xn, TAKEN ; If xn == 0, then jump to TAKEN.
1999 B NOT_TAKEN ; Else jump over TAKEN and continue.
2000 TAKEN:
2001 B #(offset - 8)
2002 NOT_TAKEN:
2003
2004 */
2005 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
2006 aarch64_register (rn, is64), 8);
2007 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
2008 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
2009 }
2010 }
2011
2012 /* Implementation of aarch64_insn_visitor method "tb". */
2013
2014 static void
2015 aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
2016 const unsigned rt, unsigned bit,
2017 struct aarch64_insn_data *data)
2018 {
2019 struct aarch64_insn_relocation_data *insn_reloc
2020 = (struct aarch64_insn_relocation_data *) data;
2021 int64_t new_offset
2022 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
2023
2024 if (can_encode_int32 (new_offset, 16))
2025 {
2026 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
2027 aarch64_register (rt, 1), new_offset);
2028 }
2029 else if (can_encode_int32 (new_offset, 28))
2030 {
2031 /* The offset is out of range for a test bit and branch
2032 instruction but not for an unconditional branch. We can use
2033 the following instructions instead:
2034
2035 TBZ xn, #bit, TAKEN ; If xn[bit] == 0, then jump to TAKEN.
2036 B NOT_TAKEN ; Else jump over TAKEN and continue.
2037 TAKEN:
2038 B #(offset - 8)
2039 NOT_TAKEN:
2040
2041 */
2042 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
2043 aarch64_register (rt, 1), 8);
2044 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
2045 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
2046 new_offset - 8);
2047 }
2048 }
2049
2050 /* Implementation of aarch64_insn_visitor method "adr". */
2051
2052 static void
2053 aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
2054 const int is_adrp,
2055 struct aarch64_insn_data *data)
2056 {
2057 struct aarch64_insn_relocation_data *insn_reloc
2058 = (struct aarch64_insn_relocation_data *) data;
2059 /* We know exactly the address the ADR{P,} instruction will compute.
2060 We can just write it to the destination register. */
2061 CORE_ADDR address = data->insn_addr + offset;
2062
2063 if (is_adrp)
2064 {
2065 /* Clear the lower 12 bits of the offset to get the 4K page. */
2066 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
2067 aarch64_register (rd, 1),
2068 address & ~0xfff);
2069 }
2070 else
2071 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
2072 aarch64_register (rd, 1), address);
2073 }
2074
2075 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
2076
2077 static void
2078 aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
2079 const unsigned rt, const int is64,
2080 struct aarch64_insn_data *data)
2081 {
2082 struct aarch64_insn_relocation_data *insn_reloc
2083 = (struct aarch64_insn_relocation_data *) data;
2084 CORE_ADDR address = data->insn_addr + offset;
2085
2086 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
2087 aarch64_register (rt, 1), address);
2088
2089 /* We know exactly what address to load from, and what register we
2090 can use:
2091
2092 MOV xd, #(oldloc + offset)
2093 MOVK xd, #((oldloc + offset) >> 16), lsl #16
2094 ...
2095
2096 LDR xd, [xd] ; or LDRSW xd, [xd]
2097
2098 */
2099
2100 if (is_sw)
2101 insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
2102 aarch64_register (rt, 1),
2103 aarch64_register (rt, 1),
2104 offset_memory_operand (0));
2105 else
2106 insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
2107 aarch64_register (rt, is64),
2108 aarch64_register (rt, 1),
2109 offset_memory_operand (0));
2110 }
2111
2112 /* Implementation of aarch64_insn_visitor method "others". */
2113
2114 static void
2115 aarch64_ftrace_insn_reloc_others (const uint32_t insn,
2116 struct aarch64_insn_data *data)
2117 {
2118 struct aarch64_insn_relocation_data *insn_reloc
2119 = (struct aarch64_insn_relocation_data *) data;
2120
2121 /* The instruction is not PC relative. Just re-emit it at the new
2122 location. */
2123 insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
2124 }
2125
2126 static const struct aarch64_insn_visitor visitor =
2127 {
2128 aarch64_ftrace_insn_reloc_b,
2129 aarch64_ftrace_insn_reloc_b_cond,
2130 aarch64_ftrace_insn_reloc_cb,
2131 aarch64_ftrace_insn_reloc_tb,
2132 aarch64_ftrace_insn_reloc_adr,
2133 aarch64_ftrace_insn_reloc_ldr_literal,
2134 aarch64_ftrace_insn_reloc_others,
2135 };
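/* The visitor is driven from install_fast_tracepoint_jump_pad below,
   roughly as follows (OLD_ADDR, NEW_ADDR and BUF stand for the tracepoint
   address, the jump pad address and a scratch buffer):

     struct aarch64_insn_relocation_data data;

     data.base.insn_addr = old_addr;
     data.new_addr = new_addr;
     data.insn_ptr = buf;
     aarch64_relocate_instruction (insn, &visitor, &data.base);

   On return DATA.INSN_PTR points past the relocated instruction(s), or is
   unchanged if the instruction could not be relocated.  */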
2136
2137 bool
2138 aarch64_target::supports_fast_tracepoints ()
2139 {
2140 return true;
2141 }
2142
2143 /* Implementation of target ops method
2144 "install_fast_tracepoint_jump_pad". */
2145
2146 int
2147 aarch64_target::install_fast_tracepoint_jump_pad
2148 (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
2149 CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
2150 CORE_ADDR *trampoline, ULONGEST *trampoline_size,
2151 unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
2152 CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
2153 char *err)
2154 {
2155 uint32_t buf[256];
2156 uint32_t *p = buf;
2157 int64_t offset;
2158 int i;
2159 uint32_t insn;
2160 CORE_ADDR buildaddr = *jump_entry;
2161 struct aarch64_insn_relocation_data insn_data;
2162
2163 /* We need to save the current state on the stack both to restore it
2164 later and to collect register values when the tracepoint is hit.
2165
2166 The saved registers are pushed in a layout that needs to be in sync
2167 with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c). Later on
2168 the supply_fast_tracepoint_registers function will fill in the
2169 register cache from a pointer to saved registers on the stack we build
2170 here.
2171
2172 For simplicity, we set the size of each cell on the stack to 16 bytes.
2173 This way one cell can hold any register type, from system registers
2174 to the 128 bit SIMD&FP registers. Furthermore, the stack pointer
2175 has to be 16 bytes aligned anyway.
2176
2177 Note that the CPSR register does not exist on AArch64. Instead we
2178 can access system bits describing the process state with the
2179 MRS/MSR instructions, namely the condition flags. We save them as
2180 if they are part of a CPSR register because that's how GDB
2181 interprets these system bits. At the moment, only the condition
2182 flags are saved in CPSR (NZCV).
2183
2184 Stack layout, each cell is 16 bytes (descending):
2185
2186 High *-------- SIMD&FP registers from 31 down to 0. --------*
2187 | q31 |
2188 . .
2189 . . 32 cells
2190 . .
2191 | q0 |
2192 *---- General purpose registers from 30 down to 0. ----*
2193 | x30 |
2194 . .
2195 . . 31 cells
2196 . .
2197 | x0 |
2198 *------------- Special purpose registers. -------------*
2199 | SP |
2200 | PC |
2201 | CPSR (NZCV) | 5 cells
2202 | FPSR |
2203 | FPCR | <- SP + 16
2204 *------------- collecting_t object --------------------*
2205 | TPIDR_EL0 | struct tracepoint * |
2206 Low *------------------------------------------------------*
2207
2208 After this stack is set up, we issue a call to the collector, passing
2209 it the saved registers at (SP + 16). */
2210
2211 /* Push SIMD&FP registers on the stack:
2212
2213 SUB sp, sp, #(32 * 16)
2214
2215 STP q30, q31, [sp, #(30 * 16)]
2216 ...
2217 STP q0, q1, [sp]
2218
2219 */
2220 p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
2221 for (i = 30; i >= 0; i -= 2)
2222 p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);
2223
2224 /* Push general purpose registers on the stack. Note that we do not need
2225 to push x31 as it represents the xzr register and not the stack
2226 pointer in a STR instruction.
2227
2228 SUB sp, sp, #(31 * 16)
2229
2230 STR x30, [sp, #(30 * 16)]
2231 ...
2232 STR x0, [sp]
2233
2234 */
2235 p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
2236 for (i = 30; i >= 0; i -= 1)
2237 p += emit_str (p, aarch64_register (i, 1), sp,
2238 offset_memory_operand (i * 16));
2239
2240 /* Make space for 5 more cells.
2241
2242 SUB sp, sp, #(5 * 16)
2243
2244 */
2245 p += emit_sub (p, sp, sp, immediate_operand (5 * 16));
2246
2247
2248 /* Save SP:
2249
2250 ADD x4, sp, #((32 + 31 + 5) * 16)
2251 STR x4, [sp, #(4 * 16)]
2252
2253 */
2254 p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
2255 p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));
2256
2257 /* Save PC (tracepoint address):
2258
2259 MOV x3, #(tpaddr)
2260 ...
2261
2262 STR x3, [sp, #(3 * 16)]
2263
2264 */
2265
2266 p += emit_mov_addr (p, x3, tpaddr);
2267 p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));
2268
2269 /* Save CPSR (NZCV), FPSR and FPCR:
2270
2271 MRS x2, nzcv
2272 MRS x1, fpsr
2273 MRS x0, fpcr
2274
2275 STR x2, [sp, #(2 * 16)]
2276 STR x1, [sp, #(1 * 16)]
2277 STR x0, [sp, #(0 * 16)]
2278
2279 */
2280 p += emit_mrs (p, x2, NZCV);
2281 p += emit_mrs (p, x1, FPSR);
2282 p += emit_mrs (p, x0, FPCR);
2283 p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
2284 p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
2285 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2286
2287 Push the collecting_t object. It consists of the address of the
2288 tracepoint and an ID for the current thread. We get the latter by
2289 reading the tpidr_el0 system register. It corresponds to the
2290 NT_ARM_TLS register accessible with ptrace.
2291
2292 MOV x0, #(tpoint)
2293 ...
2294
2295 MRS x1, tpidr_el0
2296
2297 STP x0, x1, [sp, #-16]!
2298
2299 */
2300
2301 p += emit_mov_addr (p, x0, tpoint);
2302 p += emit_mrs (p, x1, TPIDR_EL0);
2303 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
2304
2305 /* Spin-lock:
2306
2307 The shared memory for the lock is at lockaddr. It will hold zero
2308 if no-one is holding the lock, otherwise it contains the address of
2309 the collecting_t object on the stack of the thread which acquired it.
2310
2311 At this stage, the stack pointer points to this thread's collecting_t
2312 object.
2313
2314 We use the following registers:
2315 - x0: Address of the lock.
2316 - x1: Pointer to collecting_t object.
2317 - x2: Scratch register.
2318
2319 MOV x0, #(lockaddr)
2320 ...
2321 MOV x1, sp
2322
2323 ; Trigger an event local to this core. So the following WFE
2324 ; instruction is ignored.
2325 SEVL
2326 again:
2327 ; Wait for an event. The event is triggered by either the SEVL
2328 ; or STLR instructions (store release).
2329 WFE
2330
2331 ; Atomically read at lockaddr. This marks the memory location as
2332 ; exclusive. This instruction also has memory constraints which
2333 ; make sure all previous data reads and writes are done before
2334 ; executing it.
2335 LDAXR x2, [x0]
2336
2337 ; Try again if another thread holds the lock.
2338 CBNZ x2, again
2339
2340 ; We can lock it! Write the address of the collecting_t object.
2341 ; This instruction will fail if the memory location is not marked
2342 ; as exclusive anymore. If it succeeds, it will remove the
2343 ; exclusive mark on the memory location. This way, if another
2344 ; thread executes this instruction before us, we will fail and try
2345 ; all over again.
2346 STXR w2, x1, [x0]
2347 CBNZ w2, again
2348
2349 */
2350
2351 p += emit_mov_addr (p, x0, lockaddr);
2352 p += emit_mov (p, x1, register_operand (sp));
2353
2354 p += emit_sevl (p);
2355 p += emit_wfe (p);
2356 p += emit_ldaxr (p, x2, x0);
2357 p += emit_cb (p, 1, w2, -2 * 4);
2358 p += emit_stxr (p, w2, x1, x0);
2359 p += emit_cb (p, 1, x2, -4 * 4);
2360
2361 /* Call collector (struct tracepoint *, unsigned char *):
2362
2363 MOV x0, #(tpoint)
2364 ...
2365
2366 ; Saved registers start after the collecting_t object.
2367 ADD x1, sp, #16
2368
2369 ; We use an intra-procedure-call scratch register.
2370 MOV ip0, #(collector)
2371 ...
2372
2373 ; And call back to C!
2374 BLR ip0
2375
2376 */
2377
2378 p += emit_mov_addr (p, x0, tpoint);
2379 p += emit_add (p, x1, sp, immediate_operand (16));
2380
2381 p += emit_mov_addr (p, ip0, collector);
2382 p += emit_blr (p, ip0);
2383
2384 /* Release the lock.
2385
2386 MOV x0, #(lockaddr)
2387 ...
2388
2389 ; This instruction is a normal store with memory ordering
2390 ; constraints. Thanks to this we do not have to put a data
2391 ; barrier instruction to make sure all data reads and writes are done
2392 ; before this instruction is executed. Furthermore, this instruction
2393 ; will trigger an event, letting other threads know they can grab
2394 ; the lock.
2395 STLR xzr, [x0]
2396
2397 */
2398 p += emit_mov_addr (p, x0, lockaddr);
2399 p += emit_stlr (p, xzr, x0);
2400
2401 /* Free collecting_t object:
2402
2403 ADD sp, sp, #16
2404
2405 */
2406 p += emit_add (p, sp, sp, immediate_operand (16));
2407
2408 /* Restore CPSR (NZCV), FPSR and FPCR. And free all special purpose
2409 registers from the stack.
2410
2411 LDR x2, [sp, #(2 * 16)]
2412 LDR x1, [sp, #(1 * 16)]
2413 LDR x0, [sp, #(0 * 16)]
2414
2415 MSR NZCV, x2
2416 MSR FPSR, x1
2417 MSR FPCR, x0
2418
2419 ADD sp, sp, #(5 * 16)
2420
2421 */
2422 p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
2423 p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
2424 p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
2425 p += emit_msr (p, NZCV, x2);
2426 p += emit_msr (p, FPSR, x1);
2427 p += emit_msr (p, FPCR, x0);
2428
2429 p += emit_add (p, sp, sp, immediate_operand (5 * 16));
2430
2431 /* Pop general purpose registers:
2432
2433 LDR x0, [sp]
2434 ...
2435 LDR x30, [sp, #(30 * 16)]
2436
2437 ADD sp, sp, #(31 * 16)
2438
2439 */
2440 for (i = 0; i <= 30; i += 1)
2441 p += emit_ldr (p, aarch64_register (i, 1), sp,
2442 offset_memory_operand (i * 16));
2443 p += emit_add (p, sp, sp, immediate_operand (31 * 16));
2444
2445 /* Pop SIMD&FP registers:
2446
2447 LDP q0, q1, [sp]
2448 ...
2449 LDP q30, q31, [sp, #(30 * 16)]
2450
2451 ADD sp, sp, #(32 * 16)
2452
2453 */
2454 for (i = 0; i <= 30; i += 2)
2455 p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
2456 p += emit_add (p, sp, sp, immediate_operand (32 * 16));
2457
2458 /* Write the code into the inferior memory. */
2459 append_insns (&buildaddr, p - buf, buf);
2460
2461 /* Now emit the relocated instruction. */
2462 *adjusted_insn_addr = buildaddr;
2463 target_read_uint32 (tpaddr, &insn);
2464
2465 insn_data.base.insn_addr = tpaddr;
2466 insn_data.new_addr = buildaddr;
2467 insn_data.insn_ptr = buf;
2468
2469 aarch64_relocate_instruction (insn, &visitor,
2470 (struct aarch64_insn_data *) &insn_data);
2471
2472 /* We may not have been able to relocate the instruction. */
2473 if (insn_data.insn_ptr == buf)
2474 {
2475 sprintf (err,
2476 "E.Could not relocate instruction from %s to %s.",
2477 core_addr_to_string_nz (tpaddr),
2478 core_addr_to_string_nz (buildaddr));
2479 return 1;
2480 }
2481 else
2482 append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
2483 *adjusted_insn_addr_end = buildaddr;
2484
2485 /* Go back to the start of the buffer. */
2486 p = buf;
2487
2488 /* Emit a branch back from the jump pad. */
2489 offset = (tpaddr + orig_size - buildaddr);
2490 if (!can_encode_int32 (offset, 28))
2491 {
2492 sprintf (err,
2493 "E.Jump back from jump pad too far from tracepoint "
2494 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2495 offset);
2496 return 1;
2497 }
2498
2499 p += emit_b (p, 0, offset);
2500 append_insns (&buildaddr, p - buf, buf);
2501
2502 /* Give the caller a branch instruction into the jump pad. */
2503 offset = (*jump_entry - tpaddr);
2504 if (!can_encode_int32 (offset, 28))
2505 {
2506 sprintf (err,
2507 "E.Jump pad too far from tracepoint "
2508 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2509 offset);
2510 return 1;
2511 }
2512
2513 emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
2514 *jjump_pad_insn_size = 4;
2515
2516 /* Return the end address of our pad. */
2517 *jump_entry = buildaddr;
2518
2519 return 0;
2520 }
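/* At the C level, the locking sequence emitted above corresponds roughly
   to the following sketch (LOCK is the 64-bit word at LOCKADDR, OBJ this
   thread's collecting_t object, REGS the saved register block at SP + 16):

     uint64_t expected = 0;

     while (!__atomic_compare_exchange_n (lock, &expected, (uint64_t) &obj,
                                          false, __ATOMIC_ACQUIRE,
                                          __ATOMIC_RELAXED))
       expected = 0;

     collector (tpoint, regs);

     __atomic_store_n (lock, 0, __ATOMIC_RELEASE);

   The LDAXR/STXR pair provides the compare-and-swap, SEVL/WFE merely
   reduce busy-waiting, and STLR is the releasing store that also wakes
   the other waiters.  */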
2521
2522 /* Helper function writing LEN instructions from START into
2523 current_insn_ptr. */
2524
2525 static void
2526 emit_ops_insns (const uint32_t *start, int len)
2527 {
2528 CORE_ADDR buildaddr = current_insn_ptr;
2529
2530 if (debug_threads)
2531 debug_printf ("Adding %d instructions at %s\n",
2532 len, paddress (buildaddr));
2533
2534 append_insns (&buildaddr, len, start);
2535 current_insn_ptr = buildaddr;
2536 }
2537
2538 /* Pop a register from the stack. */
2539
2540 static int
2541 emit_pop (uint32_t *buf, struct aarch64_register rt)
2542 {
2543 return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
2544 }
2545
2546 /* Push a register on the stack. */
2547
2548 static int
2549 emit_push (uint32_t *buf, struct aarch64_register rt)
2550 {
2551 return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
2552 }
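/* Both helpers use 16-byte stack cells, matching the expression stack set
   up by aarch64_emit_prologue below. For instance

     p += emit_push (p, x0);    STR x0, [sp, #-16]!
     p += emit_pop (p, x1);     LDR x1, [sp], #16

   copies x0 into x1 and leaves SP unchanged.  */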
2553
2554 /* Implementation of emit_ops method "emit_prologue". */
2555
2556 static void
2557 aarch64_emit_prologue (void)
2558 {
2559 uint32_t buf[16];
2560 uint32_t *p = buf;
2561
2562 /* This function emits a prologue for the following function prototype:
2563
2564 enum eval_result_type f (unsigned char *regs,
2565 ULONGEST *value);
2566
2567 The first argument is a buffer of raw registers. The second
2568 argument is the result of evaluating the expression, which will
2569 be set to whatever is on top of the stack at the end of the
2570 evaluation.
2571
2572 The stack set up by the prologue is as such:
2573
2574 High *------------------------------------------------------*
2575 | LR |
2576 | FP | <- FP
2577 | x1 (ULONGEST *value) |
2578 | x0 (unsigned char *regs) |
2579 Low *------------------------------------------------------*
2580
2581 As we are implementing a stack machine, each opcode can expand the
2582 stack so we never know how far we are from the data saved by this
2583 prologue. In order to be able to refer to value and regs later, we save
2584 the current stack pointer in the frame pointer. This way, it is not
2585 clobbered when calling C functions.
2586
2587 Finally, throughout every operation, we are using register x0 as the
2588 top of the stack, and x1 as a scratch register. */
2589
2590 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
2591 p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
2592 p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));
2593
2594 p += emit_add (p, fp, sp, immediate_operand (2 * 8));
2595
2596
2597 emit_ops_insns (buf, p - buf);
2598 }
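/* With this frame, the saved arguments stay reachable at fixed offsets
   from FP no matter how deep the expression stack grows, e.g.

     SUB x0, fp, #16    x0 = address of the saved regs pointer
     LDR x0, [x0]       x0 = regs

   which is the access pattern aarch64_emit_reg uses below.  */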
2599
2600 /* Implementation of emit_ops method "emit_epilogue". */
2601
2602 static void
2603 aarch64_emit_epilogue (void)
2604 {
2605 uint32_t buf[16];
2606 uint32_t *p = buf;
2607
2608 /* Store the result of the expression (x0) in *value. */
2609 p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
2610 p += emit_ldr (p, x1, x1, offset_memory_operand (0));
2611 p += emit_str (p, x0, x1, offset_memory_operand (0));
2612
2613 /* Restore the previous state. */
2614 p += emit_add (p, sp, fp, immediate_operand (2 * 8));
2615 p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));
2616
2617 /* Return expr_eval_no_error. */
2618 p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
2619 p += emit_ret (p, lr);
2620
2621 emit_ops_insns (buf, p - buf);
2622 }
2623
2624 /* Implementation of emit_ops method "emit_add". */
2625
2626 static void
2627 aarch64_emit_add (void)
2628 {
2629 uint32_t buf[16];
2630 uint32_t *p = buf;
2631
2632 p += emit_pop (p, x1);
2633 p += emit_add (p, x0, x1, register_operand (x0));
2634
2635 emit_ops_insns (buf, p - buf);
2636 }
2637
2638 /* Implementation of emit_ops method "emit_sub". */
2639
2640 static void
2641 aarch64_emit_sub (void)
2642 {
2643 uint32_t buf[16];
2644 uint32_t *p = buf;
2645
2646 p += emit_pop (p, x1);
2647 p += emit_sub (p, x0, x1, register_operand (x0));
2648
2649 emit_ops_insns (buf, p - buf);
2650 }
2651
2652 /* Implementation of emit_ops method "emit_mul". */
2653
2654 static void
2655 aarch64_emit_mul (void)
2656 {
2657 uint32_t buf[16];
2658 uint32_t *p = buf;
2659
2660 p += emit_pop (p, x1);
2661 p += emit_mul (p, x0, x1, x0);
2662
2663 emit_ops_insns (buf, p - buf);
2664 }
2665
2666 /* Implementation of emit_ops method "emit_lsh". */
2667
2668 static void
2669 aarch64_emit_lsh (void)
2670 {
2671 uint32_t buf[16];
2672 uint32_t *p = buf;
2673
2674 p += emit_pop (p, x1);
2675 p += emit_lslv (p, x0, x1, x0);
2676
2677 emit_ops_insns (buf, p - buf);
2678 }
2679
2680 /* Implementation of emit_ops method "emit_rsh_signed". */
2681
2682 static void
2683 aarch64_emit_rsh_signed (void)
2684 {
2685 uint32_t buf[16];
2686 uint32_t *p = buf;
2687
2688 p += emit_pop (p, x1);
2689 p += emit_asrv (p, x0, x1, x0);
2690
2691 emit_ops_insns (buf, p - buf);
2692 }
2693
2694 /* Implementation of emit_ops method "emit_rsh_unsigned". */
2695
2696 static void
2697 aarch64_emit_rsh_unsigned (void)
2698 {
2699 uint32_t buf[16];
2700 uint32_t *p = buf;
2701
2702 p += emit_pop (p, x1);
2703 p += emit_lsrv (p, x0, x1, x0);
2704
2705 emit_ops_insns (buf, p - buf);
2706 }
2707
2708 /* Implementation of emit_ops method "emit_ext". */
2709
2710 static void
2711 aarch64_emit_ext (int arg)
2712 {
2713 uint32_t buf[16];
2714 uint32_t *p = buf;
2715
2716 p += emit_sbfx (p, x0, x0, 0, arg);
2717
2718 emit_ops_insns (buf, p - buf);
2719 }
2720
2721 /* Implementation of emit_ops method "emit_log_not". */
2722
2723 static void
2724 aarch64_emit_log_not (void)
2725 {
2726 uint32_t buf[16];
2727 uint32_t *p = buf;
2728
2729 /* If the top of the stack is 0, replace it with 1. Else replace it with
2730 0. */
2731
2732 p += emit_cmp (p, x0, immediate_operand (0));
2733 p += emit_cset (p, x0, EQ);
2734
2735 emit_ops_insns (buf, p - buf);
2736 }
2737
2738 /* Implementation of emit_ops method "emit_bit_and". */
2739
2740 static void
2741 aarch64_emit_bit_and (void)
2742 {
2743 uint32_t buf[16];
2744 uint32_t *p = buf;
2745
2746 p += emit_pop (p, x1);
2747 p += emit_and (p, x0, x0, x1);
2748
2749 emit_ops_insns (buf, p - buf);
2750 }
2751
2752 /* Implementation of emit_ops method "emit_bit_or". */
2753
2754 static void
2755 aarch64_emit_bit_or (void)
2756 {
2757 uint32_t buf[16];
2758 uint32_t *p = buf;
2759
2760 p += emit_pop (p, x1);
2761 p += emit_orr (p, x0, x0, x1);
2762
2763 emit_ops_insns (buf, p - buf);
2764 }
2765
2766 /* Implementation of emit_ops method "emit_bit_xor". */
2767
2768 static void
2769 aarch64_emit_bit_xor (void)
2770 {
2771 uint32_t buf[16];
2772 uint32_t *p = buf;
2773
2774 p += emit_pop (p, x1);
2775 p += emit_eor (p, x0, x0, x1);
2776
2777 emit_ops_insns (buf, p - buf);
2778 }
2779
2780 /* Implementation of emit_ops method "emit_bit_not". */
2781
2782 static void
2783 aarch64_emit_bit_not (void)
2784 {
2785 uint32_t buf[16];
2786 uint32_t *p = buf;
2787
2788 p += emit_mvn (p, x0, x0);
2789
2790 emit_ops_insns (buf, p - buf);
2791 }
2792
2793 /* Implementation of emit_ops method "emit_equal". */
2794
2795 static void
2796 aarch64_emit_equal (void)
2797 {
2798 uint32_t buf[16];
2799 uint32_t *p = buf;
2800
2801 p += emit_pop (p, x1);
2802 p += emit_cmp (p, x0, register_operand (x1));
2803 p += emit_cset (p, x0, EQ);
2804
2805 emit_ops_insns (buf, p - buf);
2806 }
2807
2808 /* Implementation of emit_ops method "emit_less_signed". */
2809
2810 static void
2811 aarch64_emit_less_signed (void)
2812 {
2813 uint32_t buf[16];
2814 uint32_t *p = buf;
2815
2816 p += emit_pop (p, x1);
2817 p += emit_cmp (p, x1, register_operand (x0));
2818 p += emit_cset (p, x0, LT);
2819
2820 emit_ops_insns (buf, p - buf);
2821 }
2822
2823 /* Implementation of emit_ops method "emit_less_unsigned". */
2824
2825 static void
2826 aarch64_emit_less_unsigned (void)
2827 {
2828 uint32_t buf[16];
2829 uint32_t *p = buf;
2830
2831 p += emit_pop (p, x1);
2832 p += emit_cmp (p, x1, register_operand (x0));
2833 p += emit_cset (p, x0, LO);
2834
2835 emit_ops_insns (buf, p - buf);
2836 }
2837
2838 /* Implementation of emit_ops method "emit_ref". */
2839
2840 static void
2841 aarch64_emit_ref (int size)
2842 {
2843 uint32_t buf[16];
2844 uint32_t *p = buf;
2845
2846 switch (size)
2847 {
2848 case 1:
2849 p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
2850 break;
2851 case 2:
2852 p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
2853 break;
2854 case 4:
2855 p += emit_ldr (p, w0, x0, offset_memory_operand (0));
2856 break;
2857 case 8:
2858 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2859 break;
2860 default:
2861 /* Unknown size, bail on compilation. */
2862 emit_error = 1;
2863 break;
2864 }
2865
2866 emit_ops_insns (buf, p - buf);
2867 }
2868
2869 /* Implementation of emit_ops method "emit_if_goto". */
2870
2871 static void
2872 aarch64_emit_if_goto (int *offset_p, int *size_p)
2873 {
2874 uint32_t buf[16];
2875 uint32_t *p = buf;
2876
2877 /* The Z flag is set or cleared here. */
2878 p += emit_cmp (p, x0, immediate_operand (0));
2879 /* This instruction must not change the Z flag. */
2880 p += emit_pop (p, x0);
2881 /* Branch over the next instruction if x0 == 0. */
2882 p += emit_bcond (p, EQ, 8);
2883
2884 /* The NOP instruction will be patched with an unconditional branch. */
2885 if (offset_p)
2886 *offset_p = (p - buf) * 4;
2887 if (size_p)
2888 *size_p = 4;
2889 p += emit_nop (p);
2890
2891 emit_ops_insns (buf, p - buf);
2892 }
2893
2894 /* Implementation of emit_ops method "emit_goto". */
2895
2896 static void
2897 aarch64_emit_goto (int *offset_p, int *size_p)
2898 {
2899 uint32_t buf[16];
2900 uint32_t *p = buf;
2901
2902 /* The NOP instruction will be patched with an unconditional branch. */
2903 if (offset_p)
2904 *offset_p = 0;
2905 if (size_p)
2906 *size_p = 4;
2907 p += emit_nop (p);
2908
2909 emit_ops_insns (buf, p - buf);
2910 }
2911
2912 /* Implementation of emit_ops method "write_goto_address". */
2913
2914 static void
2915 aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2916 {
2917 uint32_t insn;
2918
2919 emit_b (&insn, 0, to - from);
2920 append_insns (&from, 1, &insn);
2921 }
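/* Schematically, a caller of the hooks above records where the NOP
   placeholder was emitted and patches it once the destination is known
   (START names the value of current_insn_ptr when aarch64_emit_if_goto
   or aarch64_emit_goto was called, DEST the branch destination):

     int offset, size;

     aarch64_emit_if_goto (&offset, &size);
     ...
     aarch64_write_goto_address (start + offset, dest, size);

   which overwrites the 4-byte NOP with an unconditional branch to DEST.  */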
2922
2923 /* Implementation of emit_ops method "emit_const". */
2924
2925 static void
2926 aarch64_emit_const (LONGEST num)
2927 {
2928 uint32_t buf[16];
2929 uint32_t *p = buf;
2930
2931 p += emit_mov_addr (p, x0, num);
2932
2933 emit_ops_insns (buf, p - buf);
2934 }
2935
2936 /* Implementation of emit_ops method "emit_call". */
2937
2938 static void
2939 aarch64_emit_call (CORE_ADDR fn)
2940 {
2941 uint32_t buf[16];
2942 uint32_t *p = buf;
2943
2944 p += emit_mov_addr (p, ip0, fn);
2945 p += emit_blr (p, ip0);
2946
2947 emit_ops_insns (buf, p - buf);
2948 }
2949
2950 /* Implementation of emit_ops method "emit_reg". */
2951
2952 static void
2953 aarch64_emit_reg (int reg)
2954 {
2955 uint32_t buf[16];
2956 uint32_t *p = buf;
2957
2958 /* Set x0 to unsigned char *regs. */
2959 p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
2960 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2961 p += emit_mov (p, x1, immediate_operand (reg));
2962
2963 emit_ops_insns (buf, p - buf);
2964
2965 aarch64_emit_call (get_raw_reg_func_addr ());
2966 }
2967
2968 /* Implementation of emit_ops method "emit_pop". */
2969
2970 static void
2971 aarch64_emit_pop (void)
2972 {
2973 uint32_t buf[16];
2974 uint32_t *p = buf;
2975
2976 p += emit_pop (p, x0);
2977
2978 emit_ops_insns (buf, p - buf);
2979 }
2980
2981 /* Implementation of emit_ops method "emit_stack_flush". */
2982
2983 static void
2984 aarch64_emit_stack_flush (void)
2985 {
2986 uint32_t buf[16];
2987 uint32_t *p = buf;
2988
2989 p += emit_push (p, x0);
2990
2991 emit_ops_insns (buf, p - buf);
2992 }
2993
2994 /* Implementation of emit_ops method "emit_zero_ext". */
2995
2996 static void
2997 aarch64_emit_zero_ext (int arg)
2998 {
2999 uint32_t buf[16];
3000 uint32_t *p = buf;
3001
3002 p += emit_ubfx (p, x0, x0, 0, arg);
3003
3004 emit_ops_insns (buf, p - buf);
3005 }
3006
3007 /* Implementation of emit_ops method "emit_swap". */
3008
3009 static void
3010 aarch64_emit_swap (void)
3011 {
3012 uint32_t buf[16];
3013 uint32_t *p = buf;
3014
3015 p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
3016 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
3017 p += emit_mov (p, x0, register_operand (x1));
3018
3019 emit_ops_insns (buf, p - buf);
3020 }
3021
3022 /* Implementation of emit_ops method "emit_stack_adjust". */
3023
3024 static void
3025 aarch64_emit_stack_adjust (int n)
3026 {
3027 /* This is not needed with our design. */
3028 uint32_t buf[16];
3029 uint32_t *p = buf;
3030
3031 p += emit_add (p, sp, sp, immediate_operand (n * 16));
3032
3033 emit_ops_insns (buf, p - buf);
3034 }
3035
3036 /* Implementation of emit_ops method "emit_int_call_1". */
3037
3038 static void
3039 aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
3040 {
3041 uint32_t buf[16];
3042 uint32_t *p = buf;
3043
3044 p += emit_mov (p, x0, immediate_operand (arg1));
3045
3046 emit_ops_insns (buf, p - buf);
3047
3048 aarch64_emit_call (fn);
3049 }
3050
3051 /* Implementation of emit_ops method "emit_void_call_2". */
3052
3053 static void
3054 aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
3055 {
3056 uint32_t buf[16];
3057 uint32_t *p = buf;
3058
3059 /* Push x0 on the stack. */
3060 aarch64_emit_stack_flush ();
3061
3062 /* Set up arguments for the function call:
3063
3064 x0: arg1
3065 x1: top of the stack
3066
3067 MOV x1, x0
3068 MOV x0, #arg1 */
3069
3070 p += emit_mov (p, x1, register_operand (x0));
3071 p += emit_mov (p, x0, immediate_operand (arg1));
3072
3073 emit_ops_insns (buf, p - buf);
3074
3075 aarch64_emit_call (fn);
3076
3077 /* Restore x0. */
3078 aarch64_emit_pop ();
3079 }
3080
3081 /* Implementation of emit_ops method "emit_eq_goto". */
3082
3083 static void
3084 aarch64_emit_eq_goto (int *offset_p, int *size_p)
3085 {
3086 uint32_t buf[16];
3087 uint32_t *p = buf;
3088
3089 p += emit_pop (p, x1);
3090 p += emit_cmp (p, x1, register_operand (x0));
3091 /* Branch over the next instruction if x0 != x1. */
3092 p += emit_bcond (p, NE, 8);
3093 /* The NOP instruction will be patched with an unconditional branch. */
3094 if (offset_p)
3095 *offset_p = (p - buf) * 4;
3096 if (size_p)
3097 *size_p = 4;
3098 p += emit_nop (p);
3099
3100 emit_ops_insns (buf, p - buf);
3101 }
3102
3103 /* Implementation of emit_ops method "emit_ne_goto". */
3104
3105 static void
3106 aarch64_emit_ne_goto (int *offset_p, int *size_p)
3107 {
3108 uint32_t buf[16];
3109 uint32_t *p = buf;
3110
3111 p += emit_pop (p, x1);
3112 p += emit_cmp (p, x1, register_operand (x0));
3113 /* Branch over the next instruction if x0 == x1. */
3114 p += emit_bcond (p, EQ, 8);
3115 /* The NOP instruction will be patched with an unconditional branch. */
3116 if (offset_p)
3117 *offset_p = (p - buf) * 4;
3118 if (size_p)
3119 *size_p = 4;
3120 p += emit_nop (p);
3121
3122 emit_ops_insns (buf, p - buf);
3123 }
3124
3125 /* Implementation of emit_ops method "emit_lt_goto". */
3126
3127 static void
3128 aarch64_emit_lt_goto (int *offset_p, int *size_p)
3129 {
3130 uint32_t buf[16];
3131 uint32_t *p = buf;
3132
3133 p += emit_pop (p, x1);
3134 p += emit_cmp (p, x1, register_operand (x0));
3135 /* Branch over the next instruction if x1 >= x0. */
3136 p += emit_bcond (p, GE, 8);
3137 /* The NOP instruction will be patched with an unconditional branch. */
3138 if (offset_p)
3139 *offset_p = (p - buf) * 4;
3140 if (size_p)
3141 *size_p = 4;
3142 p += emit_nop (p);
3143
3144 emit_ops_insns (buf, p - buf);
3145 }
3146
3147 /* Implementation of emit_ops method "emit_le_goto". */
3148
3149 static void
3150 aarch64_emit_le_goto (int *offset_p, int *size_p)
3151 {
3152 uint32_t buf[16];
3153 uint32_t *p = buf;
3154
3155 p += emit_pop (p, x1);
3156 p += emit_cmp (p, x1, register_operand (x0));
3157 /* Branch over the next instruction if x1 > x0. */
3158 p += emit_bcond (p, GT, 8);
3159 /* The NOP instruction will be patched with an unconditional branch. */
3160 if (offset_p)
3161 *offset_p = (p - buf) * 4;
3162 if (size_p)
3163 *size_p = 4;
3164 p += emit_nop (p);
3165
3166 emit_ops_insns (buf, p - buf);
3167 }
3168
3169 /* Implementation of emit_ops method "emit_gt_goto". */
3170
3171 static void
3172 aarch64_emit_gt_goto (int *offset_p, int *size_p)
3173 {
3174 uint32_t buf[16];
3175 uint32_t *p = buf;
3176
3177 p += emit_pop (p, x1);
3178 p += emit_cmp (p, x1, register_operand (x0));
3179 /* Branch over the next instruction if x1 <= x0. */
3180 p += emit_bcond (p, LE, 8);
3181 /* The NOP instruction will be patched with an unconditional branch. */
3182 if (offset_p)
3183 *offset_p = (p - buf) * 4;
3184 if (size_p)
3185 *size_p = 4;
3186 p += emit_nop (p);
3187
3188 emit_ops_insns (buf, p - buf);
3189 }
3190
3191 /* Implementation of emit_ops method "emit_ge_goto". */
3192
3193 static void
3194 aarch64_emit_ge_got (int *offset_p, int *size_p)
3195 {
3196 uint32_t buf[16];
3197 uint32_t *p = buf;
3198
3199 p += emit_pop (p, x1);
3200 p += emit_cmp (p, x1, register_operand (x0));
3201 /* Branch over the next instruction if x1 < x0. */
3202 p += emit_bcond (p, LT, 8);
3203 /* The NOP instruction will be patched with an unconditional branch. */
3204 if (offset_p)
3205 *offset_p = (p - buf) * 4;
3206 if (size_p)
3207 *size_p = 4;
3208 p += emit_nop (p);
3209
3210 emit_ops_insns (buf, p - buf);
3211 }
3212
3213 static struct emit_ops aarch64_emit_ops_impl =
3214 {
3215 aarch64_emit_prologue,
3216 aarch64_emit_epilogue,
3217 aarch64_emit_add,
3218 aarch64_emit_sub,
3219 aarch64_emit_mul,
3220 aarch64_emit_lsh,
3221 aarch64_emit_rsh_signed,
3222 aarch64_emit_rsh_unsigned,
3223 aarch64_emit_ext,
3224 aarch64_emit_log_not,
3225 aarch64_emit_bit_and,
3226 aarch64_emit_bit_or,
3227 aarch64_emit_bit_xor,
3228 aarch64_emit_bit_not,
3229 aarch64_emit_equal,
3230 aarch64_emit_less_signed,
3231 aarch64_emit_less_unsigned,
3232 aarch64_emit_ref,
3233 aarch64_emit_if_goto,
3234 aarch64_emit_goto,
3235 aarch64_write_goto_address,
3236 aarch64_emit_const,
3237 aarch64_emit_call,
3238 aarch64_emit_reg,
3239 aarch64_emit_pop,
3240 aarch64_emit_stack_flush,
3241 aarch64_emit_zero_ext,
3242 aarch64_emit_swap,
3243 aarch64_emit_stack_adjust,
3244 aarch64_emit_int_call_1,
3245 aarch64_emit_void_call_2,
3246 aarch64_emit_eq_goto,
3247 aarch64_emit_ne_goto,
3248 aarch64_emit_lt_goto,
3249 aarch64_emit_le_goto,
3250 aarch64_emit_gt_goto,
3251 aarch64_emit_ge_got,
3252 };
3253
3254 /* Implementation of target ops method "emit_ops". */
3255
3256 emit_ops *
3257 aarch64_target::emit_ops ()
3258 {
3259 return &aarch64_emit_ops_impl;
3260 }
3261
3262 /* Implementation of target ops method
3263 "get_min_fast_tracepoint_insn_len". */
3264
3265 int
3266 aarch64_target::get_min_fast_tracepoint_insn_len ()
3267 {
3268 return 4;
3269 }
3270
3271 /* Implementation of linux target ops method "low_supports_range_stepping". */
3272
3273 bool
3274 aarch64_target::low_supports_range_stepping ()
3275 {
3276 return true;
3277 }
3278
3279 /* Implementation of target ops method "sw_breakpoint_from_kind". */
3280
3281 const gdb_byte *
3282 aarch64_target::sw_breakpoint_from_kind (int kind, int *size)
3283 {
3284 if (is_64bit_tdesc ())
3285 {
3286 *size = aarch64_breakpoint_len;
3287 return aarch64_breakpoint;
3288 }
3289 else
3290 return arm_sw_breakpoint_from_kind (kind, size);
3291 }
3292
3293 /* Implementation of target ops method "breakpoint_kind_from_pc". */
3294
3295 int
3296 aarch64_target::breakpoint_kind_from_pc (CORE_ADDR *pcptr)
3297 {
3298 if (is_64bit_tdesc ())
3299 return aarch64_breakpoint_len;
3300 else
3301 return arm_breakpoint_kind_from_pc (pcptr);
3302 }
3303
3304 /* Implementation of the target ops method
3305 "breakpoint_kind_from_current_state". */
3306
3307 int
3308 aarch64_target::breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
3309 {
3310 if (is_64bit_tdesc ())
3311 return aarch64_breakpoint_len;
3312 else
3313 return arm_breakpoint_kind_from_current_state (pcptr);
3314 }
3315
3316 /* Implementation of target ops method "supports_qxfer_capability". */
3317
3318 bool
3319 aarch64_target::supports_qxfer_capability ()
3320 {
3321 unsigned long hwcap2 = linux_get_hwcap2 (8);
3322
3323 return (hwcap2 & HWCAP2_MORELLO) != 0;
3324 }
3325
3326 /* Implementation of target ops method "qxfer_capability". */
3327
3328 int
3329 aarch64_target::qxfer_capability (const CORE_ADDR address,
3330 unsigned char *readbuf,
3331 unsigned const char *writebuf,
3332 CORE_ADDR offset, int len)
3333 {
3334 int tid = pid_of (current_thread);
3335
3336 struct user_cap cap;
3337
3338 if (readbuf != nullptr)
3339 {
3340 if (!aarch64_linux_read_capability (tid, address, cap))
3341 {
3342 warning (_("Unable to read capability from address."));
3343 return 0;
3344 }
3345
3346 /* Copy data to readbuf. */
3347 memcpy (readbuf, &cap.tag, 1);
3348 memcpy (readbuf + 1, &cap.val, 16);
3349 }
3350 else
3351 {
3352 /* Copy data from writebuf. */
3353 memcpy (&cap.tag, writebuf, 1);
3354 memcpy (&cap.val, writebuf + 1, 16);
3355 memset (&cap.__reserved, 0, 15);
3356
3357 if (!aarch64_linux_write_capability (tid, address, cap))
3358 {
3359 warning (_("Unable to write capability to address.\n"
3360 "Please run \"sysctl cheri.ptrace_forge_cap=1\"."));
3361 return 0;
3362 }
3363 }
3364
3365 return sizeof (cap.val) + 1;
3366 }
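/* The buffer layout used above is, schematically:

     byte 0       capability tag (0 or 1)
     bytes 1..16  128-bit capability value

   so a successful read or write always transfers 17 bytes.  */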
3367
3368 /* The linux target ops object. */
3369
3370 linux_process_target *the_linux_target = &the_aarch64_target;
3371
3372 void
3373 initialize_low_arch (void)
3374 {
3375 initialize_low_arch_aarch32 ();
3376
3377 initialize_regsets_info (&aarch64_regsets_info);
3378 initialize_regsets_info (&aarch64_sve_regsets_info);
3379 }