1 /* GNU/Linux/AArch64 specific low level interface, for the remote server for
2 GDB.
3
4 Copyright (C) 2009-2018 Free Software Foundation, Inc.
5 Contributed by ARM Ltd.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "server.h"
23 #include "linux-low.h"
24 #include "nat/aarch64-linux.h"
25 #include "nat/aarch64-linux-hw-point.h"
26 #include "arch/aarch64-insn.h"
27 #include "linux-aarch32-low.h"
28 #include "elf/common.h"
29 #include "ax.h"
30 #include "tracepoint.h"
31
32 #include <signal.h>
33 #include <sys/user.h>
34 #include "nat/gdb_ptrace.h"
35 #include <asm/ptrace.h>
36 #include <inttypes.h>
37 #include <endian.h>
38 #include <sys/uio.h>
39
40 #include "gdb_proc_service.h"
41 #include "arch/aarch64.h"
42 #include "linux-aarch64-tdesc.h"
43
44 #ifdef HAVE_SYS_REG_H
45 #include <sys/reg.h>
46 #endif
47
48 /* Per-process arch-specific data we want to keep. */
49
50 struct arch_process_info
51 {
52 /* Hardware breakpoint/watchpoint data.
53 The reason for them to be per-process rather than per-thread is
54 due to the lack of information in the gdbserver environment;
55 gdbserver is not told that whether a requested hardware
56 breakpoint/watchpoint is thread specific or not, so it has to set
57 each hw bp/wp for every thread in the current process. The
58 higher level bp/wp management in gdb will resume a thread if a hw
59 bp/wp trap is not expected for it. Since the hw bp/wp setting is
60     the same for each thread, it is reasonable for the data to live here.
61 */
62 struct aarch64_debug_reg_state debug_reg_state;
63 };
64
65 /* Return true if the size of register 0 is 8 bytes.  */
66
67 static int
68 is_64bit_tdesc (void)
69 {
70 struct regcache *regcache = get_thread_regcache (current_thread, 0);
71
72 return register_size (regcache->tdesc, 0) == 8;
73 }
74
75 /* Implementation of linux_target_ops method "cannot_store_register". */
76
77 static int
78 aarch64_cannot_store_register (int regno)
79 {
80 return regno >= AARCH64_NUM_REGS;
81 }
82
83 /* Implementation of linux_target_ops method "cannot_fetch_register". */
84
85 static int
86 aarch64_cannot_fetch_register (int regno)
87 {
88 return regno >= AARCH64_NUM_REGS;
89 }
90
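/* Fill BUF, laid out as a struct user_pt_regs, with the general
   purpose register values (x0-x30, sp, pc, cpsr) taken from REGCACHE.  */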
91 static void
92 aarch64_fill_gregset (struct regcache *regcache, void *buf)
93 {
94 struct user_pt_regs *regset = (struct user_pt_regs *) buf;
95 int i;
96
97 for (i = 0; i < AARCH64_X_REGS_NUM; i++)
98 collect_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
99 collect_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
100 collect_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
101 collect_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
102 }
103
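/* Store the general purpose register values held in BUF, laid out as
   a struct user_pt_regs, into REGCACHE.  */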
104 static void
105 aarch64_store_gregset (struct regcache *regcache, const void *buf)
106 {
107 const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
108 int i;
109
110 for (i = 0; i < AARCH64_X_REGS_NUM; i++)
111 supply_register (regcache, AARCH64_X0_REGNUM + i, &regset->regs[i]);
112 supply_register (regcache, AARCH64_SP_REGNUM, &regset->sp);
113 supply_register (regcache, AARCH64_PC_REGNUM, &regset->pc);
114 supply_register (regcache, AARCH64_CPSR_REGNUM, &regset->pstate);
115 }
116
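/* Fill BUF, laid out as a struct user_fpsimd_state, with the SIMD/FP
   register values (v0-v31, fpsr, fpcr) taken from REGCACHE.  */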
117 static void
118 aarch64_fill_fpregset (struct regcache *regcache, void *buf)
119 {
120 struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
121 int i;
122
123 for (i = 0; i < AARCH64_V_REGS_NUM; i++)
124 collect_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
125 collect_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
126 collect_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
127 }
128
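/* Store the SIMD/FP register values held in BUF, laid out as a
   struct user_fpsimd_state, into REGCACHE.  */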
129 static void
130 aarch64_store_fpregset (struct regcache *regcache, const void *buf)
131 {
132 const struct user_fpsimd_state *regset
133 = (const struct user_fpsimd_state *) buf;
134 int i;
135
136 for (i = 0; i < AARCH64_V_REGS_NUM; i++)
137 supply_register (regcache, AARCH64_V0_REGNUM + i, &regset->vregs[i]);
138 supply_register (regcache, AARCH64_FPSR_REGNUM, &regset->fpsr);
139 supply_register (regcache, AARCH64_FPCR_REGNUM, &regset->fpcr);
140 }
141
142 /* Enable miscellaneous debugging output. The name is historical - it
143 was originally used to debug LinuxThreads support. */
144 extern int debug_threads;
145
146 /* Implementation of linux_target_ops method "get_pc". */
147
148 static CORE_ADDR
149 aarch64_get_pc (struct regcache *regcache)
150 {
151 if (register_size (regcache->tdesc, 0) == 8)
152 return linux_get_pc_64bit (regcache);
153 else
154 return linux_get_pc_32bit (regcache);
155 }
156
157 /* Implementation of linux_target_ops method "set_pc". */
158
159 static void
160 aarch64_set_pc (struct regcache *regcache, CORE_ADDR pc)
161 {
162 if (register_size (regcache->tdesc, 0) == 8)
163 linux_set_pc_64bit (regcache, pc);
164 else
165 linux_set_pc_32bit (regcache, pc);
166 }
167
168 #define aarch64_breakpoint_len 4
169
170 /* AArch64 BRK software debug mode instruction.
171 This instruction needs to match gdb/aarch64-tdep.c
172 (aarch64_default_breakpoint). */
173 static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};
174
175 /* Implementation of linux_target_ops method "breakpoint_at". */
176
177 static int
178 aarch64_breakpoint_at (CORE_ADDR where)
179 {
180 if (is_64bit_tdesc ())
181 {
182 gdb_byte insn[aarch64_breakpoint_len];
183
184 (*the_target->read_memory) (where, (unsigned char *) &insn,
185 aarch64_breakpoint_len);
186 if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
187 return 1;
188
189 return 0;
190 }
191 else
192 return arm_breakpoint_at (where);
193 }
194
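/* Clear the address, control and reference count mirrors of every
   hardware breakpoint and watchpoint register in STATE.  */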
195 static void
196 aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
197 {
198 int i;
199
200 for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
201 {
202 state->dr_addr_bp[i] = 0;
203 state->dr_ctrl_bp[i] = 0;
204 state->dr_ref_count_bp[i] = 0;
205 }
206
207 for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
208 {
209 state->dr_addr_wp[i] = 0;
210 state->dr_ctrl_wp[i] = 0;
211 state->dr_ref_count_wp[i] = 0;
212 }
213 }
214
215 /* Return the pointer to the debug register state structure in the
216 current process' arch-specific data area. */
217
218 struct aarch64_debug_reg_state *
219 aarch64_get_debug_reg_state (pid_t pid)
220 {
221 struct process_info *proc = find_process_pid (pid);
222
223 return &proc->priv->arch_private->debug_reg_state;
224 }
225
226 /* Implementation of linux_target_ops method "supports_z_point_type". */
227
228 static int
229 aarch64_supports_z_point_type (char z_type)
230 {
231 switch (z_type)
232 {
233 case Z_PACKET_SW_BP:
234 case Z_PACKET_HW_BP:
235 case Z_PACKET_WRITE_WP:
236 case Z_PACKET_READ_WP:
237 case Z_PACKET_ACCESS_WP:
238 return 1;
239 default:
240 return 0;
241 }
242 }
243
244 /* Implementation of linux_target_ops method "insert_point".
245
246 It actually only records the info of the to-be-inserted bp/wp;
247 the actual insertion will happen when threads are resumed. */
248
249 static int
250 aarch64_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
251 int len, struct raw_breakpoint *bp)
252 {
253 int ret;
254 enum target_hw_bp_type targ_type;
255 struct aarch64_debug_reg_state *state
256 = aarch64_get_debug_reg_state (pid_of (current_thread));
257
258 if (show_debug_regs)
259 fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
260 (unsigned long) addr, len);
261
262 /* Determine the type from the raw breakpoint type. */
263 targ_type = raw_bkpt_type_to_target_hw_bp_type (type);
264
265 if (targ_type != hw_execute)
266 {
267 if (aarch64_linux_region_ok_for_watchpoint (addr, len))
268 ret = aarch64_handle_watchpoint (targ_type, addr, len,
269 1 /* is_insert */, state);
270 else
271 ret = -1;
272 }
273 else
274 {
275 if (len == 3)
276 {
277 	  /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
278 	     instruction.  Set it to 2 to correctly encode the length bit
279 	     mask in the hardware breakpoint/watchpoint control register.  */
280 len = 2;
281 }
282 ret = aarch64_handle_breakpoint (targ_type, addr, len,
283 1 /* is_insert */, state);
284 }
285
286 if (show_debug_regs)
287 aarch64_show_debug_reg_state (state, "insert_point", addr, len,
288 targ_type);
289
290 return ret;
291 }
292
293 /* Implementation of linux_target_ops method "remove_point".
294
295    It actually only records the info of the to-be-removed bp/wp;
296 the actual removal will be done when threads are resumed. */
297
298 static int
299 aarch64_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
300 int len, struct raw_breakpoint *bp)
301 {
302 int ret;
303 enum target_hw_bp_type targ_type;
304 struct aarch64_debug_reg_state *state
305 = aarch64_get_debug_reg_state (pid_of (current_thread));
306
307 if (show_debug_regs)
308 fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
309 (unsigned long) addr, len);
310
311 /* Determine the type from the raw breakpoint type. */
312 targ_type = raw_bkpt_type_to_target_hw_bp_type (type);
313
314 /* Set up state pointers. */
315 if (targ_type != hw_execute)
316 ret =
317 aarch64_handle_watchpoint (targ_type, addr, len, 0 /* is_insert */,
318 state);
319 else
320 {
321 if (len == 3)
322 {
323 	  /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
324 	     instruction.  Set it to 2 to correctly encode the length bit
325 	     mask in the hardware breakpoint/watchpoint control register.  */
326 len = 2;
327 }
328 ret = aarch64_handle_breakpoint (targ_type, addr, len,
329 0 /* is_insert */, state);
330 }
331
332 if (show_debug_regs)
333 aarch64_show_debug_reg_state (state, "remove_point", addr, len,
334 targ_type);
335
336 return ret;
337 }
338
339 /* Implementation of linux_target_ops method "stopped_data_address". */
340
341 static CORE_ADDR
342 aarch64_stopped_data_address (void)
343 {
344 siginfo_t siginfo;
345 int pid, i;
346 struct aarch64_debug_reg_state *state;
347
348 pid = lwpid_of (current_thread);
349
350 /* Get the siginfo. */
351 if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
352 return (CORE_ADDR) 0;
353
354   /* It must be a hardware breakpoint/watchpoint trap.  */
355 if (siginfo.si_signo != SIGTRAP
356 || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
357 return (CORE_ADDR) 0;
358
359 /* Check if the address matches any watched address. */
360 state = aarch64_get_debug_reg_state (pid_of (current_thread));
361 for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
362 {
363 const unsigned int len = aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
364 const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
365 const CORE_ADDR addr_watch = state->dr_addr_wp[i];
366 if (state->dr_ref_count_wp[i]
367 && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
368 && addr_trap >= addr_watch
369 && addr_trap < addr_watch + len)
370 return addr_trap;
371 }
372
373 return (CORE_ADDR) 0;
374 }
375
376 /* Implementation of linux_target_ops method "stopped_by_watchpoint". */
377
378 static int
379 aarch64_stopped_by_watchpoint (void)
380 {
381 if (aarch64_stopped_data_address () != 0)
382 return 1;
383 else
384 return 0;
385 }
386
387 /* Fetch the thread-local storage pointer for libthread_db. */
388
389 ps_err_e
390 ps_get_thread_area (struct ps_prochandle *ph,
391 lwpid_t lwpid, int idx, void **base)
392 {
393 return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
394 is_64bit_tdesc ());
395 }
396
397 /* Implementation of linux_target_ops method "siginfo_fixup". */
398
399 static int
400 aarch64_linux_siginfo_fixup (siginfo_t *native, gdb_byte *inf, int direction)
401 {
402 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
403 if (!is_64bit_tdesc ())
404 {
405 if (direction == 0)
406 aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
407 native);
408 else
409 aarch64_siginfo_from_compat_siginfo (native,
410 (struct compat_siginfo *) inf);
411
412 return 1;
413 }
414
415 return 0;
416 }
417
418 /* Implementation of linux_target_ops method "new_process". */
419
420 static struct arch_process_info *
421 aarch64_linux_new_process (void)
422 {
423 struct arch_process_info *info = XCNEW (struct arch_process_info);
424
425 aarch64_init_debug_reg_state (&info->debug_reg_state);
426
427 return info;
428 }
429
430 /* Implementation of linux_target_ops method "delete_process". */
431
432 static void
433 aarch64_linux_delete_process (struct arch_process_info *info)
434 {
435 xfree (info);
436 }
437
438 /* Implementation of linux_target_ops method "linux_new_fork". */
439
440 static void
441 aarch64_linux_new_fork (struct process_info *parent,
442 struct process_info *child)
443 {
444 /* These are allocated by linux_add_process. */
445 gdb_assert (parent->priv != NULL
446 && parent->priv->arch_private != NULL);
447 gdb_assert (child->priv != NULL
448 && child->priv->arch_private != NULL);
449
450   /* Linux kernels before commit
451      72f674d203cd230426437cdcf7dd6f681dad8b0d (2.6.33) let the child
452      inherit hardware debug registers from the parent
453      on fork/vfork/clone.  Newer Linux kernels create such tasks with
454 zeroed debug registers.
455
456 GDB core assumes the child inherits the watchpoints/hw
457 breakpoints of the parent, and will remove them all from the
458      forked-off process.  Copy the debug register mirrors into the
459      new process so that all breakpoints and watchpoints can be
460      removed together.  The debug register mirrors will be zeroed
461      before detaching the forked-off process, thus making
462 this compatible with older Linux kernels too. */
463
464 *child->priv->arch_private = *parent->priv->arch_private;
465 }
466
467 /* Implementation of linux_target_ops method "arch_setup". */
468
469 static void
470 aarch64_arch_setup (void)
471 {
472 unsigned int machine;
473 int is_elf64;
474 int tid;
475
476 tid = lwpid_of (current_thread);
477
478 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
479
480 if (is_elf64)
481 current_process ()->tdesc = aarch64_linux_read_description ();
482 else
483 current_process ()->tdesc = tdesc_arm_with_neon;
484
485 aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
486 }
487
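/* Register sets transferred with PTRACE_GETREGSET/PTRACE_SETREGSET:
   the general purpose registers (NT_PRSTATUS) and the SIMD/FP
   registers (NT_FPREGSET).  */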
488 static struct regset_info aarch64_regsets[] =
489 {
490 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
491 sizeof (struct user_pt_regs), GENERAL_REGS,
492 aarch64_fill_gregset, aarch64_store_gregset },
493 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
494 sizeof (struct user_fpsimd_state), FP_REGS,
495 aarch64_fill_fpregset, aarch64_store_fpregset
496 },
497 NULL_REGSET
498 };
499
500 static struct regsets_info aarch64_regsets_info =
501 {
502 aarch64_regsets, /* regsets */
503 0, /* num_regsets */
504 NULL, /* disabled_regsets */
505 };
506
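/* Register set description returned by aarch64_regs_info below for
   64-bit inferiors; 32-bit inferiors use regs_info_aarch32 instead.  */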
507 static struct regs_info regs_info_aarch64 =
508 {
509 NULL, /* regset_bitmap */
510 NULL, /* usrregs */
511 &aarch64_regsets_info,
512 };
513
514 /* Implementation of linux_target_ops method "regs_info". */
515
516 static const struct regs_info *
517 aarch64_regs_info (void)
518 {
519 if (is_64bit_tdesc ())
520 return &regs_info_aarch64;
521 else
522 return &regs_info_aarch32;
523 }
524
525 /* Implementation of linux_target_ops method "supports_tracepoints". */
526
527 static int
528 aarch64_supports_tracepoints (void)
529 {
530 if (current_thread == NULL)
531 return 1;
532 else
533 {
534       /* We don't support tracepoints on aarch32 yet.  */
535 return is_64bit_tdesc ();
536 }
537 }
538
539 /* Implementation of linux_target_ops method "get_thread_area". */
540
541 static int
542 aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
543 {
544 struct iovec iovec;
545 uint64_t reg;
546
547 iovec.iov_base = &reg;
548 iovec.iov_len = sizeof (reg);
549
550 if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
551 return -1;
552
553 *addrp = reg;
554
555 return 0;
556 }
557
558 /* Implementation of linux_target_ops method "get_syscall_trapinfo". */
559
560 static void
561 aarch64_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
562 {
563 int use_64bit = register_size (regcache->tdesc, 0) == 8;
564
565 if (use_64bit)
566 {
567 long l_sysno;
568
569 collect_register_by_name (regcache, "x8", &l_sysno);
570 *sysno = (int) l_sysno;
571 }
572 else
573 collect_register_by_name (regcache, "r7", sysno);
574 }
575
576 /* List of condition codes that we need. */
577
578 enum aarch64_condition_codes
579 {
580 EQ = 0x0,
581 NE = 0x1,
582 LO = 0x3,
583 GE = 0xa,
584 LT = 0xb,
585 GT = 0xc,
586 LE = 0xd,
587 };
588
589 enum aarch64_operand_type
590 {
591 OPERAND_IMMEDIATE,
592 OPERAND_REGISTER,
593 };
594
595 /* Representation of an operand. At this time, it only supports register
596 and immediate types. */
597
598 struct aarch64_operand
599 {
600 /* Type of the operand. */
601 enum aarch64_operand_type type;
602
603 /* Value of the operand according to the type. */
604 union
605 {
606 uint32_t imm;
607 struct aarch64_register reg;
608 };
609 };
610
611 /* List of registers that we are currently using; we can add more here as
612    we need them.  */
613
614 /* General purpose scratch registers (64 bit). */
615 static const struct aarch64_register x0 = { 0, 1 };
616 static const struct aarch64_register x1 = { 1, 1 };
617 static const struct aarch64_register x2 = { 2, 1 };
618 static const struct aarch64_register x3 = { 3, 1 };
619 static const struct aarch64_register x4 = { 4, 1 };
620
621 /* General purpose scratch registers (32 bit). */
622 static const struct aarch64_register w0 = { 0, 0 };
623 static const struct aarch64_register w2 = { 2, 0 };
624
625 /* Intra-procedure scratch registers. */
626 static const struct aarch64_register ip0 = { 16, 1 };
627
628 /* Special purpose registers. */
629 static const struct aarch64_register fp = { 29, 1 };
630 static const struct aarch64_register lr = { 30, 1 };
631 static const struct aarch64_register sp = { 31, 1 };
632 static const struct aarch64_register xzr = { 31, 1 };
633
634 /* Construct an aarch64_register value at run time.  If we know the register
635 statically, we should make it a global as above instead of using this
636 helper function. */
637
638 static struct aarch64_register
639 aarch64_register (unsigned num, int is64)
640 {
641 return (struct aarch64_register) { num, is64 };
642 }
643
644 /* Helper function to create a register operand, for instructions with
645 different types of operands.
646
647 For example:
648 p += emit_mov (p, x0, register_operand (x1)); */
649
650 static struct aarch64_operand
651 register_operand (struct aarch64_register reg)
652 {
653 struct aarch64_operand operand;
654
655 operand.type = OPERAND_REGISTER;
656 operand.reg = reg;
657
658 return operand;
659 }
660
661 /* Helper function to create an immediate operand, for instructions with
662 different types of operands.
663
664 For example:
665 p += emit_mov (p, x0, immediate_operand (12)); */
666
667 static struct aarch64_operand
668 immediate_operand (uint32_t imm)
669 {
670 struct aarch64_operand operand;
671
672 operand.type = OPERAND_IMMEDIATE;
673 operand.imm = imm;
674
675 return operand;
676 }
677
678 /* Helper function to create an offset memory operand.
679
680 For example:
681 p += emit_ldr (p, x0, sp, offset_memory_operand (16)); */
682
683 static struct aarch64_memory_operand
684 offset_memory_operand (int32_t offset)
685 {
686 return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
687 }
688
689 /* Helper function to create a pre-index memory operand.
690
691 For example:
692 p += emit_ldr (p, x0, sp, preindex_memory_operand (16)); */
693
694 static struct aarch64_memory_operand
695 preindex_memory_operand (int32_t index)
696 {
697 return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
698 }
699
700 /* Helper function to create a post-index memory operand.
701
702 For example:
703 p += emit_ldr (p, x0, sp, postindex_memory_operand (16)); */
704
705 static struct aarch64_memory_operand
706 postindex_memory_operand (int32_t index)
707 {
708 return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
709 }
710
711 /* System control registers. These special registers can be written and
712 read with the MRS and MSR instructions.
713
714 - NZCV: Condition flags. GDB refers to this register under the CPSR
715 name.
716 - FPSR: Floating-point status register.
717    - FPCR: Floating-point control register.
718 - TPIDR_EL0: Software thread ID register. */
719
720 enum aarch64_system_control_registers
721 {
722 /* op0 op1 crn crm op2 */
723 NZCV = (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
724 FPSR = (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
725 FPCR = (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
726 TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
727 };
728
729 /* Write a BLR instruction into *BUF.
730
731 BLR rn
732
733 RN is the register to branch to. */
734
735 static int
736 emit_blr (uint32_t *buf, struct aarch64_register rn)
737 {
738 return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
739 }
740
741 /* Write a RET instruction into *BUF.
742
743 RET xn
744
745 RN is the register to branch to. */
746
747 static int
748 emit_ret (uint32_t *buf, struct aarch64_register rn)
749 {
750 return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
751 }
752
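/* Helper function emitting a load or store pair instruction (LDP/STP).
   OPCODE selects the operation, RT and RT2 are the paired registers,
   RN is the base address register and OPERAND describes the addressing
   mode (offset, pre-index or post-index).  */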
753 static int
754 emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
755 struct aarch64_register rt,
756 struct aarch64_register rt2,
757 struct aarch64_register rn,
758 struct aarch64_memory_operand operand)
759 {
760 uint32_t opc;
761 uint32_t pre_index;
762 uint32_t write_back;
763
764 if (rt.is64)
765 opc = ENCODE (2, 2, 30);
766 else
767 opc = ENCODE (0, 2, 30);
768
769 switch (operand.type)
770 {
771 case MEMORY_OPERAND_OFFSET:
772 {
773 pre_index = ENCODE (1, 1, 24);
774 write_back = ENCODE (0, 1, 23);
775 break;
776 }
777 case MEMORY_OPERAND_POSTINDEX:
778 {
779 pre_index = ENCODE (0, 1, 24);
780 write_back = ENCODE (1, 1, 23);
781 break;
782 }
783 case MEMORY_OPERAND_PREINDEX:
784 {
785 pre_index = ENCODE (1, 1, 24);
786 write_back = ENCODE (1, 1, 23);
787 break;
788 }
789 default:
790 return 0;
791 }
792
793 return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
794 | ENCODE (operand.index >> 3, 7, 15)
795 | ENCODE (rt2.num, 5, 10)
796 | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
797 }
798
799 /* Write a STP instruction into *BUF.
800
801 STP rt, rt2, [rn, #offset]
802 STP rt, rt2, [rn, #index]!
803 STP rt, rt2, [rn], #index
804
805 RT and RT2 are the registers to store.
806 RN is the base address register.
807 OFFSET is the immediate to add to the base address. It is limited to a
808 -512 .. 504 range (7 bits << 3). */
809
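/* For example, as used below when building the jump pad:
     p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));  */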
810 static int
811 emit_stp (uint32_t *buf, struct aarch64_register rt,
812 struct aarch64_register rt2, struct aarch64_register rn,
813 struct aarch64_memory_operand operand)
814 {
815 return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
816 }
817
818 /* Write a LDP instruction into *BUF.
819
820 LDP rt, rt2, [rn, #offset]
821 LDP rt, rt2, [rn, #index]!
822 LDP rt, rt2, [rn], #index
823
824    RT and RT2 are the destination registers.
825 RN is the base address register.
826 OFFSET is the immediate to add to the base address. It is limited to a
827 -512 .. 504 range (7 bits << 3). */
828
829 static int
830 emit_ldp (uint32_t *buf, struct aarch64_register rt,
831 struct aarch64_register rt2, struct aarch64_register rn,
832 struct aarch64_memory_operand operand)
833 {
834 return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
835 }
836
837 /* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.
838
839 LDP qt, qt2, [rn, #offset]
840
841    RT and RT2 are the destination Q registers.
842 RN is the base address register.
843 OFFSET is the immediate to add to the base address. It is limited to
844 -1024 .. 1008 range (7 bits << 4). */
845
846 static int
847 emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
848 struct aarch64_register rn, int32_t offset)
849 {
850 uint32_t opc = ENCODE (2, 2, 30);
851 uint32_t pre_index = ENCODE (1, 1, 24);
852
853 return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
854 | ENCODE (offset >> 4, 7, 15)
855 | ENCODE (rt2, 5, 10)
856 | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
857 }
858
859 /* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.
860
861 STP qt, qt2, [rn, #offset]
862
863 RT and RT2 are the Q registers to store.
864 RN is the base address register.
865 OFFSET is the immediate to add to the base address. It is limited to
866 -1024 .. 1008 range (7 bits << 4). */
867
868 static int
869 emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
870 struct aarch64_register rn, int32_t offset)
871 {
872 uint32_t opc = ENCODE (2, 2, 30);
873 uint32_t pre_index = ENCODE (1, 1, 24);
874
875 return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
876 | ENCODE (offset >> 4, 7, 15)
877 | ENCODE (rt2, 5, 10)
878 | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
879 }
880
881 /* Write a LDRH instruction into *BUF.
882
883 LDRH wt, [xn, #offset]
884 LDRH wt, [xn, #index]!
885 LDRH wt, [xn], #index
886
887    RT is the destination register.
888 RN is the base address register.
889 OFFSET is the immediate to add to the base address. It is limited to
890 0 .. 32760 range (12 bits << 3). */
891
892 static int
893 emit_ldrh (uint32_t *buf, struct aarch64_register rt,
894 struct aarch64_register rn,
895 struct aarch64_memory_operand operand)
896 {
897 return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
898 }
899
900 /* Write a LDRB instruction into *BUF.
901
902 LDRB wt, [xn, #offset]
903 LDRB wt, [xn, #index]!
904 LDRB wt, [xn], #index
905
906    RT is the destination register.
907 RN is the base address register.
908 OFFSET is the immediate to add to the base address. It is limited to
909 0 .. 32760 range (12 bits << 3). */
910
911 static int
912 emit_ldrb (uint32_t *buf, struct aarch64_register rt,
913 struct aarch64_register rn,
914 struct aarch64_memory_operand operand)
915 {
916 return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
917 }
918
919
920
921 /* Write a STR instruction into *BUF.
922
923 STR rt, [rn, #offset]
924 STR rt, [rn, #index]!
925 STR rt, [rn], #index
926
927 RT is the register to store.
928 RN is the base address register.
929 OFFSET is the immediate to add to the base address. It is limited to
930 0 .. 32760 range (12 bits << 3). */
931
932 static int
933 emit_str (uint32_t *buf, struct aarch64_register rt,
934 struct aarch64_register rn,
935 struct aarch64_memory_operand operand)
936 {
937 return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
938 }
939
940 /* Helper function emitting an exclusive load or store instruction. */
941
942 static int
943 emit_load_store_exclusive (uint32_t *buf, uint32_t size,
944 enum aarch64_opcodes opcode,
945 struct aarch64_register rs,
946 struct aarch64_register rt,
947 struct aarch64_register rt2,
948 struct aarch64_register rn)
949 {
950 return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
951 | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
952 | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
953 }
954
955 /* Write a LDAXR instruction into *BUF.
956
957 LDAXR rt, [xn]
958
959 RT is the destination register.
960 RN is the base address register. */
961
962 static int
963 emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
964 struct aarch64_register rn)
965 {
966 return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
967 xzr, rn);
968 }
969
970 /* Write a STXR instruction into *BUF.
971
972 STXR ws, rt, [xn]
973
974    RS is the result register; it indicates whether the store succeeded.
975 RT is the destination register.
976 RN is the base address register. */
977
978 static int
979 emit_stxr (uint32_t *buf, struct aarch64_register rs,
980 struct aarch64_register rt, struct aarch64_register rn)
981 {
982 return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
983 xzr, rn);
984 }
985
986 /* Write a STLR instruction into *BUF.
987
988 STLR rt, [xn]
989
990 RT is the register to store.
991 RN is the base address register. */
992
993 static int
994 emit_stlr (uint32_t *buf, struct aarch64_register rt,
995 struct aarch64_register rn)
996 {
997 return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
998 xzr, rn);
999 }
1000
1001 /* Helper function for data processing instructions with register sources. */
1002
1003 static int
1004 emit_data_processing_reg (uint32_t *buf, uint32_t opcode,
1005 struct aarch64_register rd,
1006 struct aarch64_register rn,
1007 struct aarch64_register rm)
1008 {
1009 uint32_t size = ENCODE (rd.is64, 1, 31);
1010
1011 return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
1012 | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
1013 }
1014
1015 /* Helper function for data processing instructions taking either a register
1016 or an immediate. */
1017
1018 static int
1019 emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
1020 struct aarch64_register rd,
1021 struct aarch64_register rn,
1022 struct aarch64_operand operand)
1023 {
1024 uint32_t size = ENCODE (rd.is64, 1, 31);
1025 /* The opcode is different for register and immediate source operands. */
1026 uint32_t operand_opcode;
1027
1028 if (operand.type == OPERAND_IMMEDIATE)
1029 {
1030 /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
1031 operand_opcode = ENCODE (8, 4, 25);
1032
1033 return aarch64_emit_insn (buf, opcode | operand_opcode | size
1034 | ENCODE (operand.imm, 12, 10)
1035 | ENCODE (rn.num, 5, 5)
1036 | ENCODE (rd.num, 5, 0));
1037 }
1038 else
1039 {
1040 /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
1041 operand_opcode = ENCODE (5, 4, 25);
1042
1043 return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
1044 rn, operand.reg);
1045 }
1046 }
1047
1048 /* Write an ADD instruction into *BUF.
1049
1050 ADD rd, rn, #imm
1051 ADD rd, rn, rm
1052
1053 This function handles both an immediate and register add.
1054
1055 RD is the destination register.
1056 RN is the input register.
1057 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1058 OPERAND_REGISTER. */
1059
1060 static int
1061 emit_add (uint32_t *buf, struct aarch64_register rd,
1062 struct aarch64_register rn, struct aarch64_operand operand)
1063 {
1064 return emit_data_processing (buf, ADD, rd, rn, operand);
1065 }
1066
1067 /* Write a SUB instruction into *BUF.
1068
1069 SUB rd, rn, #imm
1070 SUB rd, rn, rm
1071
1072 This function handles both an immediate and register sub.
1073
1074 RD is the destination register.
1075 RN is the input register.
1076    OPERAND is the immediate or register to subtract from RN.  */
1077
1078 static int
1079 emit_sub (uint32_t *buf, struct aarch64_register rd,
1080 struct aarch64_register rn, struct aarch64_operand operand)
1081 {
1082 return emit_data_processing (buf, SUB, rd, rn, operand);
1083 }
1084
1085 /* Write a MOV instruction into *BUF.
1086
1087 MOV rd, #imm
1088 MOV rd, rm
1089
1090 This function handles both a wide immediate move and a register move,
1091 with the condition that the source register is not xzr. xzr and the
1092 stack pointer share the same encoding and this function only supports
1093 the stack pointer.
1094
1095 RD is the destination register.
1096 OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
1097 OPERAND_REGISTER. */
1098
1099 static int
1100 emit_mov (uint32_t *buf, struct aarch64_register rd,
1101 struct aarch64_operand operand)
1102 {
1103 if (operand.type == OPERAND_IMMEDIATE)
1104 {
1105 uint32_t size = ENCODE (rd.is64, 1, 31);
1106 /* Do not shift the immediate. */
1107 uint32_t shift = ENCODE (0, 2, 21);
1108
1109 return aarch64_emit_insn (buf, MOV | size | shift
1110 | ENCODE (operand.imm, 16, 5)
1111 | ENCODE (rd.num, 5, 0));
1112 }
1113 else
1114 return emit_add (buf, rd, operand.reg, immediate_operand (0));
1115 }
1116
1117 /* Write a MOVK instruction into *BUF.
1118
1119 MOVK rd, #imm, lsl #shift
1120
1121 RD is the destination register.
1122 IMM is the immediate.
1123 SHIFT is the logical shift left to apply to IMM. */
1124
1125 static int
1126 emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
1127 unsigned shift)
1128 {
1129 uint32_t size = ENCODE (rd.is64, 1, 31);
1130
1131 return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21) |
1132 ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
1133 }
1134
1135 /* Write instructions into *BUF in order to move ADDR into a register.
1136 ADDR can be a 64-bit value.
1137
1138 This function will emit a series of MOV and MOVK instructions, such as:
1139
1140 MOV xd, #(addr)
1141 MOVK xd, #(addr >> 16), lsl #16
1142 MOVK xd, #(addr >> 32), lsl #32
1143 MOVK xd, #(addr >> 48), lsl #48 */
1144
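/* For instance, with the illustrative address 0x123456789abc this emits:

     MOV  xd, #0x9abc
     MOVK xd, #0x5678, lsl #16
     MOVK xd, #0x1234, lsl #32

   and stops there, since bits 48..63 of the address are zero.  */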
1145 static int
1146 emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
1147 {
1148 uint32_t *p = buf;
1149
1150   /* The MOV (wide immediate) instruction clears the top bits of the
1151 register. */
1152 p += emit_mov (p, rd, immediate_operand (addr & 0xffff));
1153
1154 if ((addr >> 16) != 0)
1155 p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
1156 else
1157 return p - buf;
1158
1159 if ((addr >> 32) != 0)
1160 p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
1161 else
1162 return p - buf;
1163
1164 if ((addr >> 48) != 0)
1165 p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);
1166
1167 return p - buf;
1168 }
1169
1170 /* Write a SUBS instruction into *BUF.
1171
1172 SUBS rd, rn, rm
1173
1174    This instruction updates the condition flags.
1175
1176 RD is the destination register.
1177 RN and RM are the source registers. */
1178
1179 static int
1180 emit_subs (uint32_t *buf, struct aarch64_register rd,
1181 struct aarch64_register rn, struct aarch64_operand operand)
1182 {
1183 return emit_data_processing (buf, SUBS, rd, rn, operand);
1184 }
1185
1186 /* Write a CMP instruction into *BUF.
1187
1188 CMP rn, rm
1189
1190 This instruction is an alias of SUBS xzr, rn, rm.
1191
1192 RN and RM are the registers to compare. */
1193
1194 static int
1195 emit_cmp (uint32_t *buf, struct aarch64_register rn,
1196 struct aarch64_operand operand)
1197 {
1198 return emit_subs (buf, xzr, rn, operand);
1199 }
1200
1201 /* Write an AND instruction into *BUF.
1202
1203 AND rd, rn, rm
1204
1205 RD is the destination register.
1206 RN and RM are the source registers. */
1207
1208 static int
1209 emit_and (uint32_t *buf, struct aarch64_register rd,
1210 struct aarch64_register rn, struct aarch64_register rm)
1211 {
1212 return emit_data_processing_reg (buf, AND, rd, rn, rm);
1213 }
1214
1215 /* Write an ORR instruction into *BUF.
1216
1217 ORR rd, rn, rm
1218
1219 RD is the destination register.
1220 RN and RM are the source registers. */
1221
1222 static int
1223 emit_orr (uint32_t *buf, struct aarch64_register rd,
1224 struct aarch64_register rn, struct aarch64_register rm)
1225 {
1226 return emit_data_processing_reg (buf, ORR, rd, rn, rm);
1227 }
1228
1229 /* Write an ORN instruction into *BUF.
1230
1231 ORN rd, rn, rm
1232
1233 RD is the destination register.
1234 RN and RM are the source registers. */
1235
1236 static int
1237 emit_orn (uint32_t *buf, struct aarch64_register rd,
1238 struct aarch64_register rn, struct aarch64_register rm)
1239 {
1240 return emit_data_processing_reg (buf, ORN, rd, rn, rm);
1241 }
1242
1243 /* Write an EOR instruction into *BUF.
1244
1245 EOR rd, rn, rm
1246
1247 RD is the destination register.
1248 RN and RM are the source registers. */
1249
1250 static int
1251 emit_eor (uint32_t *buf, struct aarch64_register rd,
1252 struct aarch64_register rn, struct aarch64_register rm)
1253 {
1254 return emit_data_processing_reg (buf, EOR, rd, rn, rm);
1255 }
1256
1257 /* Write a MVN instruction into *BUF.
1258
1259 MVN rd, rm
1260
1261 This is an alias for ORN rd, xzr, rm.
1262
1263 RD is the destination register.
1264 RM is the source register. */
1265
1266 static int
1267 emit_mvn (uint32_t *buf, struct aarch64_register rd,
1268 struct aarch64_register rm)
1269 {
1270 return emit_orn (buf, rd, xzr, rm);
1271 }
1272
1273 /* Write a LSLV instruction into *BUF.
1274
1275 LSLV rd, rn, rm
1276
1277 RD is the destination register.
1278 RN and RM are the source registers. */
1279
1280 static int
1281 emit_lslv (uint32_t *buf, struct aarch64_register rd,
1282 struct aarch64_register rn, struct aarch64_register rm)
1283 {
1284 return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
1285 }
1286
1287 /* Write a LSRV instruction into *BUF.
1288
1289 LSRV rd, rn, rm
1290
1291 RD is the destination register.
1292 RN and RM are the source registers. */
1293
1294 static int
1295 emit_lsrv (uint32_t *buf, struct aarch64_register rd,
1296 struct aarch64_register rn, struct aarch64_register rm)
1297 {
1298 return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
1299 }
1300
1301 /* Write an ASRV instruction into *BUF.
1302
1303 ASRV rd, rn, rm
1304
1305 RD is the destination register.
1306 RN and RM are the source registers. */
1307
1308 static int
1309 emit_asrv (uint32_t *buf, struct aarch64_register rd,
1310 struct aarch64_register rn, struct aarch64_register rm)
1311 {
1312 return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
1313 }
1314
1315 /* Write a MUL instruction into *BUF.
1316
1317 MUL rd, rn, rm
1318
1319 RD is the destination register.
1320 RN and RM are the source registers. */
1321
1322 static int
1323 emit_mul (uint32_t *buf, struct aarch64_register rd,
1324 struct aarch64_register rn, struct aarch64_register rm)
1325 {
1326 return emit_data_processing_reg (buf, MUL, rd, rn, rm);
1327 }
1328
1329 /* Write a MRS instruction into *BUF. The register size is 64-bit.
1330
1331 MRS xt, system_reg
1332
1333 RT is the destination register.
1334    SYSTEM_REG is the special purpose register to read.  */
1335
1336 static int
1337 emit_mrs (uint32_t *buf, struct aarch64_register rt,
1338 enum aarch64_system_control_registers system_reg)
1339 {
1340 return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
1341 | ENCODE (rt.num, 5, 0));
1342 }
1343
1344 /* Write a MSR instruction into *BUF. The register size is 64-bit.
1345
1346 MSR system_reg, xt
1347
1348    SYSTEM_REG is the special purpose register to write.
1349 RT is the input register. */
1350
1351 static int
1352 emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
1353 struct aarch64_register rt)
1354 {
1355 return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
1356 | ENCODE (rt.num, 5, 0));
1357 }
1358
1359 /* Write a SEVL instruction into *BUF.
1360
1361 This is a hint instruction telling the hardware to trigger an event. */
1362
1363 static int
1364 emit_sevl (uint32_t *buf)
1365 {
1366 return aarch64_emit_insn (buf, SEVL);
1367 }
1368
1369 /* Write a WFE instruction into *BUF.
1370
1371 This is a hint instruction telling the hardware to wait for an event. */
1372
1373 static int
1374 emit_wfe (uint32_t *buf)
1375 {
1376 return aarch64_emit_insn (buf, WFE);
1377 }
1378
1379 /* Write a SBFM instruction into *BUF.
1380
1381 SBFM rd, rn, #immr, #imms
1382
1383 This instruction moves the bits from #immr to #imms into the
1384 destination, sign extending the result.
1385
1386 RD is the destination register.
1387 RN is the source register.
1388 IMMR is the bit number to start at (least significant bit).
1389 IMMS is the bit number to stop at (most significant bit). */
1390
1391 static int
1392 emit_sbfm (uint32_t *buf, struct aarch64_register rd,
1393 struct aarch64_register rn, uint32_t immr, uint32_t imms)
1394 {
1395 uint32_t size = ENCODE (rd.is64, 1, 31);
1396 uint32_t n = ENCODE (rd.is64, 1, 22);
1397
1398 return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
1399 | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
1400 | ENCODE (rd.num, 5, 0));
1401 }
1402
1403 /* Write a SBFX instruction into *BUF.
1404
1405 SBFX rd, rn, #lsb, #width
1406
1407 This instruction moves #width bits from #lsb into the destination, sign
1408 extending the result. This is an alias for:
1409
1410 SBFM rd, rn, #lsb, #(lsb + width - 1)
1411
1412 RD is the destination register.
1413 RN is the source register.
1414 LSB is the bit number to start at (least significant bit).
1415 WIDTH is the number of bits to move. */
1416
1417 static int
1418 emit_sbfx (uint32_t *buf, struct aarch64_register rd,
1419 struct aarch64_register rn, uint32_t lsb, uint32_t width)
1420 {
1421 return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
1422 }
1423
1424 /* Write a UBFM instruction into *BUF.
1425
1426 UBFM rd, rn, #immr, #imms
1427
1428 This instruction moves the bits from #immr to #imms into the
1429 destination, extending the result with zeros.
1430
1431 RD is the destination register.
1432 RN is the source register.
1433 IMMR is the bit number to start at (least significant bit).
1434 IMMS is the bit number to stop at (most significant bit). */
1435
1436 static int
1437 emit_ubfm (uint32_t *buf, struct aarch64_register rd,
1438 struct aarch64_register rn, uint32_t immr, uint32_t imms)
1439 {
1440 uint32_t size = ENCODE (rd.is64, 1, 31);
1441 uint32_t n = ENCODE (rd.is64, 1, 22);
1442
1443 return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
1444 | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
1445 | ENCODE (rd.num, 5, 0));
1446 }
1447
1448 /* Write a UBFX instruction into *BUF.
1449
1450 UBFX rd, rn, #lsb, #width
1451
1452 This instruction moves #width bits from #lsb into the destination,
1453 extending the result with zeros. This is an alias for:
1454
1455 UBFM rd, rn, #lsb, #(lsb + width - 1)
1456
1457 RD is the destination register.
1458 RN is the source register.
1459 LSB is the bit number to start at (least significant bit).
1460 WIDTH is the number of bits to move. */
1461
1462 static int
1463 emit_ubfx (uint32_t *buf, struct aarch64_register rd,
1464 struct aarch64_register rn, uint32_t lsb, uint32_t width)
1465 {
1466 return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
1467 }
1468
1469 /* Write a CSINC instruction into *BUF.
1470
1471 CSINC rd, rn, rm, cond
1472
1473    This instruction writes RN to RD if the condition is true, and
1474    RM + 1 to RD otherwise.
1475
1476 RD is the destination register.
1477 RN and RM are the source registers.
1478 COND is the encoded condition. */
1479
1480 static int
1481 emit_csinc (uint32_t *buf, struct aarch64_register rd,
1482 struct aarch64_register rn, struct aarch64_register rm,
1483 unsigned cond)
1484 {
1485 uint32_t size = ENCODE (rd.is64, 1, 31);
1486
1487 return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
1488 | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
1489 | ENCODE (rd.num, 5, 0));
1490 }
1491
1492 /* Write a CSET instruction into *BUF.
1493
1494 CSET rd, cond
1495
1496    This instruction conditionally writes 1 or 0 to the destination register:
1497    1 if the condition is true, 0 otherwise.  This is an alias for:
1498
1499 CSINC rd, xzr, xzr, !cond
1500
1501 Note that the condition needs to be inverted.
1502
1503    RD is the destination register.
1504    COND is the encoded condition.  */
1506
1507 static int
1508 emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
1509 {
1510 /* The least significant bit of the condition needs toggling in order to
1511 invert it. */
1512 return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
1513 }
1514
1515 /* Write LEN instructions from BUF into the inferior memory at *TO.
1516
1517 Note instructions are always little endian on AArch64, unlike data. */
1518
1519 static void
1520 append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
1521 {
1522 size_t byte_len = len * sizeof (uint32_t);
1523 #if (__BYTE_ORDER == __BIG_ENDIAN)
1524 uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
1525 size_t i;
1526
1527 for (i = 0; i < len; i++)
1528 le_buf[i] = htole32 (buf[i]);
1529
1530 write_inferior_memory (*to, (const unsigned char *) le_buf, byte_len);
1531
1532 xfree (le_buf);
1533 #else
1534 write_inferior_memory (*to, (const unsigned char *) buf, byte_len);
1535 #endif
1536
1537 *to += byte_len;
1538 }
1539
1540 /* Sub-class of struct aarch64_insn_data, storing information about
1541    instruction relocation for fast tracepoints.  The visitor can
1542    relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and saves
1543    the relocated instructions in the buffer pointed to by INSN_PTR.  */
1544
1545 struct aarch64_insn_relocation_data
1546 {
1547 struct aarch64_insn_data base;
1548
1549 /* The new address the instruction is relocated to. */
1550 CORE_ADDR new_addr;
1551 /* Pointer to the buffer of relocated instruction(s). */
1552 uint32_t *insn_ptr;
1553 };
1554
1555 /* Implementation of aarch64_insn_visitor method "b". */
1556
1557 static void
1558 aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
1559 struct aarch64_insn_data *data)
1560 {
1561 struct aarch64_insn_relocation_data *insn_reloc
1562 = (struct aarch64_insn_relocation_data *) data;
1563 int64_t new_offset
1564 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1565
1566 if (can_encode_int32 (new_offset, 28))
1567 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
1568 }
1569
1570 /* Implementation of aarch64_insn_visitor method "b_cond". */
1571
1572 static void
1573 aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
1574 struct aarch64_insn_data *data)
1575 {
1576 struct aarch64_insn_relocation_data *insn_reloc
1577 = (struct aarch64_insn_relocation_data *) data;
1578 int64_t new_offset
1579 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1580
1581 if (can_encode_int32 (new_offset, 21))
1582 {
1583 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
1584 new_offset);
1585 }
1586 else if (can_encode_int32 (new_offset, 28))
1587 {
1588 /* The offset is out of range for a conditional branch
1589 	 instruction but not for an unconditional branch.  We can use
1590 the following instructions instead:
1591
1592 B.COND TAKEN ; If cond is true, then jump to TAKEN.
1593 B NOT_TAKEN ; Else jump over TAKEN and continue.
1594 TAKEN:
1595 B #(offset - 8)
1596 NOT_TAKEN:
1597
1598 */
1599
1600 insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
1601 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1602 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
1603 }
1604 }
1605
1606 /* Implementation of aarch64_insn_visitor method "cb". */
1607
1608 static void
1609 aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
1610 const unsigned rn, int is64,
1611 struct aarch64_insn_data *data)
1612 {
1613 struct aarch64_insn_relocation_data *insn_reloc
1614 = (struct aarch64_insn_relocation_data *) data;
1615 int64_t new_offset
1616 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1617
1618 if (can_encode_int32 (new_offset, 21))
1619 {
1620 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1621 aarch64_register (rn, is64), new_offset);
1622 }
1623 else if (can_encode_int32 (new_offset, 28))
1624 {
1625 /* The offset is out of range for a compare and branch
1626 	 instruction but not for an unconditional branch.  We can use
1627 the following instructions instead:
1628
1629 CBZ xn, TAKEN ; xn == 0, then jump to TAKEN.
1630 B NOT_TAKEN ; Else jump over TAKEN and continue.
1631 TAKEN:
1632 B #(offset - 8)
1633 NOT_TAKEN:
1634
1635 */
1636 insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
1637 aarch64_register (rn, is64), 8);
1638 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1639 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, new_offset - 8);
1640 }
1641 }
1642
1643 /* Implementation of aarch64_insn_visitor method "tb". */
1644
1645 static void
1646 aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
1647 const unsigned rt, unsigned bit,
1648 struct aarch64_insn_data *data)
1649 {
1650 struct aarch64_insn_relocation_data *insn_reloc
1651 = (struct aarch64_insn_relocation_data *) data;
1652 int64_t new_offset
1653 = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;
1654
1655 if (can_encode_int32 (new_offset, 16))
1656 {
1657 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1658 aarch64_register (rt, 1), new_offset);
1659 }
1660 else if (can_encode_int32 (new_offset, 28))
1661 {
1662 /* The offset is out of range for a test bit and branch
1663 	 instruction but not for an unconditional branch.  We can use
1664 the following instructions instead:
1665
1666 TBZ xn, #bit, TAKEN ; xn[bit] == 0, then jump to TAKEN.
1667 B NOT_TAKEN ; Else jump over TAKEN and continue.
1668 TAKEN:
1669 B #(offset - 8)
1670 NOT_TAKEN:
1671
1672 */
1673 insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
1674 aarch64_register (rt, 1), 8);
1675 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
1676 insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
1677 new_offset - 8);
1678 }
1679 }
1680
1681 /* Implementation of aarch64_insn_visitor method "adr". */
1682
1683 static void
1684 aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
1685 const int is_adrp,
1686 struct aarch64_insn_data *data)
1687 {
1688 struct aarch64_insn_relocation_data *insn_reloc
1689 = (struct aarch64_insn_relocation_data *) data;
1690 /* We know exactly the address the ADR{P,} instruction will compute.
1691 We can just write it to the destination register. */
1692 CORE_ADDR address = data->insn_addr + offset;
1693
1694 if (is_adrp)
1695 {
1696 /* Clear the lower 12 bits of the offset to get the 4K page. */
1697 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1698 aarch64_register (rd, 1),
1699 address & ~0xfff);
1700 }
1701 else
1702 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1703 aarch64_register (rd, 1), address);
1704 }
1705
1706 /* Implementation of aarch64_insn_visitor method "ldr_literal". */
1707
1708 static void
1709 aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
1710 const unsigned rt, const int is64,
1711 struct aarch64_insn_data *data)
1712 {
1713 struct aarch64_insn_relocation_data *insn_reloc
1714 = (struct aarch64_insn_relocation_data *) data;
1715 CORE_ADDR address = data->insn_addr + offset;
1716
1717 insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
1718 aarch64_register (rt, 1), address);
1719
1720 /* We know exactly what address to load from, and what register we
1721 can use:
1722
1723 MOV xd, #(oldloc + offset)
1724 MOVK xd, #((oldloc + offset) >> 16), lsl #16
1725 ...
1726
1727 LDR xd, [xd] ; or LDRSW xd, [xd]
1728
1729 */
1730
1731 if (is_sw)
1732 insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
1733 aarch64_register (rt, 1),
1734 aarch64_register (rt, 1),
1735 offset_memory_operand (0));
1736 else
1737 insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
1738 aarch64_register (rt, is64),
1739 aarch64_register (rt, 1),
1740 offset_memory_operand (0));
1741 }
1742
1743 /* Implementation of aarch64_insn_visitor method "others". */
1744
1745 static void
1746 aarch64_ftrace_insn_reloc_others (const uint32_t insn,
1747 struct aarch64_insn_data *data)
1748 {
1749 struct aarch64_insn_relocation_data *insn_reloc
1750 = (struct aarch64_insn_relocation_data *) data;
1751
1752 /* The instruction is not PC relative. Just re-emit it at the new
1753 location. */
1754 insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
1755 }
1756
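/* Visitor callbacks used to relocate each kind of instruction
   (branches, compare-and-branch, test-and-branch, ADR/ADRP,
   PC-relative loads, everything else) when copying an instruction
   into the fast tracepoint jump pad; see the
   aarch64_ftrace_insn_reloc_* functions above.  */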
1757 static const struct aarch64_insn_visitor visitor =
1758 {
1759 aarch64_ftrace_insn_reloc_b,
1760 aarch64_ftrace_insn_reloc_b_cond,
1761 aarch64_ftrace_insn_reloc_cb,
1762 aarch64_ftrace_insn_reloc_tb,
1763 aarch64_ftrace_insn_reloc_adr,
1764 aarch64_ftrace_insn_reloc_ldr_literal,
1765 aarch64_ftrace_insn_reloc_others,
1766 };
1767
1768 /* Implementation of linux_target_ops method
1769 "install_fast_tracepoint_jump_pad". */
1770
1771 static int
1772 aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
1773 CORE_ADDR tpaddr,
1774 CORE_ADDR collector,
1775 CORE_ADDR lockaddr,
1776 ULONGEST orig_size,
1777 CORE_ADDR *jump_entry,
1778 CORE_ADDR *trampoline,
1779 ULONGEST *trampoline_size,
1780 unsigned char *jjump_pad_insn,
1781 ULONGEST *jjump_pad_insn_size,
1782 CORE_ADDR *adjusted_insn_addr,
1783 CORE_ADDR *adjusted_insn_addr_end,
1784 char *err)
1785 {
1786 uint32_t buf[256];
1787 uint32_t *p = buf;
1788 int64_t offset;
1789 int i;
1790 uint32_t insn;
1791 CORE_ADDR buildaddr = *jump_entry;
1792 struct aarch64_insn_relocation_data insn_data;
1793
1794 /* We need to save the current state on the stack both to restore it
1795 later and to collect register values when the tracepoint is hit.
1796
1797 The saved registers are pushed in a layout that needs to be in sync
1798 with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c). Later on
1799 the supply_fast_tracepoint_registers function will fill in the
1800 register cache from a pointer to saved registers on the stack we build
1801 here.
1802
1803 For simplicity, we set the size of each cell on the stack to 16 bytes.
1804 This way one cell can hold any register type, from system registers
1805 to the 128 bit SIMD&FP registers. Furthermore, the stack pointer
1806      has to be 16-byte aligned anyway.
1807
1808 Note that the CPSR register does not exist on AArch64. Instead we
1809 can access system bits describing the process state with the
1810 MRS/MSR instructions, namely the condition flags. We save them as
1811 if they are part of a CPSR register because that's how GDB
1812 interprets these system bits. At the moment, only the condition
1813 flags are saved in CPSR (NZCV).
1814
1815 Stack layout, each cell is 16 bytes (descending):
1816
1817 High *-------- SIMD&FP registers from 31 down to 0. --------*
1818 | q31 |
1819 . .
1820 . . 32 cells
1821 . .
1822 | q0 |
1823 *---- General purpose registers from 30 down to 0. ----*
1824 | x30 |
1825 . .
1826 . . 31 cells
1827 . .
1828 | x0 |
1829 *------------- Special purpose registers. -------------*
1830 | SP |
1831 | PC |
1832 | CPSR (NZCV) | 5 cells
1833 | FPSR |
1834 | FPCR | <- SP + 16
1835 *------------- collecting_t object --------------------*
1836 | TPIDR_EL0 | struct tracepoint * |
1837 Low *------------------------------------------------------*
1838
1839 After this stack is set up, we issue a call to the collector, passing
1840 it the saved registers at (SP + 16). */
1841
1842 /* Push SIMD&FP registers on the stack:
1843
1844 SUB sp, sp, #(32 * 16)
1845
1846 STP q30, q31, [sp, #(30 * 16)]
1847 ...
1848 STP q0, q1, [sp]
1849
1850 */
1851 p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
1852 for (i = 30; i >= 0; i -= 2)
1853 p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);
1854
1855   /* Push general purpose registers on the stack.  Note that we do not need
1856 to push x31 as it represents the xzr register and not the stack
1857 pointer in a STR instruction.
1858
1859 SUB sp, sp, #(31 * 16)
1860
1861 STR x30, [sp, #(30 * 16)]
1862 ...
1863 STR x0, [sp]
1864
1865 */
1866 p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
1867 for (i = 30; i >= 0; i -= 1)
1868 p += emit_str (p, aarch64_register (i, 1), sp,
1869 offset_memory_operand (i * 16));
1870
1871 /* Make space for 5 more cells.
1872
1873 SUB sp, sp, #(5 * 16)
1874
1875 */
1876 p += emit_sub (p, sp, sp, immediate_operand (5 * 16));
1877
1878
1879 /* Save SP:
1880
1881 ADD x4, sp, #((32 + 31 + 5) * 16)
1882 STR x4, [sp, #(4 * 16)]
1883
1884 */
1885 p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
1886 p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));
1887
1888 /* Save PC (tracepoint address):
1889
1890 MOV x3, #(tpaddr)
1891 ...
1892
1893 STR x3, [sp, #(3 * 16)]
1894
1895 */
1896
1897 p += emit_mov_addr (p, x3, tpaddr);
1898 p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));
1899
1900 /* Save CPSR (NZCV), FPSR and FPCR:
1901
1902 MRS x2, nzcv
1903 MRS x1, fpsr
1904 MRS x0, fpcr
1905
1906 STR x2, [sp, #(2 * 16)]
1907 STR x1, [sp, #(1 * 16)]
1908 STR x0, [sp, #(0 * 16)]
1909
1910 */
1911 p += emit_mrs (p, x2, NZCV);
1912 p += emit_mrs (p, x1, FPSR);
1913 p += emit_mrs (p, x0, FPCR);
1914 p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
1915 p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
1916 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
1917
1918   /* Push the collecting_t object.  It consists of the address of the
1919 tracepoint and an ID for the current thread. We get the latter by
1920 reading the tpidr_el0 system register. It corresponds to the
1921 NT_ARM_TLS register accessible with ptrace.
1922
1923 MOV x0, #(tpoint)
1924 ...
1925
1926 MRS x1, tpidr_el0
1927
1928 STP x0, x1, [sp, #-16]!
1929
1930 */
1931
1932 p += emit_mov_addr (p, x0, tpoint);
1933 p += emit_mrs (p, x1, TPIDR_EL0);
1934 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
1935
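/* A rough C view of the two values just pushed (a sketch; the real
   collecting_t type is owned by the tracepoint code, so the field
   names here are illustrative only):

     struct collecting_t
     {
       uintptr_t tpoint;       // the struct tracepoint * above
       uintptr_t thread_area;  // TPIDR_EL0, identifying this thread
     };  */
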
1936 /* Spin-lock:
1937
1938 The shared memory for the lock is at lockaddr. It will hold zero
1939 if no one is holding the lock; otherwise it contains the address of
1940 the collecting_t object on the stack of the thread which acquired it.
1941
1942 At this stage, the stack pointer points to this thread's collecting_t
1943 object.
1944
1945 We use the following registers:
1946 - x0: Address of the lock.
1947 - x1: Pointer to collecting_t object.
1948 - x2: Scratch register.
1949
1950 MOV x0, #(lockaddr)
1951 ...
1952 MOV x1, sp
1953
1954 ; Trigger an event local to this core so that the first WFE below
1955 ; returns immediately instead of waiting.
1956 SEVL
1957 again:
1958 ; Wait for an event. The event is triggered by either the SEVL
1959 ; or STLR instructions (store release).
1960 WFE
1961
1962 ; Atomically read at lockaddr. This marks the memory location as
1963 ; exclusive. This instruction also has memory constraints which
1964 ; make sure all previous data reads and writes are done before
1965 ; executing it.
1966 LDAXR x2, [x0]
1967
1968 ; Try again if another thread holds the lock.
1969 CBNZ x2, again
1970
1971 ; We can lock it! Write the address of the collecting_t object.
1972 ; This instruction will fail if the memory location is not marked
1973 ; as exclusive anymore. If it succeeds, it will remove the
1974 ; exclusive mark on the memory location. This way, if another
1975 ; thread executes this instruction before us, we will fail and try
1976 ; all over again.
1977 STXR w2, x1, [x0]
1978 CBNZ w2, again
1979
1980 */
1981
1982 p += emit_mov_addr (p, x0, lockaddr);
1983 p += emit_mov (p, x1, register_operand (sp));
1984
1985 p += emit_sevl (p);
1986 p += emit_wfe (p);
1987 p += emit_ldaxr (p, x2, x0);
1988 p += emit_cb (p, 1, w2, -2 * 4);
1989 p += emit_stxr (p, w2, x1, x0);
1990 p += emit_cb (p, 1, x2, -4 * 4);
1991
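/* The branch operands above encode the loop: -2 * 4 branches back two
   instructions (to the WFE) when the lock is already taken, and -4 * 4
   branches back four instructions (again to the WFE) when the
   exclusive store fails.  Roughly, the emitted sequence behaves like
   this C sketch (the helper names are purely illustrative):

     for (;;)
       {
         wait_for_event ();                               // WFE
         old = load_acquire_exclusive (lockaddr);         // LDAXR
         if (old != 0)
           continue;
         if (store_exclusive (lockaddr, collecting_obj))  // STXR
           break;
       }  */
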
1992 /* Call collector (struct tracepoint *, unsigned char *):
1993
1994 MOV x0, #(tpoint)
1995 ...
1996
1997 ; Saved registers start after the collecting_t object.
1998 ADD x1, sp, #16
1999
2000 ; We use an intra-procedure-call scratch register.
2001 MOV ip0, #(collector)
2002 ...
2003
2004 ; And call back to C!
2005 BLR ip0
2006
2007 */
2008
2009 p += emit_mov_addr (p, x0, tpoint);
2010 p += emit_add (p, x1, sp, immediate_operand (16));
2011
2012 p += emit_mov_addr (p, ip0, collector);
2013 p += emit_blr (p, ip0);
2014
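/* The COLLECTOR address passed to this function is expected to be the
   in-process agent's collection routine (gdb_collect in the gdbserver
   sources); the pad only relies on the prototype shown above.  */
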
2015 /* Release the lock.
2016
2017 MOV x0, #(lockaddr)
2018 ...
2019
2020 ; This instruction is a normal store with memory ordering
2021 ; constraints. Thanks to this we do not have to put a data
2022 ; barrier instruction to make sure all data reads and writes are done
2023 ; before this instruction is executed. Furthermore, this instruction
2024 ; will trigger an event, letting other threads know they can grab
2025 ; the lock.
2026 STLR xzr, [x0]
2027
2028 */
2029 p += emit_mov_addr (p, x0, lockaddr);
2030 p += emit_stlr (p, xzr, x0);
2031
2032 /* Free collecting_t object:
2033
2034 ADD sp, sp, #16
2035
2036 */
2037 p += emit_add (p, sp, sp, immediate_operand (16));
2038
2039 /* Restore CPSR (NZCV), FPSR and FPCR, then free the special purpose
2040 register cells from the stack.
2041
2042 LDR x2, [sp, #(2 * 16)]
2043 LDR x1, [sp, #(1 * 16)]
2044 LDR x0, [sp, #(0 * 16)]
2045
2046 MSR NZCV, x2
2047 MSR FPSR, x1
2048 MSR FPCR, x0
2049
2050 ADD sp, sp, #(5 * 16)
2051
2052 */
2053 p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
2054 p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
2055 p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
2056 p += emit_msr (p, NZCV, x2);
2057 p += emit_msr (p, FPSR, x1);
2058 p += emit_msr (p, FPCR, x0);
2059
2060 p += emit_add (p, sp, sp, immediate_operand (5 * 16));
2061
2062 /* Pop general purpose registers:
2063
2064 LDR x0, [sp]
2065 ...
2066 LDR x30, [sp, #(30 * 16)]
2067
2068 ADD sp, sp, #(31 * 16)
2069
2070 */
2071 for (i = 0; i <= 30; i += 1)
2072 p += emit_ldr (p, aarch64_register (i, 1), sp,
2073 offset_memory_operand (i * 16));
2074 p += emit_add (p, sp, sp, immediate_operand (31 * 16));
2075
2076 /* Pop SIMD&FP registers:
2077
2078 LDP q0, q1, [sp]
2079 ...
2080 LDP q30, q31, [sp, #(30 * 16)]
2081
2082 ADD sp, sp, #(32 * 16)
2083
2084 */
2085 for (i = 0; i <= 30; i += 2)
2086 p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
2087 p += emit_add (p, sp, sp, immediate_operand (32 * 16));
2088
2089 /* Write the code into the inferior memory. */
2090 append_insns (&buildaddr, p - buf, buf);
2091
2092 /* Now emit the relocated instruction. */
2093 *adjusted_insn_addr = buildaddr;
2094 target_read_uint32 (tpaddr, &insn);
2095
2096 insn_data.base.insn_addr = tpaddr;
2097 insn_data.new_addr = buildaddr;
2098 insn_data.insn_ptr = buf;
2099
2100 aarch64_relocate_instruction (insn, &visitor,
2101 (struct aarch64_insn_data *) &insn_data);
2102
2103 /* We may not have been able to relocate the instruction. */
2104 if (insn_data.insn_ptr == buf)
2105 {
2106 sprintf (err,
2107 "E.Could not relocate instruction from %s to %s.",
2108 core_addr_to_string_nz (tpaddr),
2109 core_addr_to_string_nz (buildaddr));
2110 return 1;
2111 }
2112 else
2113 append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
2114 *adjusted_insn_addr_end = buildaddr;
2115
2116 /* Go back to the start of the buffer. */
2117 p = buf;
2118
2119 /* Emit a branch back from the jump pad. */
2120 offset = (tpaddr + orig_size - buildaddr);
2121 if (!can_encode_int32 (offset, 28))
2122 {
2123 sprintf (err,
2124 "E.Jump back from jump pad too far from tracepoint "
2125 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2126 offset);
2127 return 1;
2128 }
2129
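/* Note on the range checks: an AArch64 B instruction encodes a signed
   26-bit word offset, i.e. a 28-bit byte offset, so both the branch
   back to the tracepoint and the branch into the pad must land within
   +/-128MB of their source.  */
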
2130 p += emit_b (p, 0, offset);
2131 append_insns (&buildaddr, p - buf, buf);
2132
2133 /* Give the caller a branch instruction into the jump pad. */
2134 offset = (*jump_entry - tpaddr);
2135 if (!can_encode_int32 (offset, 28))
2136 {
2137 sprintf (err,
2138 "E.Jump pad too far from tracepoint "
2139 "(offset 0x%" PRIx64 " cannot be encoded in 28 bits).",
2140 offset);
2141 return 1;
2142 }
2143
2144 emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
2145 *jjump_pad_insn_size = 4;
2146
2147 /* Return the end address of our pad. */
2148 *jump_entry = buildaddr;
2149
2150 return 0;
2151 }
2152
2153 /* Helper function writing LEN instructions from START into
2154 current_insn_ptr. */
2155
2156 static void
2157 emit_ops_insns (const uint32_t *start, int len)
2158 {
2159 CORE_ADDR buildaddr = current_insn_ptr;
2160
2161 if (debug_threads)
2162 debug_printf ("Adding %d instrucions at %s\n",
2163 len, paddress (buildaddr));
2164
2165 append_insns (&buildaddr, len, start);
2166 current_insn_ptr = buildaddr;
2167 }
2168
2169 /* Pop a register from the stack. */
2170
2171 static int
2172 emit_pop (uint32_t *buf, struct aarch64_register rt)
2173 {
2174 return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
2175 }
2176
2177 /* Push a register on the stack. */
2178
2179 static int
2180 emit_push (uint32_t *buf, struct aarch64_register rt)
2181 {
2182 return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
2183 }
2184
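/* Both helpers above move SP by a full 16-byte cell per value, e.g.
   (a sketch of the emitted instructions):

     emit_push (buf, x0)  ->  STR x0, [sp, #-16]!
     emit_pop (buf, x0)   ->  LDR x0, [sp], #16

   Using one 16-byte cell per value keeps SP 16-byte aligned, which is
   required when SP is used to address memory.  */
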
2185 /* Implementation of emit_ops method "emit_prologue". */
2186
2187 static void
2188 aarch64_emit_prologue (void)
2189 {
2190 uint32_t buf[16];
2191 uint32_t *p = buf;
2192
2193 /* This function emits a prologue for the following function prototype:
2194
2195 enum eval_result_type f (unsigned char *regs,
2196 ULONGEST *value);
2197
2198 The first argument is a buffer of raw registers. The second
2199 argument points to where the result of evaluating the expression
2200 will be stored; it is set to whatever is on top of the stack at
2201 the end.
2202
2203 The stack set up by the prologue is as such:
2204
2205 High *------------------------------------------------------*
2206 | LR |
2207 | FP | <- FP
2208 | x1 (ULONGEST *value) |
2209 | x0 (unsigned char *regs) |
2210 Low *------------------------------------------------------*
2211
2212 As we are implementing a stack machine, each opcode can expand the
2213 stack so we never know how far we are from the data saved by this
2214 prologue. In order to be able to refer to value and regs later, we save
2215 the current stack pointer in the frame pointer. This way, it is not
2216 clobbered when calling C functions.
2217
2218 Finally, throughout every operation, we use register x0 as the
2219 top of the stack, and x1 as a scratch register. */
2220
2221 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
2222 p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
2223 p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));
2224
2225 p += emit_add (p, fp, sp, immediate_operand (2 * 8));
2226
2227
2228 emit_ops_insns (buf, p - buf);
2229 }
2230
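/* With the frame built above, the compiled expression can always reach
   its arguments relative to FP, no matter how far the evaluation stack
   grows.  A sketch of what later methods emit:

     regs  is at [fp, #-16]  (loaded by aarch64_emit_reg)
     value is at [fp, #-8]   (loaded by aarch64_emit_epilogue)  */
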
2231 /* Implementation of emit_ops method "emit_epilogue". */
2232
2233 static void
2234 aarch64_emit_epilogue (void)
2235 {
2236 uint32_t buf[16];
2237 uint32_t *p = buf;
2238
2239 /* Store the result of the expression (x0) in *value. */
2240 p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
2241 p += emit_ldr (p, x1, x1, offset_memory_operand (0));
2242 p += emit_str (p, x0, x1, offset_memory_operand (0));
2243
2244 /* Restore the previous state. */
2245 p += emit_add (p, sp, fp, immediate_operand (2 * 8));
2246 p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));
2247
2248 /* Return expr_eval_no_error. */
2249 p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
2250 p += emit_ret (p, lr);
2251
2252 emit_ops_insns (buf, p - buf);
2253 }
2254
2255 /* Implementation of emit_ops method "emit_add". */
2256
2257 static void
2258 aarch64_emit_add (void)
2259 {
2260 uint32_t buf[16];
2261 uint32_t *p = buf;
2262
2263 p += emit_pop (p, x1);
2264 p += emit_add (p, x0, x1, register_operand (x0));
2265
2266 emit_ops_insns (buf, p - buf);
2267 }
2268
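/* The binary operations here all follow the same pattern: the
   right-hand operand is the cached top of the stack in x0, the
   left-hand operand is popped into x1, and the result becomes the new
   top of the stack in x0.  For "add" above, the emitted code is
   essentially:

     LDR x1, [sp], #16
     ADD x0, x1, x0  */
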
2269 /* Implementation of emit_ops method "emit_sub". */
2270
2271 static void
2272 aarch64_emit_sub (void)
2273 {
2274 uint32_t buf[16];
2275 uint32_t *p = buf;
2276
2277 p += emit_pop (p, x1);
2278 p += emit_sub (p, x0, x1, register_operand (x0));
2279
2280 emit_ops_insns (buf, p - buf);
2281 }
2282
2283 /* Implementation of emit_ops method "emit_mul". */
2284
2285 static void
2286 aarch64_emit_mul (void)
2287 {
2288 uint32_t buf[16];
2289 uint32_t *p = buf;
2290
2291 p += emit_pop (p, x1);
2292 p += emit_mul (p, x0, x1, x0);
2293
2294 emit_ops_insns (buf, p - buf);
2295 }
2296
2297 /* Implementation of emit_ops method "emit_lsh". */
2298
2299 static void
2300 aarch64_emit_lsh (void)
2301 {
2302 uint32_t buf[16];
2303 uint32_t *p = buf;
2304
2305 p += emit_pop (p, x1);
2306 p += emit_lslv (p, x0, x1, x0);
2307
2308 emit_ops_insns (buf, p - buf);
2309 }
2310
2311 /* Implementation of emit_ops method "emit_rsh_signed". */
2312
2313 static void
2314 aarch64_emit_rsh_signed (void)
2315 {
2316 uint32_t buf[16];
2317 uint32_t *p = buf;
2318
2319 p += emit_pop (p, x1);
2320 p += emit_asrv (p, x0, x1, x0);
2321
2322 emit_ops_insns (buf, p - buf);
2323 }
2324
2325 /* Implementation of emit_ops method "emit_rsh_unsigned". */
2326
2327 static void
2328 aarch64_emit_rsh_unsigned (void)
2329 {
2330 uint32_t buf[16];
2331 uint32_t *p = buf;
2332
2333 p += emit_pop (p, x1);
2334 p += emit_lsrv (p, x0, x1, x0);
2335
2336 emit_ops_insns (buf, p - buf);
2337 }
2338
2339 /* Implementation of emit_ops method "emit_ext". */
2340
2341 static void
2342 aarch64_emit_ext (int arg)
2343 {
2344 uint32_t buf[16];
2345 uint32_t *p = buf;
2346
2347 p += emit_sbfx (p, x0, x0, 0, arg);
2348
2349 emit_ops_insns (buf, p - buf);
2350 }
2351
2352 /* Implementation of emit_ops method "emit_log_not". */
2353
2354 static void
2355 aarch64_emit_log_not (void)
2356 {
2357 uint32_t buf[16];
2358 uint32_t *p = buf;
2359
2360 /* If the top of the stack is 0, replace it with 1. Else replace it with
2361 0. */
2362
2363 p += emit_cmp (p, x0, immediate_operand (0));
2364 p += emit_cset (p, x0, EQ);
2365
2366 emit_ops_insns (buf, p - buf);
2367 }
2368
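/* In C terms, the CMP/CSET pair above computes x0 = (x0 == 0).  */
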
2369 /* Implementation of emit_ops method "emit_bit_and". */
2370
2371 static void
2372 aarch64_emit_bit_and (void)
2373 {
2374 uint32_t buf[16];
2375 uint32_t *p = buf;
2376
2377 p += emit_pop (p, x1);
2378 p += emit_and (p, x0, x0, x1);
2379
2380 emit_ops_insns (buf, p - buf);
2381 }
2382
2383 /* Implementation of emit_ops method "emit_bit_or". */
2384
2385 static void
2386 aarch64_emit_bit_or (void)
2387 {
2388 uint32_t buf[16];
2389 uint32_t *p = buf;
2390
2391 p += emit_pop (p, x1);
2392 p += emit_orr (p, x0, x0, x1);
2393
2394 emit_ops_insns (buf, p - buf);
2395 }
2396
2397 /* Implementation of emit_ops method "emit_bit_xor". */
2398
2399 static void
2400 aarch64_emit_bit_xor (void)
2401 {
2402 uint32_t buf[16];
2403 uint32_t *p = buf;
2404
2405 p += emit_pop (p, x1);
2406 p += emit_eor (p, x0, x0, x1);
2407
2408 emit_ops_insns (buf, p - buf);
2409 }
2410
2411 /* Implementation of emit_ops method "emit_bit_not". */
2412
2413 static void
2414 aarch64_emit_bit_not (void)
2415 {
2416 uint32_t buf[16];
2417 uint32_t *p = buf;
2418
2419 p += emit_mvn (p, x0, x0);
2420
2421 emit_ops_insns (buf, p - buf);
2422 }
2423
2424 /* Implementation of emit_ops method "emit_equal". */
2425
2426 static void
2427 aarch64_emit_equal (void)
2428 {
2429 uint32_t buf[16];
2430 uint32_t *p = buf;
2431
2432 p += emit_pop (p, x1);
2433 p += emit_cmp (p, x0, register_operand (x1));
2434 p += emit_cset (p, x0, EQ);
2435
2436 emit_ops_insns (buf, p - buf);
2437 }
2438
2439 /* Implementation of emit_ops method "emit_less_signed". */
2440
2441 static void
2442 aarch64_emit_less_signed (void)
2443 {
2444 uint32_t buf[16];
2445 uint32_t *p = buf;
2446
2447 p += emit_pop (p, x1);
2448 p += emit_cmp (p, x1, register_operand (x0));
2449 p += emit_cset (p, x0, LT);
2450
2451 emit_ops_insns (buf, p - buf);
2452 }
2453
2454 /* Implementation of emit_ops method "emit_less_unsigned". */
2455
2456 static void
2457 aarch64_emit_less_unsigned (void)
2458 {
2459 uint32_t buf[16];
2460 uint32_t *p = buf;
2461
2462 p += emit_pop (p, x1);
2463 p += emit_cmp (p, x1, register_operand (x0));
2464 p += emit_cset (p, x0, LO);
2465
2466 emit_ops_insns (buf, p - buf);
2467 }
2468
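/* The signed and unsigned comparisons above differ only in the CSET
   condition: LT tests the signed "less than" result of the compare,
   while LO (unsigned lower) tests the unsigned one.  */
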
2469 /* Implementation of emit_ops method "emit_ref". */
2470
2471 static void
2472 aarch64_emit_ref (int size)
2473 {
2474 uint32_t buf[16];
2475 uint32_t *p = buf;
2476
2477 switch (size)
2478 {
2479 case 1:
2480 p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
2481 break;
2482 case 2:
2483 p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
2484 break;
2485 case 4:
2486 p += emit_ldr (p, w0, x0, offset_memory_operand (0));
2487 break;
2488 case 8:
2489 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2490 break;
2491 default:
2492 /* Unknown size, bail on compilation. */
2493 emit_error = 1;
2494 break;
2495 }
2496
2497 emit_ops_insns (buf, p - buf);
2498 }
2499
2500 /* Implementation of emit_ops method "emit_if_goto". */
2501
2502 static void
2503 aarch64_emit_if_goto (int *offset_p, int *size_p)
2504 {
2505 uint32_t buf[16];
2506 uint32_t *p = buf;
2507
2508 /* The Z flag is set or cleared here. */
2509 p += emit_cmp (p, x0, immediate_operand (0));
2510 /* This instruction must not change the Z flag. */
2511 p += emit_pop (p, x0);
2512 /* Branch over the next instruction if x0 == 0. */
2513 p += emit_bcond (p, EQ, 8);
2514
2515 /* The NOP instruction will be patched with an unconditional branch. */
2516 if (offset_p)
2517 *offset_p = (p - buf) * 4;
2518 if (size_p)
2519 *size_p = 4;
2520 p += emit_nop (p);
2521
2522 emit_ops_insns (buf, p - buf);
2523 }
2524
2525 /* Implementation of emit_ops method "emit_goto". */
2526
2527 static void
2528 aarch64_emit_goto (int *offset_p, int *size_p)
2529 {
2530 uint32_t buf[16];
2531 uint32_t *p = buf;
2532
2533 /* The NOP instruction will be patched with an unconditional branch. */
2534 if (offset_p)
2535 *offset_p = 0;
2536 if (size_p)
2537 *size_p = 4;
2538 p += emit_nop (p);
2539
2540 emit_ops_insns (buf, p - buf);
2541 }
2542
2543 /* Implementation of emit_ops method "write_goto_address". */
2544
2545 void
2546 aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2547 {
2548 uint32_t insn;
2549
2550 emit_b (&insn, 0, to - from);
2551 append_insns (&from, 1, &insn);
2552 }
2553
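/* For example, when the NOP emitted by aarch64_emit_if_goto sits at
   address FROM and the branch target turns out to be TO, this method
   overwrites that NOP with "B (TO - FROM)".  */
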
2554 /* Implementation of emit_ops method "emit_const". */
2555
2556 static void
2557 aarch64_emit_const (LONGEST num)
2558 {
2559 uint32_t buf[16];
2560 uint32_t *p = buf;
2561
2562 p += emit_mov_addr (p, x0, num);
2563
2564 emit_ops_insns (buf, p - buf);
2565 }
2566
2567 /* Implementation of emit_ops method "emit_call". */
2568
2569 static void
2570 aarch64_emit_call (CORE_ADDR fn)
2571 {
2572 uint32_t buf[16];
2573 uint32_t *p = buf;
2574
2575 p += emit_mov_addr (p, ip0, fn);
2576 p += emit_blr (p, ip0);
2577
2578 emit_ops_insns (buf, p - buf);
2579 }
2580
2581 /* Implementation of emit_ops method "emit_reg". */
2582
2583 static void
2584 aarch64_emit_reg (int reg)
2585 {
2586 uint32_t buf[16];
2587 uint32_t *p = buf;
2588
2589 /* Set x0 to unsigned char *regs. */
2590 p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
2591 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2592 p += emit_mov (p, x1, immediate_operand (reg));
2593
2594 emit_ops_insns (buf, p - buf);
2595
2596 aarch64_emit_call (get_raw_reg_func_addr ());
2597 }
2598
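/* The callee is the raw-register reader provided by the tracepoint
   code; it is assumed to take the register buffer in x0 and the
   register number in x1, and to return the register value in x0, which
   then becomes the new top of the stack.  */
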
2599 /* Implementation of emit_ops method "emit_pop". */
2600
2601 static void
2602 aarch64_emit_pop (void)
2603 {
2604 uint32_t buf[16];
2605 uint32_t *p = buf;
2606
2607 p += emit_pop (p, x0);
2608
2609 emit_ops_insns (buf, p - buf);
2610 }
2611
2612 /* Implementation of emit_ops method "emit_stack_flush". */
2613
2614 static void
2615 aarch64_emit_stack_flush (void)
2616 {
2617 uint32_t buf[16];
2618 uint32_t *p = buf;
2619
2620 p += emit_push (p, x0);
2621
2622 emit_ops_insns (buf, p - buf);
2623 }
2624
2625 /* Implementation of emit_ops method "emit_zero_ext". */
2626
2627 static void
2628 aarch64_emit_zero_ext (int arg)
2629 {
2630 uint32_t buf[16];
2631 uint32_t *p = buf;
2632
2633 p += emit_ubfx (p, x0, x0, 0, arg);
2634
2635 emit_ops_insns (buf, p - buf);
2636 }
2637
2638 /* Implementation of emit_ops method "emit_swap". */
2639
2640 static void
2641 aarch64_emit_swap (void)
2642 {
2643 uint32_t buf[16];
2644 uint32_t *p = buf;
2645
2646 p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
2647 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2648 p += emit_mov (p, x0, register_operand (x1));
2649
2650 emit_ops_insns (buf, p - buf);
2651 }
2652
2653 /* Implementation of emit_ops method "emit_stack_adjust". */
2654
2655 static void
2656 aarch64_emit_stack_adjust (int n)
2657 {
2658 /* This is not needed with our design. */
2659 uint32_t buf[16];
2660 uint32_t *p = buf;
2661
2662 p += emit_add (p, sp, sp, immediate_operand (n * 16));
2663
2664 emit_ops_insns (buf, p - buf);
2665 }
2666
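/* Each value on the evaluation stack occupies one 16-byte cell (see
   emit_push/emit_pop above), hence the n * 16 adjustment.  */
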
2667 /* Implementation of emit_ops method "emit_int_call_1". */
2668
2669 static void
2670 aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2671 {
2672 uint32_t buf[16];
2673 uint32_t *p = buf;
2674
2675 p += emit_mov (p, x0, immediate_operand (arg1));
2676
2677 emit_ops_insns (buf, p - buf);
2678
2679 aarch64_emit_call (fn);
2680 }
2681
2682 /* Implementation of emit_ops method "emit_void_call_2". */
2683
2684 static void
2685 aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2686 {
2687 uint32_t buf[16];
2688 uint32_t *p = buf;
2689
2690 /* Push x0 on the stack. */
2691 aarch64_emit_stack_flush ();
2692
2693 /* Setup arguments for the function call:
2694
2695 x0: arg1
2696 x1: top of the stack
2697
2698 MOV x1, x0
2699 MOV x0, #arg1 */
2700
2701 p += emit_mov (p, x1, register_operand (x0));
2702 p += emit_mov (p, x0, immediate_operand (arg1));
2703
2704 emit_ops_insns (buf, p - buf);
2705
2706 aarch64_emit_call (fn);
2707
2708 /* Restore x0. */
2709 aarch64_emit_pop ();
2710 }
2711
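/* x0 is spilled and reloaded around the call because it caches the top
   of the evaluation stack (see the prologue comment) and the called
   function is free to clobber it.  */
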
2712 /* Implementation of emit_ops method "emit_eq_goto". */
2713
2714 static void
2715 aarch64_emit_eq_goto (int *offset_p, int *size_p)
2716 {
2717 uint32_t buf[16];
2718 uint32_t *p = buf;
2719
2720 p += emit_pop (p, x1);
2721 p += emit_cmp (p, x1, register_operand (x0));
2722 /* Branch over the next instruction if x0 != x1. */
2723 p += emit_bcond (p, NE, 8);
2724 /* The NOP instruction will be patched with an unconditional branch. */
2725 if (offset_p)
2726 *offset_p = (p - buf) * 4;
2727 if (size_p)
2728 *size_p = 4;
2729 p += emit_nop (p);
2730
2731 emit_ops_insns (buf, p - buf);
2732 }
2733
2734 /* Implementation of emit_ops method "emit_ne_goto". */
2735
2736 static void
2737 aarch64_emit_ne_goto (int *offset_p, int *size_p)
2738 {
2739 uint32_t buf[16];
2740 uint32_t *p = buf;
2741
2742 p += emit_pop (p, x1);
2743 p += emit_cmp (p, x1, register_operand (x0));
2744 /* Branch over the next instruction if x0 == x1. */
2745 p += emit_bcond (p, EQ, 8);
2746 /* The NOP instruction will be patched with an unconditional branch. */
2747 if (offset_p)
2748 *offset_p = (p - buf) * 4;
2749 if (size_p)
2750 *size_p = 4;
2751 p += emit_nop (p);
2752
2753 emit_ops_insns (buf, p - buf);
2754 }
2755
2756 /* Implementation of emit_ops method "emit_lt_goto". */
2757
2758 static void
2759 aarch64_emit_lt_goto (int *offset_p, int *size_p)
2760 {
2761 uint32_t buf[16];
2762 uint32_t *p = buf;
2763
2764 p += emit_pop (p, x1);
2765 p += emit_cmp (p, x1, register_operand (x0));
2766 /* Branch over the next instruction if x1 >= x0. */
2767 p += emit_bcond (p, GE, 8);
2768 /* The NOP instruction will be patched with an unconditional branch. */
2769 if (offset_p)
2770 *offset_p = (p - buf) * 4;
2771 if (size_p)
2772 *size_p = 4;
2773 p += emit_nop (p);
2774
2775 emit_ops_insns (buf, p - buf);
2776 }
2777
2778 /* Implementation of emit_ops method "emit_le_goto". */
2779
2780 static void
2781 aarch64_emit_le_goto (int *offset_p, int *size_p)
2782 {
2783 uint32_t buf[16];
2784 uint32_t *p = buf;
2785
2786 p += emit_pop (p, x1);
2787 p += emit_cmp (p, x1, register_operand (x0));
2788 /* Branch over the next instruction if x1 > x0. */
2789 p += emit_bcond (p, GT, 8);
2790 /* The NOP instruction will be patched with an unconditional branch. */
2791 if (offset_p)
2792 *offset_p = (p - buf) * 4;
2793 if (size_p)
2794 *size_p = 4;
2795 p += emit_nop (p);
2796
2797 emit_ops_insns (buf, p - buf);
2798 }
2799
2800 /* Implementation of emit_ops method "emit_gt_goto". */
2801
2802 static void
2803 aarch64_emit_gt_goto (int *offset_p, int *size_p)
2804 {
2805 uint32_t buf[16];
2806 uint32_t *p = buf;
2807
2808 p += emit_pop (p, x1);
2809 p += emit_cmp (p, x1, register_operand (x0));
2810 /* Branch over the next instruction if x1 <= x0. */
2811 p += emit_bcond (p, LE, 8);
2812 /* The NOP instruction will be patched with an unconditional branch. */
2813 if (offset_p)
2814 *offset_p = (p - buf) * 4;
2815 if (size_p)
2816 *size_p = 4;
2817 p += emit_nop (p);
2818
2819 emit_ops_insns (buf, p - buf);
2820 }
2821
2822 /* Implementation of emit_ops method "emit_ge_got". */
2823
2824 static void
2825 aarch64_emit_ge_goto (int *offset_p, int *size_p)
2826 {
2827 uint32_t buf[16];
2828 uint32_t *p = buf;
2829
2830 p += emit_pop (p, x1);
2831 p += emit_cmp (p, x1, register_operand (x0));
2832 /* Branch over the next instruction if x1 < x0. */
2833 p += emit_bcond (p, LT, 8);
2834 /* The NOP instruction will be patched with an unconditional branch. */
2835 if (offset_p)
2836 *offset_p = (p - buf) * 4;
2837 if (size_p)
2838 *size_p = 4;
2839 p += emit_nop (p);
2840
2841 emit_ops_insns (buf, p - buf);
2842 }
2843
2844 static struct emit_ops aarch64_emit_ops_impl =
2845 {
2846 aarch64_emit_prologue,
2847 aarch64_emit_epilogue,
2848 aarch64_emit_add,
2849 aarch64_emit_sub,
2850 aarch64_emit_mul,
2851 aarch64_emit_lsh,
2852 aarch64_emit_rsh_signed,
2853 aarch64_emit_rsh_unsigned,
2854 aarch64_emit_ext,
2855 aarch64_emit_log_not,
2856 aarch64_emit_bit_and,
2857 aarch64_emit_bit_or,
2858 aarch64_emit_bit_xor,
2859 aarch64_emit_bit_not,
2860 aarch64_emit_equal,
2861 aarch64_emit_less_signed,
2862 aarch64_emit_less_unsigned,
2863 aarch64_emit_ref,
2864 aarch64_emit_if_goto,
2865 aarch64_emit_goto,
2866 aarch64_write_goto_address,
2867 aarch64_emit_const,
2868 aarch64_emit_call,
2869 aarch64_emit_reg,
2870 aarch64_emit_pop,
2871 aarch64_emit_stack_flush,
2872 aarch64_emit_zero_ext,
2873 aarch64_emit_swap,
2874 aarch64_emit_stack_adjust,
2875 aarch64_emit_int_call_1,
2876 aarch64_emit_void_call_2,
2877 aarch64_emit_eq_goto,
2878 aarch64_emit_ne_goto,
2879 aarch64_emit_lt_goto,
2880 aarch64_emit_le_goto,
2881 aarch64_emit_gt_goto,
2882 aarch64_emit_ge_goto,
2883 };
2884
2885 /* Implementation of linux_target_ops method "emit_ops". */
2886
2887 static struct emit_ops *
2888 aarch64_emit_ops (void)
2889 {
2890 return &aarch64_emit_ops_impl;
2891 }
2892
2893 /* Implementation of linux_target_ops method
2894 "get_min_fast_tracepoint_insn_len". */
2895
2896 static int
2897 aarch64_get_min_fast_tracepoint_insn_len (void)
2898 {
2899 return 4;
2900 }
2901
2902 /* Implementation of linux_target_ops method "supports_range_stepping". */
2903
2904 static int
2905 aarch64_supports_range_stepping (void)
2906 {
2907 return 1;
2908 }
2909
2910 /* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
2911
2912 static const gdb_byte *
2913 aarch64_sw_breakpoint_from_kind (int kind, int *size)
2914 {
2915 if (is_64bit_tdesc ())
2916 {
2917 *size = aarch64_breakpoint_len;
2918 return aarch64_breakpoint;
2919 }
2920 else
2921 return arm_sw_breakpoint_from_kind (kind, size);
2922 }
2923
2924 /* Implementation of linux_target_ops method "breakpoint_kind_from_pc". */
2925
2926 static int
2927 aarch64_breakpoint_kind_from_pc (CORE_ADDR *pcptr)
2928 {
2929 if (is_64bit_tdesc ())
2930 return aarch64_breakpoint_len;
2931 else
2932 return arm_breakpoint_kind_from_pc (pcptr);
2933 }
2934
2935 /* Implementation of the linux_target_ops method
2936 "breakpoint_kind_from_current_state". */
2937
2938 static int
2939 aarch64_breakpoint_kind_from_current_state (CORE_ADDR *pcptr)
2940 {
2941 if (is_64bit_tdesc ())
2942 return aarch64_breakpoint_len;
2943 else
2944 return arm_breakpoint_kind_from_current_state (pcptr);
2945 }
2946
2947 /* Support for hardware single step. */
2948
2949 static int
2950 aarch64_supports_hardware_single_step (void)
2951 {
2952 return 1;
2953 }
2954
2955 struct linux_target_ops the_low_target =
2956 {
2957 aarch64_arch_setup,
2958 aarch64_regs_info,
2959 aarch64_cannot_fetch_register,
2960 aarch64_cannot_store_register,
2961 NULL, /* fetch_register */
2962 aarch64_get_pc,
2963 aarch64_set_pc,
2964 aarch64_breakpoint_kind_from_pc,
2965 aarch64_sw_breakpoint_from_kind,
2966 NULL, /* get_next_pcs */
2967 0, /* decr_pc_after_break */
2968 aarch64_breakpoint_at,
2969 aarch64_supports_z_point_type,
2970 aarch64_insert_point,
2971 aarch64_remove_point,
2972 aarch64_stopped_by_watchpoint,
2973 aarch64_stopped_data_address,
2974 NULL, /* collect_ptrace_register */
2975 NULL, /* supply_ptrace_register */
2976 aarch64_linux_siginfo_fixup,
2977 aarch64_linux_new_process,
2978 aarch64_linux_delete_process,
2979 aarch64_linux_new_thread,
2980 aarch64_linux_delete_thread,
2981 aarch64_linux_new_fork,
2982 aarch64_linux_prepare_to_resume,
2983 NULL, /* process_qsupported */
2984 aarch64_supports_tracepoints,
2985 aarch64_get_thread_area,
2986 aarch64_install_fast_tracepoint_jump_pad,
2987 aarch64_emit_ops,
2988 aarch64_get_min_fast_tracepoint_insn_len,
2989 aarch64_supports_range_stepping,
2990 aarch64_breakpoint_kind_from_current_state,
2991 aarch64_supports_hardware_single_step,
2992 aarch64_get_syscall_trapinfo,
2993 };
2994
2995 void
2996 initialize_low_arch (void)
2997 {
2998 initialize_low_arch_aarch32 ();
2999
3000 initialize_regsets_info (&aarch64_regsets_info);
3001
3002 #if GDB_SELF_TEST
3003 initialize_low_tdesc ();
3004 #endif
3005 }