/* GNU/Linux/AArch64 specific low level interface, for the remote server for
   GDB.

   Copyright (C) 2009-2015 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"
#include "nat/aarch64-linux.h"
#include "nat/aarch64-linux-hw-point.h"
#include "arch/aarch64-insn.h"
#include "linux-aarch32-low.h"
#include "elf/common.h"
#include "ax.h"
#include "tracepoint.h"

#include <signal.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <asm/ptrace.h>
#include <inttypes.h>
#include <endian.h>
#include <sys/uio.h>

#include "gdb_proc_service.h"

/* Defined in auto-generated files.  */
void init_registers_aarch64 (void);
extern const struct target_desc *tdesc_aarch64;

#ifdef HAVE_SYS_REG_H
#include <sys/reg.h>
#endif

#define AARCH64_X_REGS_NUM 31
#define AARCH64_V_REGS_NUM 32
#define AARCH64_X0_REGNO    0
#define AARCH64_SP_REGNO   31
#define AARCH64_PC_REGNO   32
#define AARCH64_CPSR_REGNO 33
#define AARCH64_V0_REGNO   34
#define AARCH64_FPSR_REGNO (AARCH64_V0_REGNO + AARCH64_V_REGS_NUM)
#define AARCH64_FPCR_REGNO (AARCH64_V0_REGNO + AARCH64_V_REGS_NUM + 1)

#define AARCH64_NUM_REGS (AARCH64_V0_REGNO + AARCH64_V_REGS_NUM + 2)
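/* That is, 31 X registers plus SP, PC and CPSR, then 32 V registers
   plus FPSR and FPCR: 68 registers in total.  */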

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Hardware breakpoint/watchpoint data.
     The reason for them to be per-process rather than per-thread is
     due to the lack of information in the gdbserver environment;
     gdbserver is not told whether a requested hardware
     breakpoint/watchpoint is thread specific or not, so it has to set
     each hw bp/wp for every thread in the current process.  The
     higher level bp/wp management in gdb will resume a thread if a hw
     bp/wp trap is not expected for it.  Since the hw bp/wp setting is
     the same for each thread, it is reasonable for the data to live
     here.  */
  struct aarch64_debug_reg_state debug_reg_state;
};

/* Return true if the size of register 0 is 8 bytes.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

/* Implementation of linux_target_ops method "cannot_store_register".  */

static int
aarch64_cannot_store_register (int regno)
{
  return regno >= AARCH64_NUM_REGS;
}

/* Implementation of linux_target_ops method "cannot_fetch_register".  */

static int
aarch64_cannot_fetch_register (int regno)
{
  return regno >= AARCH64_NUM_REGS;
}

static void
aarch64_fill_gregset (struct regcache *regcache, void *buf)
{
  struct user_pt_regs *regset = (struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    collect_register (regcache, AARCH64_X0_REGNO + i, &regset->regs[i]);
  collect_register (regcache, AARCH64_SP_REGNO, &regset->sp);
  collect_register (regcache, AARCH64_PC_REGNO, &regset->pc);
  collect_register (regcache, AARCH64_CPSR_REGNO, &regset->pstate);
}

static void
aarch64_store_gregset (struct regcache *regcache, const void *buf)
{
  const struct user_pt_regs *regset = (const struct user_pt_regs *) buf;
  int i;

  for (i = 0; i < AARCH64_X_REGS_NUM; i++)
    supply_register (regcache, AARCH64_X0_REGNO + i, &regset->regs[i]);
  supply_register (regcache, AARCH64_SP_REGNO, &regset->sp);
  supply_register (regcache, AARCH64_PC_REGNO, &regset->pc);
  supply_register (regcache, AARCH64_CPSR_REGNO, &regset->pstate);
}

static void
aarch64_fill_fpregset (struct regcache *regcache, void *buf)
{
  struct user_fpsimd_state *regset = (struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    collect_register (regcache, AARCH64_V0_REGNO + i, &regset->vregs[i]);
  collect_register (regcache, AARCH64_FPSR_REGNO, &regset->fpsr);
  collect_register (regcache, AARCH64_FPCR_REGNO, &regset->fpcr);
}

static void
aarch64_store_fpregset (struct regcache *regcache, const void *buf)
{
  const struct user_fpsimd_state *regset
    = (const struct user_fpsimd_state *) buf;
  int i;

  for (i = 0; i < AARCH64_V_REGS_NUM; i++)
    supply_register (regcache, AARCH64_V0_REGNO + i, &regset->vregs[i]);
  supply_register (regcache, AARCH64_FPSR_REGNO, &regset->fpsr);
  supply_register (regcache, AARCH64_FPCR_REGNO, &regset->fpcr);
}

/* Enable miscellaneous debugging output.  The name is historical - it
   was originally used to debug LinuxThreads support.  */
extern int debug_threads;

/* Implementation of linux_target_ops method "get_pc".  */

static CORE_ADDR
aarch64_get_pc (struct regcache *regcache)
{
  if (register_size (regcache->tdesc, 0) == 8)
    {
      unsigned long pc;

      collect_register_by_name (regcache, "pc", &pc);
      if (debug_threads)
	debug_printf ("stop pc is %08lx\n", pc);
      return pc;
    }
  else
    {
      unsigned int pc;

      collect_register_by_name (regcache, "pc", &pc);
      if (debug_threads)
	debug_printf ("stop pc is %04x\n", pc);
      return pc;
    }
}

/* Implementation of linux_target_ops method "set_pc".  */

static void
aarch64_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  if (register_size (regcache->tdesc, 0) == 8)
    {
      unsigned long newpc = pc;
      supply_register_by_name (regcache, "pc", &newpc);
    }
  else
    {
      unsigned int newpc = pc;
      supply_register_by_name (regcache, "pc", &newpc);
    }
}

#define aarch64_breakpoint_len 4

/* AArch64 BRK software debug mode instruction.
   This instruction needs to match gdb/aarch64-tdep.c
   (aarch64_default_breakpoint).  */
static const gdb_byte aarch64_breakpoint[] = {0x00, 0x00, 0x20, 0xd4};

/* Implementation of linux_target_ops method "breakpoint_at".  */

static int
aarch64_breakpoint_at (CORE_ADDR where)
{
  gdb_byte insn[aarch64_breakpoint_len];

  (*the_target->read_memory) (where, (unsigned char *) &insn,
			      aarch64_breakpoint_len);
  if (memcmp (insn, aarch64_breakpoint, aarch64_breakpoint_len) == 0)
    return 1;

  return 0;
}

static void
aarch64_init_debug_reg_state (struct aarch64_debug_reg_state *state)
{
  int i;

  for (i = 0; i < AARCH64_HBP_MAX_NUM; ++i)
    {
      state->dr_addr_bp[i] = 0;
      state->dr_ctrl_bp[i] = 0;
      state->dr_ref_count_bp[i] = 0;
    }

  for (i = 0; i < AARCH64_HWP_MAX_NUM; ++i)
    {
      state->dr_addr_wp[i] = 0;
      state->dr_ctrl_wp[i] = 0;
      state->dr_ref_count_wp[i] = 0;
    }
}

/* Return the pointer to the debug register state structure in the
   current process' arch-specific data area.  */

struct aarch64_debug_reg_state *
aarch64_get_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* Implementation of linux_target_ops method "supports_z_point_type".  */

static int
aarch64_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
      {
	if (!extended_protocol && is_64bit_tdesc ())
	  {
	    /* Only enable Z0 packet in non-multi-arch debugging.  If
	       the extended protocol is used, don't enable the Z0 packet
	       because GDBserver may attach to a 32-bit process.  */
	    return 1;
	  }
	else
	  {
	    /* Disable Z0 packet so that GDBserver doesn't have to handle
	       different breakpoint instructions (aarch64, arm, thumb etc)
	       in multi-arch debugging.  */
	    return 0;
	  }
      }
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_READ_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}

/* Implementation of linux_target_ops method "insert_point".

   It actually only records the info of the to-be-inserted bp/wp;
   the actual insertion will happen when threads are resumed.  */

static int
aarch64_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "insert_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  if (targ_type != hw_execute)
    {
      if (aarch64_linux_region_ok_for_watchpoint (addr, len))
	ret = aarch64_handle_watchpoint (targ_type, addr, len,
					 1 /* is_insert */, state);
      else
	ret = -1;
    }
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware breakpoint/watchpoint control
	     register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       1 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "insert_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "remove_point".

   It actually only records the info of the to-be-removed bp/wp;
   the actual removal will be done when threads are resumed.  */

static int
aarch64_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		      int len, struct raw_breakpoint *bp)
{
  int ret;
  enum target_hw_bp_type targ_type;
  struct aarch64_debug_reg_state *state
    = aarch64_get_debug_reg_state (pid_of (current_thread));

  if (show_debug_regs)
    fprintf (stderr, "remove_point on entry (addr=0x%08lx, len=%d)\n",
	     (unsigned long) addr, len);

  /* Determine the type from the raw breakpoint type.  */
  targ_type = raw_bkpt_type_to_target_hw_bp_type (type);

  /* Set up state pointers.  */
  if (targ_type != hw_execute)
    ret = aarch64_handle_watchpoint (targ_type, addr, len,
				     0 /* is_insert */, state);
  else
    {
      if (len == 3)
	{
	  /* A LEN of 3 means the breakpoint is set on a 32-bit Thumb
	     instruction.  Set it to 2 to correctly encode the length
	     bit mask in the hardware breakpoint/watchpoint control
	     register.  */
	  len = 2;
	}
      ret = aarch64_handle_breakpoint (targ_type, addr, len,
				       0 /* is_insert */, state);
    }

  if (show_debug_regs)
    aarch64_show_debug_reg_state (state, "remove_point", addr, len,
				  targ_type);

  return ret;
}

/* Implementation of linux_target_ops method "stopped_data_address".  */

static CORE_ADDR
aarch64_stopped_data_address (void)
{
  siginfo_t siginfo;
  int pid, i;
  struct aarch64_debug_reg_state *state;

  pid = lwpid_of (current_thread);

  /* Get the siginfo.  */
  if (ptrace (PTRACE_GETSIGINFO, pid, NULL, &siginfo) != 0)
    return (CORE_ADDR) 0;

  /* Need to be a hardware breakpoint/watchpoint trap.  */
  if (siginfo.si_signo != SIGTRAP
      || (siginfo.si_code & 0xffff) != 0x0004 /* TRAP_HWBKPT */)
    return (CORE_ADDR) 0;

  /* Check if the address matches any watched address.  */
  state = aarch64_get_debug_reg_state (pid_of (current_thread));
  for (i = aarch64_num_wp_regs - 1; i >= 0; --i)
    {
      const unsigned int len
	= aarch64_watchpoint_length (state->dr_ctrl_wp[i]);
      const CORE_ADDR addr_trap = (CORE_ADDR) siginfo.si_addr;
      const CORE_ADDR addr_watch = state->dr_addr_wp[i];
      if (state->dr_ref_count_wp[i]
	  && DR_CONTROL_ENABLED (state->dr_ctrl_wp[i])
	  && addr_trap >= addr_watch
	  && addr_trap < addr_watch + len)
	return addr_trap;
    }

  return (CORE_ADDR) 0;
}

/* Implementation of linux_target_ops method "stopped_by_watchpoint".  */

static int
aarch64_stopped_by_watchpoint (void)
{
  if (aarch64_stopped_data_address () != 0)
    return 1;
  else
    return 0;
}

/* Fetch the thread-local storage pointer for libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
  return aarch64_ps_get_thread_area (ph, lwpid, idx, base,
				     is_64bit_tdesc ());
}

/* Implementation of linux_target_ops method "siginfo_fixup".  */

static int
aarch64_linux_siginfo_fixup (siginfo_t *native, void *inf, int direction)
{
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      if (direction == 0)
	aarch64_compat_siginfo_from_siginfo ((struct compat_siginfo *) inf,
					     native);
      else
	aarch64_siginfo_from_compat_siginfo (native,
					     (struct compat_siginfo *) inf);

      return 1;
    }

  return 0;
}

/* Implementation of linux_target_ops method "linux_new_process".  */

static struct arch_process_info *
aarch64_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  aarch64_init_debug_reg_state (&info->debug_reg_state);

  return info;
}

/* Implementation of linux_target_ops method "linux_new_fork".  */

static void
aarch64_linux_new_fork (struct process_info *parent,
			struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernels before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     let the child inherit the hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked-off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will end up zeroed
     before detaching the forked-off process, thus making this
     compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

/* Return the right target description according to the ELF file of
   the current thread.  */

static const struct target_desc *
aarch64_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int tid;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (is_elf64)
    return tdesc_aarch64;
  else
    return tdesc_arm_with_neon;
}

/* Implementation of linux_target_ops method "arch_setup".  */

static void
aarch64_arch_setup (void)
{
  current_process ()->tdesc = aarch64_linux_read_description ();

  aarch64_linux_get_debug_reg_capacity (lwpid_of (current_thread));
}

static struct regset_info aarch64_regsets[] =
{
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_PRSTATUS,
    sizeof (struct user_pt_regs), GENERAL_REGS,
    aarch64_fill_gregset, aarch64_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_FPREGSET,
    sizeof (struct user_fpsimd_state), FP_REGS,
    aarch64_fill_fpregset, aarch64_store_fpregset },
  NULL_REGSET
};

static struct regsets_info aarch64_regsets_info =
  {
    aarch64_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

static struct regs_info regs_info_aarch64 =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs */
    &aarch64_regsets_info,
  };

/* Implementation of linux_target_ops method "regs_info".  */

static const struct regs_info *
aarch64_regs_info (void)
{
  if (is_64bit_tdesc ())
    return &regs_info_aarch64;
  else
    return &regs_info_aarch32;
}

/* Implementation of linux_target_ops method "supports_tracepoints".  */

static int
aarch64_supports_tracepoints (void)
{
  if (current_thread == NULL)
    return 1;
  else
    {
      /* We don't support tracepoints on aarch32 now.  */
      return is_64bit_tdesc ();
    }
}

/* Implementation of linux_target_ops method "get_thread_area".  */

static int
aarch64_get_thread_area (int lwpid, CORE_ADDR *addrp)
{
  struct iovec iovec;
  uint64_t reg;

  iovec.iov_base = &reg;
  iovec.iov_len = sizeof (reg);

  if (ptrace (PTRACE_GETREGSET, lwpid, NT_ARM_TLS, &iovec) != 0)
    return -1;

  *addrp = reg;

  return 0;
}
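
/* Note the NT_ARM_TLS regset read above holds the tpidr_el0 software
   thread ID register - the same value the fast tracepoint jump pad
   further below reads directly with MRS.  */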

/* List of condition codes that we need.  */

enum aarch64_condition_codes
{
  EQ = 0x0,
  NE = 0x1,
  LO = 0x3,
  GE = 0xa,
  LT = 0xb,
  GT = 0xc,
  LE = 0xd,
};

/* Representation of an operand.  At this time, it only supports register
   and immediate types.  */

struct aarch64_operand
{
  /* Type of the operand.  */
  enum
    {
      OPERAND_IMMEDIATE,
      OPERAND_REGISTER,
    } type;
  /* Value of the operand according to the type.  */
  union
    {
      uint32_t imm;
      struct aarch64_register reg;
    };
};

/* List of registers that we are currently using; we can add more here
   as we need to use them.  */

/* General purpose scratch registers (64 bit).  */
static const struct aarch64_register x0 = { 0, 1 };
static const struct aarch64_register x1 = { 1, 1 };
static const struct aarch64_register x2 = { 2, 1 };
static const struct aarch64_register x3 = { 3, 1 };
static const struct aarch64_register x4 = { 4, 1 };

/* General purpose scratch registers (32 bit).  */
static const struct aarch64_register w0 = { 0, 0 };
static const struct aarch64_register w2 = { 2, 0 };

/* Intra-procedure scratch registers.  */
static const struct aarch64_register ip0 = { 16, 1 };

/* Special purpose registers.  */
static const struct aarch64_register fp = { 29, 1 };
static const struct aarch64_register lr = { 30, 1 };
static const struct aarch64_register sp = { 31, 1 };
static const struct aarch64_register xzr = { 31, 1 };

/* Dynamically allocate a new register.  If we know the register
   statically, we should make it a global as above instead of using this
   helper function.  */

static struct aarch64_register
aarch64_register (unsigned num, int is64)
{
  return (struct aarch64_register) { num, is64 };
}

/* Helper function to create a register operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, register_operand (x1));  */

static struct aarch64_operand
register_operand (struct aarch64_register reg)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_REGISTER;
  operand.reg = reg;

  return operand;
}

/* Helper function to create an immediate operand, for instructions with
   different types of operands.

   For example:
   p += emit_mov (p, x0, immediate_operand (12));  */

static struct aarch64_operand
immediate_operand (uint32_t imm)
{
  struct aarch64_operand operand;

  operand.type = OPERAND_IMMEDIATE;
  operand.imm = imm;

  return operand;
}

/* Helper function to create an offset memory operand.

   For example:
   p += emit_ldr (p, x0, sp, offset_memory_operand (16));  */

static struct aarch64_memory_operand
offset_memory_operand (int32_t offset)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_OFFSET, offset };
}

/* Helper function to create a pre-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, preindex_memory_operand (16));  */

static struct aarch64_memory_operand
preindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_PREINDEX, index };
}

/* Helper function to create a post-index memory operand.

   For example:
   p += emit_ldr (p, x0, sp, postindex_memory_operand (16));  */

static struct aarch64_memory_operand
postindex_memory_operand (int32_t index)
{
  return (struct aarch64_memory_operand) { MEMORY_OPERAND_POSTINDEX, index };
}

/* System control registers.  These special registers can be written and
   read with the MRS and MSR instructions.

   - NZCV: Condition flags.  GDB refers to this register under the CPSR
     name.
   - FPSR: Floating-point status register.
   - FPCR: Floating-point control register.
   - TPIDR_EL0: Software thread ID register.  */

enum aarch64_system_control_registers
{
  /*          op0           op1           crn          crm          op2  */
  NZCV =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x2 << 3) | 0x0,
  FPSR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x1,
  FPCR =      (0x1 << 14) | (0x3 << 11) | (0x4 << 7) | (0x4 << 3) | 0x0,
  TPIDR_EL0 = (0x1 << 14) | (0x3 << 11) | (0xd << 7) | (0x0 << 3) | 0x2
};
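
/* These enum values plug straight into the emit_mrs and emit_msr
   helpers below, e.g. emit_mrs (p, x1, TPIDR_EL0) to read the software
   thread ID register.  */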

/* Write a BLR instruction into *BUF.

   BLR rn

   RN is the register to branch to.  */

static int
emit_blr (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, BLR | ENCODE (rn.num, 5, 5));
}
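
/* For illustration: ENCODE places a value in a bit field of the given
   size and offset, so assuming the usual A64 encoding of BLR
   (0xd63f0000), emit_blr (p, ip0) writes BLR x16, i.e.
   0xd63f0000 | (16 << 5) = 0xd63f0200.  */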

/* Write a RET instruction into *BUF.

   RET xn

   RN is the register to branch to.  */

static int
emit_ret (uint32_t *buf, struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, RET | ENCODE (rn.num, 5, 5));
}

static int
emit_load_store_pair (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rt,
		      struct aarch64_register rt2,
		      struct aarch64_register rn,
		      struct aarch64_memory_operand operand)
{
  uint32_t opc;
  uint32_t pre_index;
  uint32_t write_back;

  if (rt.is64)
    opc = ENCODE (2, 2, 30);
  else
    opc = ENCODE (0, 2, 30);

  switch (operand.type)
    {
    case MEMORY_OPERAND_OFFSET:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (0, 1, 23);
	break;
      }
    case MEMORY_OPERAND_POSTINDEX:
      {
	pre_index = ENCODE (0, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    case MEMORY_OPERAND_PREINDEX:
      {
	pre_index = ENCODE (1, 1, 24);
	write_back = ENCODE (1, 1, 23);
	break;
      }
    default:
      return 0;
    }

  return aarch64_emit_insn (buf, opcode | opc | pre_index | write_back
			    | ENCODE (operand.index >> 3, 7, 15)
			    | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a STP instruction into *BUF.

   STP rt, rt2, [rn, #offset]
   STP rt, rt2, [rn, #index]!
   STP rt, rt2, [rn], #index

   RT and RT2 are the registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_stp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, STP, rt, rt2, rn, operand);
}

/* Write a LDP instruction into *BUF.

   LDP rt, rt2, [rn, #offset]
   LDP rt, rt2, [rn, #index]!
   LDP rt, rt2, [rn], #index

   RT and RT2 are the registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to a
   -512 .. 504 range (7 bits << 3).  */

static int
emit_ldp (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rt2, struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return emit_load_store_pair (buf, LDP, rt, rt2, rn, operand);
}

/* Write a LDP (SIMD&VFP) instruction using Q registers into *BUF.

   LDP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_ldp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, LDP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a STP (SIMD&VFP) instruction using Q registers into *BUF.

   STP qt, qt2, [rn, #offset]

   RT and RT2 are the Q registers to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   -1024 .. 1008 range (7 bits << 4).  */

static int
emit_stp_q_offset (uint32_t *buf, unsigned rt, unsigned rt2,
		   struct aarch64_register rn, int32_t offset)
{
  uint32_t opc = ENCODE (2, 2, 30);
  uint32_t pre_index = ENCODE (1, 1, 24);

  return aarch64_emit_insn (buf, STP_SIMD_VFP | opc | pre_index
			    | ENCODE (offset >> 4, 7, 15)
			    | ENCODE (rt2, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt, 5, 0));
}

/* Write a LDRH instruction into *BUF.

   LDRH wt, [xn, #offset]
   LDRH wt, [xn, #index]!
   LDRH wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrh (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 1, LDR, rt, rn, operand);
}

/* Write a LDRB instruction into *BUF.

   LDRB wt, [xn, #offset]
   LDRB wt, [xn, #index]!
   LDRB wt, [xn], #index

   RT is the register to load.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_ldrb (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn,
	   struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, 0, LDR, rt, rn, operand);
}

/* Write a STR instruction into *BUF.

   STR rt, [rn, #offset]
   STR rt, [rn, #index]!
   STR rt, [rn], #index

   RT is the register to store.
   RN is the base address register.
   OFFSET is the immediate to add to the base address.  It is limited to
   0 .. 32760 range (12 bits << 3).  */

static int
emit_str (uint32_t *buf, struct aarch64_register rt,
	  struct aarch64_register rn,
	  struct aarch64_memory_operand operand)
{
  return aarch64_emit_load_store (buf, rt.is64 ? 3 : 2, STR, rt, rn, operand);
}

/* Helper function emitting an exclusive load or store instruction.  */

static int
emit_load_store_exclusive (uint32_t *buf, uint32_t size,
			   enum aarch64_opcodes opcode,
			   struct aarch64_register rs,
			   struct aarch64_register rt,
			   struct aarch64_register rt2,
			   struct aarch64_register rn)
{
  return aarch64_emit_insn (buf, opcode | ENCODE (size, 2, 30)
			    | ENCODE (rs.num, 5, 16) | ENCODE (rt2.num, 5, 10)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rt.num, 5, 0));
}

/* Write a LDAXR instruction into *BUF.

   LDAXR rt, [xn]

   RT is the destination register.
   RN is the base address register.  */

static int
emit_ldaxr (uint32_t *buf, struct aarch64_register rt,
	    struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, LDAXR, xzr, rt,
				    xzr, rn);
}

/* Write a STXR instruction into *BUF.

   STXR ws, rt, [xn]

   RS is the result register; it indicates whether the store succeeded
   or not.
   RT is the register to store.
   RN is the base address register.  */

static int
emit_stxr (uint32_t *buf, struct aarch64_register rs,
	   struct aarch64_register rt, struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STXR, rs, rt,
				    xzr, rn);
}
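
/* The LDAXR/STXR pair above is what the fast tracepoint jump pad uses
   to implement its spin lock; see
   aarch64_install_fast_tracepoint_jump_pad below.  */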

/* Write a STLR instruction into *BUF.

   STLR rt, [xn]

   RT is the register to store.
   RN is the base address register.  */

static int
emit_stlr (uint32_t *buf, struct aarch64_register rt,
	   struct aarch64_register rn)
{
  return emit_load_store_exclusive (buf, rt.is64 ? 3 : 2, STLR, xzr, rt,
				    xzr, rn);
}

/* Helper function for data processing instructions with register
   sources.  */

static int
emit_data_processing_reg (uint32_t *buf, enum aarch64_opcodes opcode,
			  struct aarch64_register rd,
			  struct aarch64_register rn,
			  struct aarch64_register rm)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, opcode | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (rn.num, 5, 5) | ENCODE (rd.num, 5, 0));
}

/* Helper function for data processing instructions taking either a register
   or an immediate.  */

static int
emit_data_processing (uint32_t *buf, enum aarch64_opcodes opcode,
		      struct aarch64_register rd,
		      struct aarch64_register rn,
		      struct aarch64_operand operand)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  /* The opcode is different for register and immediate source operands.  */
  uint32_t operand_opcode;

  if (operand.type == OPERAND_IMMEDIATE)
    {
      /* xxx1 000x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (8, 4, 25);

      return aarch64_emit_insn (buf, opcode | operand_opcode | size
				| ENCODE (operand.imm, 12, 10)
				| ENCODE (rn.num, 5, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    {
      /* xxx0 101x xxxx xxxx xxxx xxxx xxxx xxxx */
      operand_opcode = ENCODE (5, 4, 25);

      return emit_data_processing_reg (buf, opcode | operand_opcode, rd,
				       rn, operand.reg);
    }
}

/* Write an ADD instruction into *BUF.

   ADD rd, rn, #imm
   ADD rd, rn, rm

   This function handles both an immediate and register add.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_add (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, ADD, rd, rn, operand);
}

/* Write a SUB instruction into *BUF.

   SUB rd, rn, #imm
   SUB rd, rn, rm

   This function handles both an immediate and register sub.

   RD is the destination register.
   RN is the input register.
   OPERAND is the source operand to subtract from RN, either of type
   OPERAND_IMMEDIATE or OPERAND_REGISTER.  */

static int
emit_sub (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUB, rd, rn, operand);
}

/* Write a MOV instruction into *BUF.

   MOV rd, #imm
   MOV rd, rm

   This function handles both a wide immediate move and a register move,
   with the condition that the source register is not xzr.  xzr and the
   stack pointer share the same encoding and this function only supports
   the stack pointer.

   RD is the destination register.
   OPERAND is the source operand, either of type OPERAND_IMMEDIATE or
   OPERAND_REGISTER.  */

static int
emit_mov (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_operand operand)
{
  if (operand.type == OPERAND_IMMEDIATE)
    {
      uint32_t size = ENCODE (rd.is64, 1, 31);
      /* Do not shift the immediate.  */
      uint32_t shift = ENCODE (0, 2, 21);

      return aarch64_emit_insn (buf, MOV | size | shift
				| ENCODE (operand.imm, 16, 5)
				| ENCODE (rd.num, 5, 0));
    }
  else
    return emit_add (buf, rd, operand.reg, immediate_operand (0));
}
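
/* Note the register form above is emitted as ADD rd, rm, #0, i.e. the
   MOV (to/from SP) alias; that is why SP is a valid source here while
   XZR is not.  */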

/* Write a MOVK instruction into *BUF.

   MOVK rd, #imm, lsl #shift

   RD is the destination register.
   IMM is the immediate.
   SHIFT is the logical shift left to apply to IMM.  */

static int
emit_movk (uint32_t *buf, struct aarch64_register rd, uint32_t imm,
	   unsigned shift)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, MOVK | size | ENCODE (shift, 2, 21)
			    | ENCODE (imm, 16, 5) | ENCODE (rd.num, 5, 0));
}

/* Write instructions into *BUF in order to move ADDR into a register.
   ADDR can be a 64-bit value.

   This function will emit a series of MOV and MOVK instructions, such as:

     MOV  xd, #(addr)
     MOVK xd, #(addr >> 16), lsl #16
     MOVK xd, #(addr >> 32), lsl #32
     MOVK xd, #(addr >> 48), lsl #48  */

static int
emit_mov_addr (uint32_t *buf, struct aarch64_register rd, CORE_ADDR addr)
{
  uint32_t *p = buf;

  /* The MOV (wide immediate) instruction clears the top bits of the
     register.  */
  p += emit_mov (p, rd, immediate_operand (addr & 0xffff));

  if ((addr >> 16) != 0)
    p += emit_movk (p, rd, (addr >> 16) & 0xffff, 1);
  else
    return p - buf;

  if ((addr >> 32) != 0)
    p += emit_movk (p, rd, (addr >> 32) & 0xffff, 2);
  else
    return p - buf;

  if ((addr >> 48) != 0)
    p += emit_movk (p, rd, (addr >> 48) & 0xffff, 3);

  return p - buf;
}
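
/* For example, for ADDR 0x123456789abc this emits:

     MOV  xd, #0x9abc
     MOVK xd, #0x5678, lsl #16
     MOVK xd, #0x1234, lsl #32

   and stops there, since bits 48-63 of that address are all zero.  */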

/* Write a SUBS instruction into *BUF.

   SUBS rd, rn, rm

   This instruction updates the condition flags.

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_subs (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_operand operand)
{
  return emit_data_processing (buf, SUBS, rd, rn, operand);
}

/* Write a CMP instruction into *BUF.

   CMP rn, rm

   This instruction is an alias of SUBS xzr, rn, rm.

   RN and RM are the registers to compare.  */

static int
emit_cmp (uint32_t *buf, struct aarch64_register rn,
	  struct aarch64_operand operand)
{
  return emit_subs (buf, xzr, rn, operand);
}

/* Write an AND instruction into *BUF.

   AND rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_and (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, AND, rd, rn, rm);
}

/* Write an ORR instruction into *BUF.

   ORR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orr (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORR, rd, rn, rm);
}

/* Write an ORN instruction into *BUF.

   ORN rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_orn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ORN, rd, rn, rm);
}

/* Write an EOR instruction into *BUF.

   EOR rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_eor (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, EOR, rd, rn, rm);
}

/* Write a MVN instruction into *BUF.

   MVN rd, rm

   This is an alias for ORN rd, xzr, rm.

   RD is the destination register.
   RM is the source register.  */

static int
emit_mvn (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rm)
{
  return emit_orn (buf, rd, xzr, rm);
}

/* Write a LSLV instruction into *BUF.

   LSLV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lslv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSLV, rd, rn, rm);
}

/* Write a LSRV instruction into *BUF.

   LSRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_lsrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, LSRV, rd, rn, rm);
}

/* Write an ASRV instruction into *BUF.

   ASRV rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_asrv (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, ASRV, rd, rn, rm);
}

/* Write a MUL instruction into *BUF.

   MUL rd, rn, rm

   RD is the destination register.
   RN and RM are the source registers.  */

static int
emit_mul (uint32_t *buf, struct aarch64_register rd,
	  struct aarch64_register rn, struct aarch64_register rm)
{
  return emit_data_processing_reg (buf, MUL, rd, rn, rm);
}

/* Write a MRS instruction into *BUF.  The register size is 64-bit.

   MRS xt, system_reg

   RT is the destination register.
   SYSTEM_REG is the special purpose register to read.  */

static int
emit_mrs (uint32_t *buf, struct aarch64_register rt,
	  enum aarch64_system_control_registers system_reg)
{
  return aarch64_emit_insn (buf, MRS | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write a MSR instruction into *BUF.  The register size is 64-bit.

   MSR system_reg, xt

   SYSTEM_REG is the special purpose register to write.
   RT is the input register.  */

static int
emit_msr (uint32_t *buf, enum aarch64_system_control_registers system_reg,
	  struct aarch64_register rt)
{
  return aarch64_emit_insn (buf, MSR | ENCODE (system_reg, 15, 5)
			    | ENCODE (rt.num, 5, 0));
}

/* Write a SEVL instruction into *BUF.

   This is a hint instruction telling the hardware to trigger an event.  */

static int
emit_sevl (uint32_t *buf)
{
  return aarch64_emit_insn (buf, SEVL);
}

/* Write a WFE instruction into *BUF.

   This is a hint instruction telling the hardware to wait for an event.  */

static int
emit_wfe (uint32_t *buf)
{
  return aarch64_emit_insn (buf, WFE);
}

/* Write a SBFM instruction into *BUF.

   SBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, sign extending the result.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_sbfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, SBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a SBFX instruction into *BUF.

   SBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination, sign
   extending the result.  This is an alias for:

     SBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_sbfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_sbfm (buf, rd, rn, lsb, lsb + width - 1);
}

/* Write a UBFM instruction into *BUF.

   UBFM rd, rn, #immr, #imms

   This instruction moves the bits from #immr to #imms into the
   destination, extending the result with zeros.

   RD is the destination register.
   RN is the source register.
   IMMR is the bit number to start at (least significant bit).
   IMMS is the bit number to stop at (most significant bit).  */

static int
emit_ubfm (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t immr, uint32_t imms)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);
  uint32_t n = ENCODE (rd.is64, 1, 22);

  return aarch64_emit_insn (buf, UBFM | size | n | ENCODE (immr, 6, 16)
			    | ENCODE (imms, 6, 10) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a UBFX instruction into *BUF.

   UBFX rd, rn, #lsb, #width

   This instruction moves #width bits from #lsb into the destination,
   extending the result with zeros.  This is an alias for:

     UBFM rd, rn, #lsb, #(lsb + width - 1)

   RD is the destination register.
   RN is the source register.
   LSB is the bit number to start at (least significant bit).
   WIDTH is the number of bits to move.  */

static int
emit_ubfx (uint32_t *buf, struct aarch64_register rd,
	   struct aarch64_register rn, uint32_t lsb, uint32_t width)
{
  return emit_ubfm (buf, rd, rn, lsb, lsb + width - 1);
}
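
/* For example, emit_ubfx (p, w0, w2, 0, 8) emits UBFM w0, w2, #0, #7,
   which extracts the low byte of w2 into w0.  */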

/* Write a CSINC instruction into *BUF.

   CSINC rd, rn, rm, cond

   This instruction writes RN to RD if the condition is true, and
   RM + 1 to RD otherwise.

   RD is the destination register.
   RN and RM are the source registers.
   COND is the encoded condition.  */

static int
emit_csinc (uint32_t *buf, struct aarch64_register rd,
	    struct aarch64_register rn, struct aarch64_register rm,
	    unsigned cond)
{
  uint32_t size = ENCODE (rd.is64, 1, 31);

  return aarch64_emit_insn (buf, CSINC | size | ENCODE (rm.num, 5, 16)
			    | ENCODE (cond, 4, 12) | ENCODE (rn.num, 5, 5)
			    | ENCODE (rd.num, 5, 0));
}

/* Write a CSET instruction into *BUF.

   CSET rd, cond

   This instruction conditionally writes 1 or 0 to the destination
   register.  1 is written if the condition is true.  This is an alias
   for:

     CSINC rd, xzr, xzr, !cond

   Note that the condition needs to be inverted.

   RD is the destination register.
   COND is the encoded condition.  */

static int
emit_cset (uint32_t *buf, struct aarch64_register rd, unsigned cond)
{
  /* The least significant bit of the condition needs toggling in order to
     invert it.  */
  return emit_csinc (buf, rd, xzr, xzr, cond ^ 0x1);
}
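
/* For example, emit_cset (p, x0, EQ) emits CSINC x0, xzr, xzr, ne:
   toggling EQ (0x0) yields NE (0x1), so x0 is set to 1 exactly when
   the EQ condition holds.  */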

/* Write LEN instructions from BUF into the inferior memory at *TO.

   Note that instructions are always little endian on AArch64, unlike
   data.  */

static void
append_insns (CORE_ADDR *to, size_t len, const uint32_t *buf)
{
  size_t byte_len = len * sizeof (uint32_t);
#if (__BYTE_ORDER == __BIG_ENDIAN)
  uint32_t *le_buf = (uint32_t *) xmalloc (byte_len);
  size_t i;

  for (i = 0; i < len; i++)
    le_buf[i] = htole32 (buf[i]);

  write_inferior_memory (*to, (const unsigned char *) le_buf, byte_len);

  xfree (le_buf);
#else
  write_inferior_memory (*to, (const unsigned char *) buf, byte_len);
#endif

  *to += byte_len;
}

/* Sub-class of struct aarch64_insn_data, storing information about
   instruction relocation for fast tracepoints.  The visitor can
   relocate an instruction from BASE.INSN_ADDR to NEW_ADDR and save
   the relocated instructions in the buffer pointed to by INSN_PTR.  */

struct aarch64_insn_relocation_data
{
  struct aarch64_insn_data base;

  /* The new address the instruction is relocated to.  */
  CORE_ADDR new_addr;
  /* Pointer to the buffer of relocated instruction(s).  */
  uint32_t *insn_ptr;
};

/* Implementation of aarch64_insn_visitor method "b".  */

static void
aarch64_ftrace_insn_reloc_b (const int is_bl, const int32_t offset,
			     struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int32_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 28))
    insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, is_bl, new_offset);
}

/* Implementation of aarch64_insn_visitor method "b_cond".  */

static void
aarch64_ftrace_insn_reloc_b_cond (const unsigned cond, const int32_t offset,
				  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int32_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond,
					  new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a conditional branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	   B.COND TAKEN    ; If cond is true, then jump to TAKEN.
	   B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	   B #(offset - 8)
	 NOT_TAKEN:  */

      insn_reloc->insn_ptr += emit_bcond (insn_reloc->insn_ptr, cond, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
				      new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "cb".  */

static void
aarch64_ftrace_insn_reloc_cb (const int32_t offset, const int is_cbnz,
			      const unsigned rn, int is64,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int32_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 21))
    {
      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64),
				       new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a compare and branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	   CBZ xn, TAKEN   ; If xn == 0, then jump to TAKEN.
	   B NOT_TAKEN     ; Else jump over TAKEN and continue.
	 TAKEN:
	   B #(offset - 8)
	 NOT_TAKEN:  */

      insn_reloc->insn_ptr += emit_cb (insn_reloc->insn_ptr, is_cbnz,
				       aarch64_register (rn, is64), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
				      new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "tb".  */

static void
aarch64_ftrace_insn_reloc_tb (const int32_t offset, int is_tbnz,
			      const unsigned rt, unsigned bit,
			      struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  int32_t new_offset
    = insn_reloc->base.insn_addr - insn_reloc->new_addr + offset;

  if (can_encode_int32 (new_offset, 16))
    {
      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), new_offset);
    }
  else if (can_encode_int32 (new_offset, 28))
    {
      /* The offset is out of range for a test bit and branch
	 instruction but not for an unconditional branch.  We can use
	 the following instructions instead:

	   TBZ xn, #bit, TAKEN   ; If xn[bit] == 0, then jump to TAKEN.
	   B NOT_TAKEN           ; Else jump over TAKEN and continue.
	 TAKEN:
	   B #(offset - 8)
	 NOT_TAKEN:  */

      insn_reloc->insn_ptr += emit_tb (insn_reloc->insn_ptr, is_tbnz, bit,
				       aarch64_register (rt, 1), 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0, 8);
      insn_reloc->insn_ptr += emit_b (insn_reloc->insn_ptr, 0,
				      new_offset - 8);
    }
}

/* Implementation of aarch64_insn_visitor method "adr".  */

static void
aarch64_ftrace_insn_reloc_adr (const int32_t offset, const unsigned rd,
			       const int is_adrp,
			       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  /* We know exactly the address the ADR{P,} instruction will compute.
     We can just write it to the destination register.  */
  CORE_ADDR address = data->insn_addr + offset;

  if (is_adrp)
    {
      /* Clear the lower 12 bits of the offset to get the 4K page.  */
      insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					     aarch64_register (rd, 1),
					     address & ~0xfff);
    }
  else
    insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					   aarch64_register (rd, 1), address);
}

/* Implementation of aarch64_insn_visitor method "ldr_literal".  */

static void
aarch64_ftrace_insn_reloc_ldr_literal (const int32_t offset, const int is_sw,
				       const unsigned rt, const int is64,
				       struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;
  CORE_ADDR address = data->insn_addr + offset;

  insn_reloc->insn_ptr += emit_mov_addr (insn_reloc->insn_ptr,
					 aarch64_register (rt, 1), address);

  /* We know exactly what address to load from, and what register we
     can use:

       MOV xd, #(oldloc + offset)
       MOVK xd, #((oldloc + offset) >> 16), lsl #16
       ...

       LDR xd, [xd] ; or LDRSW xd, [xd]  */

  if (is_sw)
    insn_reloc->insn_ptr += emit_ldrsw (insn_reloc->insn_ptr,
					aarch64_register (rt, 1),
					aarch64_register (rt, 1),
					offset_memory_operand (0));
  else
    insn_reloc->insn_ptr += emit_ldr (insn_reloc->insn_ptr,
				      aarch64_register (rt, is64),
				      aarch64_register (rt, 1),
				      offset_memory_operand (0));
}

/* Implementation of aarch64_insn_visitor method "others".  */

static void
aarch64_ftrace_insn_reloc_others (const uint32_t insn,
				  struct aarch64_insn_data *data)
{
  struct aarch64_insn_relocation_data *insn_reloc
    = (struct aarch64_insn_relocation_data *) data;

  /* The instruction is not PC relative.  Just re-emit it at the new
     location.  */
  insn_reloc->insn_ptr += aarch64_emit_insn (insn_reloc->insn_ptr, insn);
}

static const struct aarch64_insn_visitor visitor =
{
  aarch64_ftrace_insn_reloc_b,
  aarch64_ftrace_insn_reloc_b_cond,
  aarch64_ftrace_insn_reloc_cb,
  aarch64_ftrace_insn_reloc_tb,
  aarch64_ftrace_insn_reloc_adr,
  aarch64_ftrace_insn_reloc_ldr_literal,
  aarch64_ftrace_insn_reloc_others,
};

/* Implementation of linux_target_ops method
   "install_fast_tracepoint_jump_pad".  */

static int
aarch64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
					  CORE_ADDR tpaddr,
					  CORE_ADDR collector,
					  CORE_ADDR lockaddr,
					  ULONGEST orig_size,
					  CORE_ADDR *jump_entry,
					  CORE_ADDR *trampoline,
					  ULONGEST *trampoline_size,
					  unsigned char *jjump_pad_insn,
					  ULONGEST *jjump_pad_insn_size,
					  CORE_ADDR *adjusted_insn_addr,
					  CORE_ADDR *adjusted_insn_addr_end,
					  char *err)
{
  uint32_t buf[256];
  uint32_t *p = buf;
  int32_t offset;
  int i;
  uint32_t insn;
  CORE_ADDR buildaddr = *jump_entry;
  struct aarch64_insn_relocation_data insn_data;

  /* We need to save the current state on the stack both to restore it
     later and to collect register values when the tracepoint is hit.

     The saved registers are pushed in a layout that needs to be in sync
     with aarch64_ft_collect_regmap (see linux-aarch64-ipa.c).  Later on
     the supply_fast_tracepoint_registers function will fill in the
     register cache from a pointer to saved registers on the stack we build
     here.

     For simplicity, we set the size of each cell on the stack to 16 bytes.
     This way one cell can hold any register type, from system registers
     to the 128 bit SIMD&FP registers.  Furthermore, the stack pointer
     has to be 16 bytes aligned anyway.

     Note that the CPSR register does not exist on AArch64.  Instead we
     can access system bits describing the process state with the
     MRS/MSR instructions, namely the condition flags.  We save them as
     if they are part of a CPSR register because that's how GDB
     interprets these system bits.  At the moment, only the condition
     flags are saved in CPSR (NZCV).

     Stack layout, each cell is 16 bytes (descending):

     High *-------- SIMD&FP registers from 31 down to 0. --------*
	  | q31                                                  |
	  .                                                      .
	  .                                                      . 32 cells
	  .                                                      .
	  | q0                                                   |
	  *---- General purpose registers from 30 down to 0. ----*
	  | x30                                                  |
	  .                                                      .
	  .                                                      . 31 cells
	  .                                                      .
	  | x0                                                   |
	  *------------- Special purpose registers. -------------*
	  | SP                                                   |
	  | PC                                                   |
	  | CPSR (NZCV)                                          | 5 cells
	  | FPSR                                                 |
	  | FPCR                                                 | <- SP + 16
	  *------------- collecting_t object --------------------*
	  | TPIDR_EL0              | struct tracepoint *         |
     Low  *------------------------------------------------------*

     After this stack is set up, we issue a call to the collector, passing
     it the saved registers at (SP + 16).  */

  /* Push SIMD&FP registers on the stack:

       SUB sp, sp, #(32 * 16)

       STP q30, q31, [sp, #(30 * 16)]
       ...
       STP q0, q1, [sp]  */
  p += emit_sub (p, sp, sp, immediate_operand (32 * 16));
  for (i = 30; i >= 0; i -= 2)
    p += emit_stp_q_offset (p, i, i + 1, sp, i * 16);

  /* Push general purpose registers on the stack.  Note that we do not need
     to push x31 as it represents the xzr register and not the stack
     pointer in a STR instruction.

       SUB sp, sp, #(31 * 16)

       STR x30, [sp, #(30 * 16)]
       ...
       STR x0, [sp]  */
  p += emit_sub (p, sp, sp, immediate_operand (31 * 16));
  for (i = 30; i >= 0; i -= 1)
    p += emit_str (p, aarch64_register (i, 1), sp,
		   offset_memory_operand (i * 16));

  /* Make space for 5 more cells.

       SUB sp, sp, #(5 * 16)  */
  p += emit_sub (p, sp, sp, immediate_operand (5 * 16));

  /* Save SP:

       ADD x4, sp, #((32 + 31 + 5) * 16)
       STR x4, [sp, #(4 * 16)]  */
  p += emit_add (p, x4, sp, immediate_operand ((32 + 31 + 5) * 16));
  p += emit_str (p, x4, sp, offset_memory_operand (4 * 16));

  /* Save PC (tracepoint address):

       MOV x3, #(tpaddr)
       ...

       STR x3, [sp, #(3 * 16)]  */
  p += emit_mov_addr (p, x3, tpaddr);
  p += emit_str (p, x3, sp, offset_memory_operand (3 * 16));

  /* Save CPSR (NZCV), FPSR and FPCR:

       MRS x2, nzcv
       MRS x1, fpsr
       MRS x0, fpcr

       STR x2, [sp, #(2 * 16)]
       STR x1, [sp, #(1 * 16)]
       STR x0, [sp, #(0 * 16)]  */
  p += emit_mrs (p, x2, NZCV);
  p += emit_mrs (p, x1, FPSR);
  p += emit_mrs (p, x0, FPCR);
  p += emit_str (p, x2, sp, offset_memory_operand (2 * 16));
  p += emit_str (p, x1, sp, offset_memory_operand (1 * 16));
  p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));

1942 /* Push the collecting_t object. It consists of the address of the
1943 tracepoint and an ID for the current thread. We get the latter by
1944 reading the tpidr_el0 system register. It corresponds to the
1945 NT_ARM_TLS register accessible with ptrace.
1946
1947 MOV x0, #(tpoint)
1948 ...
1949
1950 MRS x1, tpidr_el0
1951
1952 STP x0, x1, [sp, #-16]!
1953
1954 */
1955
1956 p += emit_mov_addr (p, x0, tpoint);
1957 p += emit_mrs (p, x1, TPIDR_EL0);
1958 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-16));
1959
1960 /* Spin-lock:
1961
1962 The shared memory for the lock is at lockaddr. It will hold zero
1963 if no-one is holding the lock, otherwise it contains the address of
1964 the collecting_t object on the stack of the thread which acquired it.
1965
1966 At this stage, the stack pointer points to this thread's collecting_t
1967 object.
1968
1969 We use the following registers:
1970 - x0: Address of the lock.
1971 - x1: Pointer to collecting_t object.
1972 - x2: Scratch register.
1973
1974 MOV x0, #(lockaddr)
1975 ...
1976 MOV x1, sp
1977
1978 ; Trigger an event local to this core, so that the first WFE
1979 ; below completes immediately instead of waiting.
1980 SEVL
1981 again:
1982 ; Wait for an event. The event is triggered by either the SEVL
1983 ; or STLR instructions (store release).
1984 WFE
1985
1986 ; Atomically read at lockaddr. This marks the memory location as
1987 ; exclusive. This instruction also has memory constraints which
1988 ; make sure all previous data reads and writes are done before
1989 ; executing it.
1990 LDAXR x2, [x0]
1991
1992 ; Try again if another thread holds the lock.
1993 CBNZ x2, again
1994
1995 ; We can lock it! Write the address of the collecting_t object.
1996 ; This instruction will fail if the memory location is not marked
1997 ; as exclusive anymore. If it succeeds, it will remove the
1998 ; exclusive mark on the memory location. This way, if another
1999 ; thread executes this instruction before us, we will fail and try
2000 ; all over again.
2001 STXR w2, x1, [x0]
2002 CBNZ w2, again
2003
2004 */
2005
2006 p += emit_mov_addr (p, x0, lockaddr);
2007 p += emit_mov (p, x1, register_operand (sp));
2008
2009 p += emit_sevl (p);
2010 p += emit_wfe (p);
2011 p += emit_ldaxr (p, x2, x0);
2012 p += emit_cb (p, 1, x2, -2 * 4);
2013 p += emit_stxr (p, w2, x1, x0);
2014 p += emit_cb (p, 1, w2, -4 * 4);
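/* Illustrative sketch, not part of the original source: in C11 terms
   the LDAXR/CBNZ/STXR/CBNZ loop above behaves like the hypothetical
   acquire loop below, where *LOCK is NULL when free and otherwise
   holds the owner's collecting_t address.  The WFE/SEVL event
   machinery has no direct C11 analogue and is elided.  */
#if 0
#include <stdatomic.h>

static void
acquire_lock_sketch (_Atomic (void *) *lock, void *self)
{
  void *expected;

  do
    {
      expected = NULL;	/* Only take the lock if no-one holds it.  */
    }
  while (!atomic_compare_exchange_weak_explicit (lock, &expected, self,
						 memory_order_acquire,
						 memory_order_relaxed));
}
#endif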
2015
2016 /* Call collector (struct tracepoint *, unsigned char *):
2017
2018 MOV x0, #(tpoint)
2019 ...
2020
2021 ; Saved registers start after the collecting_t object.
2022 ADD x1, sp, #16
2023
2024 ; We use an intra-procedure-call scratch register.
2025 MOV ip0, #(collector)
2026 ...
2027
2028 ; And call back to C!
2029 BLR ip0
2030
2031 */
2032
2033 p += emit_mov_addr (p, x0, tpoint);
2034 p += emit_add (p, x1, sp, immediate_operand (16));
2035
2036 p += emit_mov_addr (p, ip0, collector);
2037 p += emit_blr (p, ip0);
2038
2039 /* Release the lock.
2040
2041 MOV x0, #(lockaddr)
2042 ...
2043
2044 ; This instruction is a normal store with memory ordering
2045 ; constraints, so we do not need a separate data barrier to
2046 ; make sure all earlier data reads and writes have completed
2047 ; before it executes. Furthermore, this instruction triggers
2048 ; an event, letting other threads know they can grab the
2049 ; lock.
2050 STLR xzr, [x0]
2051
2052 */
2053 p += emit_mov_addr (p, x0, lockaddr);
2054 p += emit_stlr (p, xzr, x0);
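/* Note: the STLR above is the matching release; in C11 terms it is
   atomic_store_explicit (lock, NULL, memory_order_release), with the
   added twist that, as described above, the store-release also
   generates the event that wakes threads parked in WFE.  */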
2055
2056 /* Free collecting_t object:
2057
2058 ADD sp, sp, #16
2059
2060 */
2061 p += emit_add (p, sp, sp, immediate_operand (16));
2062
2063 /* Restore CPSR (NZCV), FPSR and FPCR, then pop all the special
2064 purpose registers off the stack.
2065
2066 LDR x2, [sp, #(2 * 16)]
2067 LDR x1, [sp, #(1 * 16)]
2068 LDR x0, [sp, #(0 * 16)]
2069
2070 MSR NZCV, x2
2071 MSR FPSR, x1
2072 MSR FPCR, x0
2073
2074 ADD sp, sp, #(5 * 16)
2075
2076 */
2077 p += emit_ldr (p, x2, sp, offset_memory_operand (2 * 16));
2078 p += emit_ldr (p, x1, sp, offset_memory_operand (1 * 16));
2079 p += emit_ldr (p, x0, sp, offset_memory_operand (0 * 16));
2080 p += emit_msr (p, NZCV, x2);
2081 p += emit_msr (p, FPSR, x1);
2082 p += emit_msr (p, FPCR, x0);
2083
2084 p += emit_add (p, sp, sp, immediate_operand (5 * 16));
2085
2086 /* Pop general purpose registers:
2087
2088 LDR x0, [sp]
2089 ...
2090 LDR x30, [sp, #(30 * 16)]
2091
2092 ADD sp, sp, #(31 * 16)
2093
2094 */
2095 for (i = 0; i <= 30; i += 1)
2096 p += emit_ldr (p, aarch64_register (i, 1), sp,
2097 offset_memory_operand (i * 16));
2098 p += emit_add (p, sp, sp, immediate_operand (31 * 16));
2099
2100 /* Pop SIMD&FP registers:
2101
2102 LDP q0, q1, [sp]
2103 ...
2104 LDP q30, q31, [sp, #(30 * 16)]
2105
2106 ADD sp, sp, #(32 * 16)
2107
2108 */
2109 for (i = 0; i <= 30; i += 2)
2110 p += emit_ldp_q_offset (p, i, i + 1, sp, i * 16);
2111 p += emit_add (p, sp, sp, immediate_operand (32 * 16));
2112
2113 /* Write the code into the inferior memory. */
2114 append_insns (&buildaddr, p - buf, buf);
2115
2116 /* Now emit the relocated instruction. */
2117 *adjusted_insn_addr = buildaddr;
2118 target_read_uint32 (tpaddr, &insn);
2119
2120 insn_data.base.insn_addr = tpaddr;
2121 insn_data.new_addr = buildaddr;
2122 insn_data.insn_ptr = buf;
2123
2124 aarch64_relocate_instruction (insn, &visitor,
2125 (struct aarch64_insn_data *) &insn_data);
2126
2127 /* We may not have been able to relocate the instruction. */
2128 if (insn_data.insn_ptr == buf)
2129 {
2130 sprintf (err,
2131 "E.Could not relocate instruction from %s to %s.",
2132 core_addr_to_string_nz (tpaddr),
2133 core_addr_to_string_nz (buildaddr));
2134 return 1;
2135 }
2136 else
2137 append_insns (&buildaddr, insn_data.insn_ptr - buf, buf);
2138 *adjusted_insn_addr_end = buildaddr;
2139
2140 /* Go back to the start of the buffer. */
2141 p = buf;
2142
2143 /* Emit a branch back from the jump pad. */
2144 offset = (tpaddr + orig_size - buildaddr);
2145 if (!can_encode_int32 (offset, 28))
2146 {
2147 sprintf (err,
2148 "E.Jump back from jump pad too far from tracepoint "
2149 "(offset 0x%" PRIx32 " cannot be encoded in 28 bits).",
2150 offset);
2151 return 1;
2152 }
2153
2154 p += emit_b (p, 0, offset);
2155 append_insns (&buildaddr, p - buf, buf);
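/* Illustrative sketch, not part of the original source: the AArch64 B
   instruction encodes a signed 26-bit word offset, i.e. a byte offset
   that must fit in 28 bits (roughly +/-128 MiB), which is what the
   can_encode_int32 (offset, 28) checks above and below guard.  A
   hypothetical spelling of that range test:  */
#if 0
static int
fits_in_28_bits_sketch (int32_t offset)
{
  return offset >= -(1 << 27) && offset < (1 << 27);
}
#endif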
2156
2157 /* Give the caller a branch instruction into the jump pad. */
2158 offset = (*jump_entry - tpaddr);
2159 if (!can_encode_int32 (offset, 28))
2160 {
2161 sprintf (err,
2162 "E.Jump pad too far from tracepoint "
2163 "(offset 0x%" PRIx32 " cannot be encoded in 28 bits).",
2164 offset);
2165 return 1;
2166 }
2167
2168 emit_b ((uint32_t *) jjump_pad_insn, 0, offset);
2169 *jjump_pad_insn_size = 4;
2170
2171 /* Return the end address of our pad. */
2172 *jump_entry = buildaddr;
2173
2174 return 0;
2175 }
2176
2177 /* Helper function writing LEN instructions from START into
2178 current_insn_ptr. */
2179
2180 static void
2181 emit_ops_insns (const uint32_t *start, int len)
2182 {
2183 CORE_ADDR buildaddr = current_insn_ptr;
2184
2185 if (debug_threads)
2186 debug_printf ("Adding %d instructions at %s\n",
2187 len, paddress (buildaddr));
2188
2189 append_insns (&buildaddr, len, start);
2190 current_insn_ptr = buildaddr;
2191 }
2192
2193 /* Pop a register from the stack. */
2194
2195 static int
2196 emit_pop (uint32_t *buf, struct aarch64_register rt)
2197 {
2198 return emit_ldr (buf, rt, sp, postindex_memory_operand (1 * 16));
2199 }
2200
2201 /* Push a register on the stack. */
2202
2203 static int
2204 emit_push (uint32_t *buf, struct aarch64_register rt)
2205 {
2206 return emit_str (buf, rt, sp, preindex_memory_operand (-1 * 16));
2207 }
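/* Note: with the 16-byte cells used throughout, the two helpers above
   expand to, in effect:

     LDR rt, [sp], #16     ; pop  (post-indexed)
     STR rt, [sp, #-16]!   ; push (pre-indexed)  */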
2208
2209 /* Implementation of emit_ops method "emit_prologue". */
2210
2211 static void
2212 aarch64_emit_prologue (void)
2213 {
2214 uint32_t buf[16];
2215 uint32_t *p = buf;
2216
2217 /* This function emits a prologue for the following function prototype:
2218
2219 enum eval_result_type f (unsigned char *regs,
2220 ULONGEST *value);
2221
2222 The first argument is a buffer of raw registers. The second
2223 argument points to where the result of evaluating the expression
2224 is stored; it will be set to whatever is on top of the stack at
2225 the end.
2226
2227 The stack set up by the prologue is as such:
2228
2229 High *------------------------------------------------------*
2230 | LR |
2231 | FP | <- FP
2232 | x1 (ULONGEST *value) |
2233 | x0 (unsigned char *regs) |
2234 Low *------------------------------------------------------*
2235
2236 As we are implementing a stack machine, each opcode can expand the
2237 stack, so we never know how far we are from the data saved by this
2238 prologue. In order to be able to refer to value and regs later, we
2239 save the current stack pointer in the frame pointer. This way, it
2240 is not clobbered when calling C functions.
2241
2242 Finally, throughout every operation, register x0 holds the top of
2243 the stack, and x1 serves as a scratch register. */
2244
2245 p += emit_stp (p, x0, x1, sp, preindex_memory_operand (-2 * 16));
2246 p += emit_str (p, lr, sp, offset_memory_operand (3 * 8));
2247 p += emit_str (p, fp, sp, offset_memory_operand (2 * 8));
2248
2249 p += emit_add (p, fp, sp, immediate_operand (2 * 8));
2250
2251
2252 emit_ops_insns (buf, p - buf);
2253 }
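/* Note: the prologue above assembles to, in effect:

     STP x0, x1, [sp, #-32]!   ; save the two argument registers
     STR x30, [sp, #24]        ; save LR
     STR x29, [sp, #16]        ; save FP
     ADD x29, sp, #16          ; FP now points at the saved FP cell

   which produces exactly the four-slot frame drawn above.  */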
2254
2255 /* Implementation of emit_ops method "emit_epilogue". */
2256
2257 static void
2258 aarch64_emit_epilogue (void)
2259 {
2260 uint32_t buf[16];
2261 uint32_t *p = buf;
2262
2263 /* Store the result of the expression (x0) in *value. */
2264 p += emit_sub (p, x1, fp, immediate_operand (1 * 8));
2265 p += emit_ldr (p, x1, x1, offset_memory_operand (0));
2266 p += emit_str (p, x0, x1, offset_memory_operand (0));
2267
2268 /* Restore the previous state. */
2269 p += emit_add (p, sp, fp, immediate_operand (2 * 8));
2270 p += emit_ldp (p, fp, lr, fp, offset_memory_operand (0));
2271
2272 /* Return expr_eval_no_error. */
2273 p += emit_mov (p, x0, immediate_operand (expr_eval_no_error));
2274 p += emit_ret (p, lr);
2275
2276 emit_ops_insns (buf, p - buf);
2277 }
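/* Note: the epilogue above assembles to, in effect:

     SUB x1, x29, #8           ; address of the saved value pointer
     LDR x1, [x1]              ; x1 = value
     STR x0, [x1]              ; *value = top of stack
     ADD sp, x29, #16          ; unwind the frame
     LDP x29, x30, [x29]       ; restore FP and LR
     MOV x0, #expr_eval_no_error
     RET  */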
2278
2279 /* Implementation of emit_ops method "emit_add". */
2280
2281 static void
2282 aarch64_emit_add (void)
2283 {
2284 uint32_t buf[16];
2285 uint32_t *p = buf;
2286
2287 p += emit_pop (p, x1);
2288 p += emit_add (p, x0, x0, register_operand (x1));
2289
2290 emit_ops_insns (buf, p - buf);
2291 }
2292
2293 /* Implementation of emit_ops method "emit_sub". */
2294
2295 static void
2296 aarch64_emit_sub (void)
2297 {
2298 uint32_t buf[16];
2299 uint32_t *p = buf;
2300
2301 p += emit_pop (p, x1);
2302 p += emit_sub (p, x0, x0, register_operand (x1));
2303
2304 emit_ops_insns (buf, p - buf);
2305 }
2306
2307 /* Implementation of emit_ops method "emit_mul". */
2308
2309 static void
2310 aarch64_emit_mul (void)
2311 {
2312 uint32_t buf[16];
2313 uint32_t *p = buf;
2314
2315 p += emit_pop (p, x1);
2316 p += emit_mul (p, x0, x1, x0);
2317
2318 emit_ops_insns (buf, p - buf);
2319 }
2320
2321 /* Implementation of emit_ops method "emit_lsh". */
2322
2323 static void
2324 aarch64_emit_lsh (void)
2325 {
2326 uint32_t buf[16];
2327 uint32_t *p = buf;
2328
2329 p += emit_pop (p, x1);
2330 p += emit_lslv (p, x0, x1, x0);
2331
2332 emit_ops_insns (buf, p - buf);
2333 }
2334
2335 /* Implementation of emit_ops method "emit_rsh_signed". */
2336
2337 static void
2338 aarch64_emit_rsh_signed (void)
2339 {
2340 uint32_t buf[16];
2341 uint32_t *p = buf;
2342
2343 p += emit_pop (p, x1);
2344 p += emit_asrv (p, x0, x1, x0);
2345
2346 emit_ops_insns (buf, p - buf);
2347 }
2348
2349 /* Implementation of emit_ops method "emit_rsh_unsigned". */
2350
2351 static void
2352 aarch64_emit_rsh_unsigned (void)
2353 {
2354 uint32_t buf[16];
2355 uint32_t *p = buf;
2356
2357 p += emit_pop (p, x1);
2358 p += emit_lsrv (p, x0, x1, x0);
2359
2360 emit_ops_insns (buf, p - buf);
2361 }
2362
2363 /* Implementation of emit_ops method "emit_ext". */
2364
2365 static void
2366 aarch64_emit_ext (int arg)
2367 {
2368 uint32_t buf[16];
2369 uint32_t *p = buf;
2370
2371 p += emit_sbfx (p, x0, x0, 0, arg);
2372
2373 emit_ops_insns (buf, p - buf);
2374 }
2375
2376 /* Implementation of emit_ops method "emit_log_not". */
2377
2378 static void
2379 aarch64_emit_log_not (void)
2380 {
2381 uint32_t buf[16];
2382 uint32_t *p = buf;
2383
2384 /* If the top of the stack is 0, replace it with 1. Else replace it with
2385 0. */
2386
2387 p += emit_cmp (p, x0, immediate_operand (0));
2388 p += emit_cset (p, x0, EQ);
2389
2390 emit_ops_insns (buf, p - buf);
2391 }
2392
2393 /* Implementation of emit_ops method "emit_bit_and". */
2394
2395 static void
2396 aarch64_emit_bit_and (void)
2397 {
2398 uint32_t buf[16];
2399 uint32_t *p = buf;
2400
2401 p += emit_pop (p, x1);
2402 p += emit_and (p, x0, x0, x1);
2403
2404 emit_ops_insns (buf, p - buf);
2405 }
2406
2407 /* Implementation of emit_ops method "emit_bit_or". */
2408
2409 static void
2410 aarch64_emit_bit_or (void)
2411 {
2412 uint32_t buf[16];
2413 uint32_t *p = buf;
2414
2415 p += emit_pop (p, x1);
2416 p += emit_orr (p, x0, x0, x1);
2417
2418 emit_ops_insns (buf, p - buf);
2419 }
2420
2421 /* Implementation of emit_ops method "emit_bit_xor". */
2422
2423 static void
2424 aarch64_emit_bit_xor (void)
2425 {
2426 uint32_t buf[16];
2427 uint32_t *p = buf;
2428
2429 p += emit_pop (p, x1);
2430 p += emit_eor (p, x0, x0, x1);
2431
2432 emit_ops_insns (buf, p - buf);
2433 }
2434
2435 /* Implementation of emit_ops method "emit_bit_not". */
2436
2437 static void
2438 aarch64_emit_bit_not (void)
2439 {
2440 uint32_t buf[16];
2441 uint32_t *p = buf;
2442
2443 p += emit_mvn (p, x0, x0);
2444
2445 emit_ops_insns (buf, p - buf);
2446 }
2447
2448 /* Implementation of emit_ops method "emit_equal". */
2449
2450 static void
2451 aarch64_emit_equal (void)
2452 {
2453 uint32_t buf[16];
2454 uint32_t *p = buf;
2455
2456 p += emit_pop (p, x1);
2457 p += emit_cmp (p, x0, register_operand (x1));
2458 p += emit_cset (p, x0, EQ);
2459
2460 emit_ops_insns (buf, p - buf);
2461 }
2462
2463 /* Implementation of emit_ops method "emit_less_signed". */
2464
2465 static void
2466 aarch64_emit_less_signed (void)
2467 {
2468 uint32_t buf[16];
2469 uint32_t *p = buf;
2470
2471 p += emit_pop (p, x1);
2472 p += emit_cmp (p, x1, register_operand (x0));
2473 p += emit_cset (p, x0, LT);
2474
2475 emit_ops_insns (buf, p - buf);
2476 }
2477
2478 /* Implementation of emit_ops method "emit_less_unsigned". */
2479
2480 static void
2481 aarch64_emit_less_unsigned (void)
2482 {
2483 uint32_t buf[16];
2484 uint32_t *p = buf;
2485
2486 p += emit_pop (p, x1);
2487 p += emit_cmp (p, x1, register_operand (x0));
2488 p += emit_cset (p, x0, LO);
2489
2490 emit_ops_insns (buf, p - buf);
2491 }
2492
2493 /* Implementation of emit_ops method "emit_ref". */
2494
2495 static void
2496 aarch64_emit_ref (int size)
2497 {
2498 uint32_t buf[16];
2499 uint32_t *p = buf;
2500
2501 switch (size)
2502 {
2503 case 1:
2504 p += emit_ldrb (p, w0, x0, offset_memory_operand (0));
2505 break;
2506 case 2:
2507 p += emit_ldrh (p, w0, x0, offset_memory_operand (0));
2508 break;
2509 case 4:
2510 p += emit_ldr (p, w0, x0, offset_memory_operand (0));
2511 break;
2512 case 8:
2513 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2514 break;
2515 default:
2516 /* Unknown size, bail on compilation. */
2517 emit_error = 1;
2518 break;
2519 }
2520
2521 emit_ops_insns (buf, p - buf);
2522 }
2523
2524 /* Implementation of emit_ops method "emit_if_goto". */
2525
2526 static void
2527 aarch64_emit_if_goto (int *offset_p, int *size_p)
2528 {
2529 uint32_t buf[16];
2530 uint32_t *p = buf;
2531
2532 /* The Z flag is set or cleared here. */
2533 p += emit_cmp (p, x0, immediate_operand (0));
2534 /* This instruction must not change the Z flag. */
2535 p += emit_pop (p, x0);
2536 /* Branch over the next instruction if x0 == 0. */
2537 p += emit_bcond (p, EQ, 8);
2538
2539 /* The NOP instruction will be patched with an unconditional branch. */
2540 if (offset_p)
2541 *offset_p = (p - buf) * 4;
2542 if (size_p)
2543 *size_p = 4;
2544 p += emit_nop (p);
2545
2546 emit_ops_insns (buf, p - buf);
2547 }
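/* Note: the (offset_p, size_p) pair reported by the goto emitters
   records where the NOP placeholder lives and how big it is, so that
   once the destination is known the bytecode compiler can call
   aarch64_write_goto_address (below) to overwrite the NOP with an
   unconditional B.  */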
2548
2549 /* Implementation of emit_ops method "emit_goto". */
2550
2551 static void
2552 aarch64_emit_goto (int *offset_p, int *size_p)
2553 {
2554 uint32_t buf[16];
2555 uint32_t *p = buf;
2556
2557 /* The NOP instruction will be patched with an unconditional branch. */
2558 if (offset_p)
2559 *offset_p = 0;
2560 if (size_p)
2561 *size_p = 4;
2562 p += emit_nop (p);
2563
2564 emit_ops_insns (buf, p - buf);
2565 }
2566
2567 /* Implementation of emit_ops method "write_goto_address". */
2568
2569 static void
2570 aarch64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2571 {
2572 uint32_t insn;
2573
2574 emit_b (&insn, 0, to - from);
2575 append_insns (&from, 1, &insn);
2576 }
2577
2578 /* Implementation of emit_ops method "emit_const". */
2579
2580 static void
2581 aarch64_emit_const (LONGEST num)
2582 {
2583 uint32_t buf[16];
2584 uint32_t *p = buf;
2585
2586 p += emit_mov_addr (p, x0, num);
2587
2588 emit_ops_insns (buf, p - buf);
2589 }
2590
2591 /* Implementation of emit_ops method "emit_call". */
2592
2593 static void
2594 aarch64_emit_call (CORE_ADDR fn)
2595 {
2596 uint32_t buf[16];
2597 uint32_t *p = buf;
2598
2599 p += emit_mov_addr (p, ip0, fn);
2600 p += emit_blr (p, ip0);
2601
2602 emit_ops_insns (buf, p - buf);
2603 }
2604
2605 /* Implementation of emit_ops method "emit_reg". */
2606
2607 static void
2608 aarch64_emit_reg (int reg)
2609 {
2610 uint32_t buf[16];
2611 uint32_t *p = buf;
2612
2613 /* Set x0 to unsigned char *regs. */
2614 p += emit_sub (p, x0, fp, immediate_operand (2 * 8));
2615 p += emit_ldr (p, x0, x0, offset_memory_operand (0));
2616 p += emit_mov (p, x1, immediate_operand (reg));
2617
2618 emit_ops_insns (buf, p - buf);
2619
2620 aarch64_emit_call (get_raw_reg_func_addr ());
2621 }
2622
2623 /* Implementation of emit_ops method "emit_pop". */
2624
2625 static void
2626 aarch64_emit_pop (void)
2627 {
2628 uint32_t buf[16];
2629 uint32_t *p = buf;
2630
2631 p += emit_pop (p, x0);
2632
2633 emit_ops_insns (buf, p - buf);
2634 }
2635
2636 /* Implementation of emit_ops method "emit_stack_flush". */
2637
2638 static void
2639 aarch64_emit_stack_flush (void)
2640 {
2641 uint32_t buf[16];
2642 uint32_t *p = buf;
2643
2644 p += emit_push (p, x0);
2645
2646 emit_ops_insns (buf, p - buf);
2647 }
2648
2649 /* Implementation of emit_ops method "emit_zero_ext". */
2650
2651 static void
2652 aarch64_emit_zero_ext (int arg)
2653 {
2654 uint32_t buf[16];
2655 uint32_t *p = buf;
2656
2657 p += emit_ubfx (p, x0, x0, 0, arg);
2658
2659 emit_ops_insns (buf, p - buf);
2660 }
2661
2662 /* Implementation of emit_ops method "emit_swap". */
2663
2664 static void
2665 aarch64_emit_swap (void)
2666 {
2667 uint32_t buf[16];
2668 uint32_t *p = buf;
2669
2670 p += emit_ldr (p, x1, sp, offset_memory_operand (0 * 16));
2671 p += emit_str (p, x0, sp, offset_memory_operand (0 * 16));
2672 p += emit_mov (p, x0, register_operand (x1));
2673
2674 emit_ops_insns (buf, p - buf);
2675 }
2676
2677 /* Implementation of emit_ops method "emit_stack_adjust". */
2678
2679 static void
2680 aarch64_emit_stack_adjust (int n)
2681 {
2682 /* Simply drop N 16-byte cells; the top of the stack stays cached in x0. */
2683 uint32_t buf[16];
2684 uint32_t *p = buf;
2685
2686 p += emit_add (p, sp, sp, immediate_operand (n * 16));
2687
2688 emit_ops_insns (buf, p - buf);
2689 }
2690
2691 /* Implementation of emit_ops method "emit_int_call_1". */
2692
2693 static void
2694 aarch64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2695 {
2696 uint32_t buf[16];
2697 uint32_t *p = buf;
2698
2699 p += emit_mov (p, x0, immediate_operand (arg1));
2700
2701 emit_ops_insns (buf, p - buf);
2702
2703 aarch64_emit_call (fn);
2704 }
2705
2706 /* Implementation of emit_ops method "emit_void_call_2". */
2707
2708 static void
2709 aarch64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2710 {
2711 uint32_t buf[16];
2712 uint32_t *p = buf;
2713
2714 /* Push x0 on the stack. */
2715 aarch64_emit_stack_flush ();
2716
2717 /* Setup arguments for the function call:
2718
2719 x0: arg1
2720 x1: top of the stack
2721
2722 MOV x1, x0
2723 MOV x0, #arg1 */
2724
2725 p += emit_mov (p, x1, register_operand (x0));
2726 p += emit_mov (p, x0, immediate_operand (arg1));
2727
2728 emit_ops_insns (buf, p - buf);
2729
2730 aarch64_emit_call (fn);
2731
2732 /* Restore x0. */
2733 aarch64_emit_pop ();
2734 }
2735
2736 /* Implementation of emit_ops method "emit_eq_goto". */
2737
2738 static void
2739 aarch64_emit_eq_goto (int *offset_p, int *size_p)
2740 {
2741 uint32_t buf[16];
2742 uint32_t *p = buf;
2743
2744 p += emit_pop (p, x1);
2745 p += emit_cmp (p, x1, register_operand (x0));
2746 /* Branch over the next instruction if x0 != x1. */
2747 p += emit_bcond (p, NE, 8);
2748 /* The NOP instruction will be patched with an unconditional branch. */
2749 if (offset_p)
2750 *offset_p = (p - buf) * 4;
2751 if (size_p)
2752 *size_p = 4;
2753 p += emit_nop (p);
2754
2755 emit_ops_insns (buf, p - buf);
2756 }
2757
2758 /* Implementation of emit_ops method "emit_ne_goto". */
2759
2760 static void
2761 aarch64_emit_ne_goto (int *offset_p, int *size_p)
2762 {
2763 uint32_t buf[16];
2764 uint32_t *p = buf;
2765
2766 p += emit_pop (p, x1);
2767 p += emit_cmp (p, x1, register_operand (x0));
2768 /* Branch over the next instruction if x0 == x1. */
2769 p += emit_bcond (p, EQ, 8);
2770 /* The NOP instruction will be patched with an unconditional branch. */
2771 if (offset_p)
2772 *offset_p = (p - buf) * 4;
2773 if (size_p)
2774 *size_p = 4;
2775 p += emit_nop (p);
2776
2777 emit_ops_insns (buf, p - buf);
2778 }
2779
2780 /* Implementation of emit_ops method "emit_lt_goto". */
2781
2782 static void
2783 aarch64_emit_lt_goto (int *offset_p, int *size_p)
2784 {
2785 uint32_t buf[16];
2786 uint32_t *p = buf;
2787
2788 p += emit_pop (p, x1);
2789 p += emit_cmp (p, x1, register_operand (x0));
2790 /* Branch over the next instruction if x0 >= x1. */
2791 p += emit_bcond (p, GE, 8);
2792 /* The NOP instruction will be patched with an unconditional branch. */
2793 if (offset_p)
2794 *offset_p = (p - buf) * 4;
2795 if (size_p)
2796 *size_p = 4;
2797 p += emit_nop (p);
2798
2799 emit_ops_insns (buf, p - buf);
2800 }
2801
2802 /* Implementation of emit_ops method "emit_le_goto". */
2803
2804 static void
2805 aarch64_emit_le_goto (int *offset_p, int *size_p)
2806 {
2807 uint32_t buf[16];
2808 uint32_t *p = buf;
2809
2810 p += emit_pop (p, x1);
2811 p += emit_cmp (p, x1, register_operand (x0));
2812 /* Branch over the next instruction if x0 > x1. */
2813 p += emit_bcond (p, GT, 8);
2814 /* The NOP instruction will be patched with an unconditional branch. */
2815 if (offset_p)
2816 *offset_p = (p - buf) * 4;
2817 if (size_p)
2818 *size_p = 4;
2819 p += emit_nop (p);
2820
2821 emit_ops_insns (buf, p - buf);
2822 }
2823
2824 /* Implementation of emit_ops method "emit_gt_goto". */
2825
2826 static void
2827 aarch64_emit_gt_goto (int *offset_p, int *size_p)
2828 {
2829 uint32_t buf[16];
2830 uint32_t *p = buf;
2831
2832 p += emit_pop (p, x1);
2833 p += emit_cmp (p, x1, register_operand (x0));
2834 /* Branch over the next instruction if x0 <= x1. */
2835 p += emit_bcond (p, LE, 8);
2836 /* The NOP instruction will be patched with an unconditional branch. */
2837 if (offset_p)
2838 *offset_p = (p - buf) * 4;
2839 if (size_p)
2840 *size_p = 4;
2841 p += emit_nop (p);
2842
2843 emit_ops_insns (buf, p - buf);
2844 }
2845
2846 /* Implementation of emit_ops method "emit_ge_goto". */
2847
2848 static void
2849 aarch64_emit_ge_goto (int *offset_p, int *size_p)
2850 {
2851 uint32_t buf[16];
2852 uint32_t *p = buf;
2853
2854 p += emit_pop (p, x1);
2855 p += emit_cmp (p, x1, register_operand (x0));
2856 /* Branch over the next instruction if x0 < x1. */
2857 p += emit_bcond (p, LT, 8);
2858 /* The NOP instruction will be patched with an unconditional branch. */
2859 if (offset_p)
2860 *offset_p = (p - buf) * 4;
2861 if (size_p)
2862 *size_p = 4;
2863 p += emit_nop (p);
2864
2865 emit_ops_insns (buf, p - buf);
2866 }
2867
2868 static struct emit_ops aarch64_emit_ops_impl =
2869 {
2870 aarch64_emit_prologue,
2871 aarch64_emit_epilogue,
2872 aarch64_emit_add,
2873 aarch64_emit_sub,
2874 aarch64_emit_mul,
2875 aarch64_emit_lsh,
2876 aarch64_emit_rsh_signed,
2877 aarch64_emit_rsh_unsigned,
2878 aarch64_emit_ext,
2879 aarch64_emit_log_not,
2880 aarch64_emit_bit_and,
2881 aarch64_emit_bit_or,
2882 aarch64_emit_bit_xor,
2883 aarch64_emit_bit_not,
2884 aarch64_emit_equal,
2885 aarch64_emit_less_signed,
2886 aarch64_emit_less_unsigned,
2887 aarch64_emit_ref,
2888 aarch64_emit_if_goto,
2889 aarch64_emit_goto,
2890 aarch64_write_goto_address,
2891 aarch64_emit_const,
2892 aarch64_emit_call,
2893 aarch64_emit_reg,
2894 aarch64_emit_pop,
2895 aarch64_emit_stack_flush,
2896 aarch64_emit_zero_ext,
2897 aarch64_emit_swap,
2898 aarch64_emit_stack_adjust,
2899 aarch64_emit_int_call_1,
2900 aarch64_emit_void_call_2,
2901 aarch64_emit_eq_goto,
2902 aarch64_emit_ne_goto,
2903 aarch64_emit_lt_goto,
2904 aarch64_emit_le_goto,
2905 aarch64_emit_gt_goto,
2906 aarch64_emit_ge_goto,
2907 };
2908
2909 /* Implementation of linux_target_ops method "emit_ops". */
2910
2911 static struct emit_ops *
2912 aarch64_emit_ops (void)
2913 {
2914 return &aarch64_emit_ops_impl;
2915 }
2916
2917 /* Implementation of linux_target_ops method
2918 "get_min_fast_tracepoint_insn_len". */
2919
2920 static int
2921 aarch64_get_min_fast_tracepoint_insn_len (void)
2922 {
2923 return 4;
2924 }
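/* Note: 4 bytes is a single B instruction, whose signed 26-bit word
   offset lets the jump pad live within +/-128 MiB of the tracepoint,
   matching the 28-bit encodability checks in
   aarch64_install_fast_tracepoint_jump_pad.  */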
2925
2926 /* Implementation of linux_target_ops method "supports_range_stepping". */
2927
2928 static int
2929 aarch64_supports_range_stepping (void)
2930 {
2931 return 1;
2932 }
2933
2934 /* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
2935
2936 static const gdb_byte *
2937 aarch64_sw_breakpoint_from_kind (int kind, int *size)
2938 {
2939 *size = aarch64_breakpoint_len;
2940 return aarch64_breakpoint;
2941 }
2942
2943 struct linux_target_ops the_low_target =
2944 {
2945 aarch64_arch_setup,
2946 aarch64_regs_info,
2947 aarch64_cannot_fetch_register,
2948 aarch64_cannot_store_register,
2949 NULL, /* fetch_register */
2950 aarch64_get_pc,
2951 aarch64_set_pc,
2952 NULL, /* breakpoint_kind_from_pc */
2953 aarch64_sw_breakpoint_from_kind,
2954 NULL, /* breakpoint_reinsert_addr */
2955 0, /* decr_pc_after_break */
2956 aarch64_breakpoint_at,
2957 aarch64_supports_z_point_type,
2958 aarch64_insert_point,
2959 aarch64_remove_point,
2960 aarch64_stopped_by_watchpoint,
2961 aarch64_stopped_data_address,
2962 NULL, /* collect_ptrace_register */
2963 NULL, /* supply_ptrace_register */
2964 aarch64_linux_siginfo_fixup,
2965 aarch64_linux_new_process,
2966 aarch64_linux_new_thread,
2967 aarch64_linux_new_fork,
2968 aarch64_linux_prepare_to_resume,
2969 NULL, /* process_qsupported */
2970 aarch64_supports_tracepoints,
2971 aarch64_get_thread_area,
2972 aarch64_install_fast_tracepoint_jump_pad,
2973 aarch64_emit_ops,
2974 aarch64_get_min_fast_tracepoint_insn_len,
2975 aarch64_supports_range_stepping,
2976 };
2977
2978 void
2979 initialize_low_arch (void)
2980 {
2981 init_registers_aarch64 ();
2982
2983 initialize_low_arch_aarch32 ();
2984
2985 initialize_regsets_info (&aarch64_regsets_info);
2986 }