/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2016 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "linux-low.h"
#include "x86-xstate.h"
#include "nat/gdb_ptrace.h"
#include "nat/amd64-linux-siginfo.h"
#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "tracepoint.h"
#include "nat/linux-nat.h"
#include "nat/x86-linux.h"
#include "nat/x86-linux-dregs.h"
#include "linux-x86-tdesc.h"
static struct target_desc *tdesc_amd64_linux_no_xml;
static struct target_desc *tdesc_i386_linux_no_xml;

static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif
#include <sys/procfs.h>
#include "nat/gdb_ptrace.h"

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};
#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8
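
/* Illustrative sketch (not from the original source): the regmap arrays
   translate a GDB register number into a byte offset within the ptrace
   register buffer.  For instance, GDB register 0 of the 32-bit view lives
   at offset i386_regmap[0] == RAX * 8 on a 64-bit host, so a collector
   would do roughly

       collect_register (regcache, 0, (char *) buf + i386_regmap[0]);

   Entries of -1 in x86_64_regmap below mean the register is not fetched
   through this map (those registers come from the XSAVE regset instead).  */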
static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1,		/* MPX registers BND0 ... BND3.  */
  -1, -1,			/* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,	/* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,	/* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)
#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif
#ifdef __x86_64__

/* Returns true if the current inferior belongs to a x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
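
/* Background note (an assumption added for clarity, not original code):
   the desc[4] array filled in by PTRACE_GET_THREAD_AREA mirrors the
   kernel's struct user_desc, roughly

       struct user_desc { unsigned int entry_number;
                          unsigned int base_addr;
                          unsigned int limit;
                          unsigned int flags; };

   which is why desc[1] above is the thread-area base address.  */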
/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;

      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}
static int
x86_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static int
x86_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}
static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * REGSIZE);
}
static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}
static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}
#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif
static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};
static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      uint32_t pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}
static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      uint32_t newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}
static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  (*the_target->read_memory) (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}
/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };
/* Breakpoint/Watchpoint support.  */

static int
x86_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}
static int
x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_sw:
      return insert_memory_breakpoint (bp);

    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}
static int
x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_sw:
      return remove_memory_breakpoint (bp);

    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}
static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();

  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}
static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;

  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
				   &addr))
    return addr;
  return 0;
}
/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}
/* Target routine for linux_new_fork.  */

static void
x86_linux_new_fork (struct process_info *parent, struct process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernel before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug registers mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug registers mirror will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}
/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}
/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a ptrace/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_X32);
#endif

  return 0;
}
/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  extended state regions (AVX, MPX, AVX512, ...)
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;
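
/* Illustrative sketch (assumption, not original code): given a raw XSAVE
   buffer filled in by PTRACE_GETREGSET with NT_X86_XSTATE, the OS-enabled
   feature mask can be read like so:

       uint64_t xcr0;
       memcpy (&xcr0, xsave_buf + I386_LINUX_XSAVE_XCR0_OFFSET,
	       sizeof (xcr0));

   x86_linux_read_description below does effectively this via its
   xstateregs array, and then keys the returned target description off
   that mask.  */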
/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return tdesc_i386_mmx_linux;
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml;
      else
#endif
	return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = X86_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      if (is_elf64)
	{
	  if (xcr0_features)
	    {
	      switch (xcr0 & X86_XSTATE_ALL_MASK)
		{
		case X86_XSTATE_AVX512_MASK:
		  return tdesc_amd64_avx512_linux;

		case X86_XSTATE_AVX_MPX_MASK:
		  return tdesc_amd64_avx_mpx_linux;

		case X86_XSTATE_MPX_MASK:
		  return tdesc_amd64_mpx_linux;

		case X86_XSTATE_AVX_MASK:
		  return tdesc_amd64_avx_linux;

		default:
		  return tdesc_amd64_linux;
		}
	    }
	  else
	    return tdesc_amd64_linux;
	}
      else
	{
	  if (xcr0_features)
	    {
	      switch (xcr0 & X86_XSTATE_ALL_MASK)
		{
		case X86_XSTATE_AVX512_MASK:
		  return tdesc_x32_avx512_linux;

		case X86_XSTATE_MPX_MASK: /* No MPX on x32.  */
		case X86_XSTATE_AVX_MASK:
		  return tdesc_x32_avx_linux;

		default:
		  return tdesc_x32_linux;
		}
	    }
	  else
	    return tdesc_x32_linux;
	}
#endif
    }
  else
    {
      if (xcr0_features)
	{
	  switch (xcr0 & X86_XSTATE_ALL_MASK)
	    {
	    case (X86_XSTATE_AVX512_MASK):
	      return tdesc_i386_avx512_linux;

	    case (X86_XSTATE_MPX_MASK):
	      return tdesc_i386_mpx_linux;

	    case (X86_XSTATE_AVX_MPX_MASK):
	      return tdesc_i386_avx_mpx_linux;

	    case (X86_XSTATE_AVX_MASK):
	      return tdesc_i386_avx_linux;

	    default:
	      return tdesc_i386_linux;
	    }
	}
      else
	return tdesc_i386_linux;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}
/* Callback for find_inferior.  Stops iteration when a thread with a
   given PID is found.  */

static int
same_process_callback (struct inferior_list_entry *entry, void *data)
{
  int pid = *(int *) data;

  return (ptid_get_pid (entry->id) == pid);
}
/* Callback for for_each_inferior.  Calls the arch_setup routine for
   each process.  */

static void
x86_arch_setup_process_callback (struct inferior_list_entry *entry)
{
  int pid = ptid_get_pid (entry->id);

  /* Look up any thread of this process.  */
  current_thread
    = (struct thread_info *) find_inferior (&all_threads,
					    same_process_callback, &pid);

  the_low_target.arch_setup ();
}
/* Update all the target description of all processes; a new GDB
   connected, and it may or not support xml target descriptions.  */

static void
x86_linux_update_xmltarget (void)
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_inferior (&all_processes, x86_arch_setup_process_callback);

  current_thread = saved_thread;
}
/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (char **features, int count)
{
  int i;

  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  for (i = 0; i < count; i++)
    {
      const char *feature = features[i];

      if (startswith (feature, "xmlRegisters="))
	{
	  char *copy = xstrdup (feature + 13);
	  char *p;

	  for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
	    {
	      if (strcmp (p, "i386") == 0)
		{
		  use_xml = 1;
		  break;
		}
	    }

	  free (copy);
	}
    }
  x86_linux_update_xmltarget ();
}
/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif

static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const struct regs_info *
x86_linux_regs_info (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}
/* Initialize the target description for the architecture of the
   inferior.  */

static void
x86_arch_setup (void)
{
  current_process ()->tdesc = x86_linux_read_description ();
}
/* Fill *SYSNO with the syscall nr trapped.  This should only be called
   if LWP got a SYSCALL_SIGTRAP.  */

static void
x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}
static int
x86_supports_tracepoints (void)
{
  return 1;
}
static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}
static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
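
/* Usage sketch (assumption, not original code): push_opcode parses a
   string of space-separated hex bytes and appends them to BUF, e.g.

       unsigned char insn[16];
       int n = push_opcode (insn, "48 89 e6");    (mov %rsp,%rsi)

   leaving n == 3 and insn[0..2] == { 0x48, 0x89, 0xe6 }.  The jump-pad
   builders below assemble their instruction sequences this way.  */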
#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  memcpy (buf + i, &tpaddr, 8);
  i += sizeof (tpaddr);
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");	/* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	   /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	   /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");	/* lock
						   cmpxchg %ecx,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");	/* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);

  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      write_inferior_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				      CORE_ADDR collector,
				      CORE_ADDR lockaddr,
				      ULONGEST orig_size,
				      CORE_ADDR *jump_entry,
				      CORE_ADDR *trampoline,
				      ULONGEST *trampoline_size,
				      unsigned char *jjump_pad_insn,
				      ULONGEST *jjump_pad_insn_size,
				      CORE_ADDR *adjusted_insn_addr,
				      CORE_ADDR *adjusted_insn_addr_end,
				      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = 0;

      /* On x86, if trampolines are available, then 4-byte jump instructions
	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
	 with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
	return 4;
      else
	{
	  /* GDB has no channel to explain to user why a shorter fast
	     tracepoint is not possible, but at least make GDBserver
	     mention that something has gone awry.  */
	  if (!warned_about_fast_tracepoints)
	    {
	      warning ("4-byte fast tracepoints not available; %s\n", errbuf);
	      warned_about_fast_tracepoints = 1;
	    }
	  return 5;
	}
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
	 has not loaded yet.  */
      return 0;
    }
}
static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
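
/* Illustrative expansion (assumption, not part of the original code):
   EMIT_ASM (foo, "xor %eax,%eax") becomes roughly

       extern unsigned char start_foo, end_foo;
       add_insns (&start_foo, &end_foo - &start_foo);
       __asm__ ("jmp end_foo\n"
		"\t" "start_foo:" "\t" "xor %eax,%eax\n"
		"\t" "end_foo:");

   i.e. the instruction bytes are assembled into this compilation unit
   between the start_/end_ labels, jumped over at run time, and copied
   into the inferior's jump pad by add_insns.  */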
#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}

static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
}

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
}

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48;  buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function being in the shared library, may be
     >31-bits away off the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
	 a register, so avoid it if possible.  Use r10, since it is
	 call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%r10 */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%r10 */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}

static void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
	    "cmp %rax,(%rsp)\n\t"
	    "jne .Lamd64_eq_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_eq_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");
}

static void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_ne_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ne_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");
}

static void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");
}

static void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");
}

static void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");
}

static void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");
}

struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };

#endif /* __x86_64__ */
static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp\n\t"
	      "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "xor %eax,%eax\n\t"
	      "pop %ebx\n\t"
	      "pop %ebp\n\t"
	      "ret");
}

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t");
}

static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		  "cbtw\n\t"
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx\n\t");
}

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
}

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}

static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	      "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t"
	      "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space.  */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx.  */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments.  */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position.  (Note that
		 we can't assume function won't scribble on its
		 arguments, so don't try to restore from this.)  */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top.  */
	      "pop %eax");
}

static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");
}

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider  */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");
}

static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");
}

static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");
}

static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");
}

static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");
}

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };
static struct emit_ops *
x86_emit_ops (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}
/* Implementation of linux_target_ops method "sw_breakpoint_from_kind".  */

static const gdb_byte *
x86_sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}
static int
x86_supports_range_stepping (void)
{
  return 1;
}

/* Implementation of linux_target_ops method
   "supports_hardware_single_step".  */

static int
x86_supports_hardware_single_step (void)
{
  return 1;
}
static int
x86_get_ipa_tdesc_idx (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);
  const struct target_desc *tdesc = regcache->tdesc;

#ifdef __x86_64__
  if (tdesc == tdesc_amd64_linux || tdesc == tdesc_amd64_linux_no_xml
      || tdesc == tdesc_x32_linux)
    return X86_TDESC_SSE;
  if (tdesc == tdesc_amd64_avx_linux || tdesc == tdesc_x32_avx_linux)
    return X86_TDESC_AVX;
  if (tdesc == tdesc_amd64_mpx_linux)
    return X86_TDESC_MPX;
  if (tdesc == tdesc_amd64_avx_mpx_linux)
    return X86_TDESC_AVX_MPX;
  if (tdesc == tdesc_amd64_avx512_linux || tdesc == tdesc_x32_avx512_linux)
    return X86_TDESC_AVX512;
#endif

  if (tdesc == tdesc_i386_mmx_linux)
    return X86_TDESC_MMX;
  if (tdesc == tdesc_i386_linux || tdesc == tdesc_i386_linux_no_xml)
    return X86_TDESC_SSE;
  if (tdesc == tdesc_i386_avx_linux)
    return X86_TDESC_AVX;
  if (tdesc == tdesc_i386_mpx_linux)
    return X86_TDESC_MPX;
  if (tdesc == tdesc_i386_avx_mpx_linux)
    return X86_TDESC_AVX_MPX;
  if (tdesc == tdesc_i386_avx512_linux)
    return X86_TDESC_AVX512;

  return 0;
}
/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.  */

struct linux_target_ops the_low_target =
{
  x86_arch_setup,
  x86_linux_regs_info,
  x86_cannot_fetch_register,
  x86_cannot_store_register,
  NULL, /* fetch_register */
  x86_get_pc,
  x86_set_pc,
  NULL, /* breakpoint_kind_from_pc */
  x86_sw_breakpoint_from_kind,
  NULL,
  1,
  x86_breakpoint_at,
  x86_supports_z_point_type,
  x86_insert_point,
  x86_remove_point,
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */
  NULL,
  NULL,
  /* need to fix up i386 siginfo if host is amd64 */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_new_thread,
  x86_linux_new_fork,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops,
  x86_get_min_fast_tracepoint_insn_len,
  x86_supports_range_stepping,
  NULL, /* breakpoint_kind_from_current_state */
  x86_supports_hardware_single_step,
  x86_get_syscall_trapinfo,
  x86_get_ipa_tdesc_idx,
};
void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  init_registers_amd64_linux ();
  init_registers_amd64_avx_linux ();
  init_registers_amd64_avx512_linux ();
  init_registers_amd64_mpx_linux ();
  init_registers_amd64_avx_mpx_linux ();

  init_registers_x32_linux ();
  init_registers_x32_avx_linux ();
  init_registers_x32_avx512_linux ();

  tdesc_amd64_linux_no_xml = XNEW (struct target_desc);
  copy_target_description (tdesc_amd64_linux_no_xml, tdesc_amd64_linux);
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif
  init_registers_i386_linux ();
  init_registers_i386_mmx_linux ();
  init_registers_i386_avx_linux ();
  init_registers_i386_avx512_linux ();
  init_registers_i386_mpx_linux ();
  init_registers_i386_avx_mpx_linux ();

  tdesc_i386_linux_no_xml = XNEW (struct target_desc);
  copy_target_description (tdesc_i386_linux_no_xml, tdesc_i386_linux);
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}