/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

24 #include "linux-low.h"
27 #include "gdbsupport/x86-xstate.h"
28 #include "nat/gdb_ptrace.h"
31 #include "nat/amd64-linux-siginfo.h"
34 #include "gdb_proc_service.h"
35 /* Don't include elf/common.h if linux/elf.h got included by
36 gdb_proc_service.h. */
38 #include "elf/common.h"
41 #include "gdbsupport/agent.h"
43 #include "tracepoint.h"
45 #include "nat/linux-nat.h"
46 #include "nat/x86-linux.h"
47 #include "nat/x86-linux-dregs.h"
48 #include "linux-x86-tdesc.h"
static struct target_desc *tdesc_amd64_linux_no_xml;
static struct target_desc *tdesc_i386_linux_no_xml;

static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
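
/* jump_insn is a 5-byte "jmp rel32" (opcode 0xe9 followed by a 32-bit
   displacement); small_jump_insn adds the 0x66 operand-size prefix to
   form a 4-byte "jmp rel16".  The zeroed displacement bytes are filled
   in when the fast tracepoint jump pads are installed below.  */
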
/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#include <sys/procfs.h>

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004

/* Linux target op definitions for the x86 architecture.
   This is initialized assuming an amd64 target.
   'low_arch_setup' will correct it for i386 or amd64 targets.  */

class x86_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

  void process_qsupported (char **features, int count) override;

  bool supports_tracepoints () override;

  bool supports_fast_tracepoints () override;

  int install_fast_tracepoint_jump_pad
    (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
     CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
     CORE_ADDR *trampoline, ULONGEST *trampoline_size,
     unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
     CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
     char *err) override;

  int get_min_fast_tracepoint_insn_len () override;

  struct emit_ops *emit_ops () override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  int low_decr_pc_after_break () override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;

  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */

  /* Need to fix up i386 siginfo if host is amd64.  */
  bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
			  int direction) override;

  arch_process_info *low_new_process () override;

  void low_delete_process (arch_process_info *info) override;

  void low_new_thread (lwp_info *) override;

  void low_delete_thread (arch_lwp_info *) override;

  void low_new_fork (process_info *parent, process_info *child) override;

  void low_prepare_to_resume (lwp_info *lwp) override;

  int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;

  bool low_supports_range_stepping () override;

  bool low_supports_catch_syscall () override;

  void low_get_syscall_trapinfo (regcache *regcache, int *sysno) override;

private:

  /* Update the target description of all processes; a new GDB
     connected, and it may or may not support xml target descriptions.  */
  void update_xmltarget ();
};

/* The singleton target ops object.  */

static x86_target the_x86_target;

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
#ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
  21 * 8, 22 * 8,		/* fs_base, gs_base.  */
#else
  -1, -1,
#endif
  -1, -1, -1, -1,		/* MPX registers BND0 ... BND3.  */
  -1, -1,			/* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#endif

/* Returns true if the current inferior belongs to a x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

int
x86_target::low_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;

      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid, (void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}

bool
x86_target::low_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return false;
#endif

  return regno >= I386_NUM_REGS;
}

bool
x86_target::low_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return false;
#endif

  return regno >= I386_NUM_REGS;
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	collect_register_by_name (regcache, "fs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);

	collect_register_by_name (regcache, "gs_base", &base);
	ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
      }
#endif

      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * REGSIZE);

#ifdef __x86_64__
  /* Sign extend EAX value to avoid potential syscall restart
     problems.

     See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
     for a detailed explanation.  */
  if (register_size (regcache->tdesc, 0) == 4)
    {
      void *ptr = ((gdb_byte *) buf
		   + i386_regmap[find_regno (regcache->tdesc, "eax")]);

      *(int64_t *) ptr = *(int32_t *) ptr;
    }
#endif
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

#ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
      {
	unsigned long base;
	int lwpid = lwpid_of (current_thread);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	  supply_register_by_name (regcache, "fs_base", &base);

	if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
	  supply_register_by_name (regcache, "gs_base", &base);
      }
#endif

      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};

bool
x86_target::low_supports_breakpoints ()
{
  return true;
}

CORE_ADDR
x86_target::low_get_pc (regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      uint32_t pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

void
x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      uint32_t newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}

int
x86_target::low_decr_pc_after_break ()
{
  return 1;
}

static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

bool
x86_target::low_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  read_memory (pc, &c, 1);
  if (c == 0xCC)
    return true;

  return false;
}

/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };

/* Breakpoint/Watchpoint support.  */

bool
x86_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}

int
x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			      int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

int
x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			      int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

bool
x86_target::low_stopped_by_watchpoint ()
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

CORE_ADDR
x86_target::low_stopped_data_address ()
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;

  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
				   &addr))
    return addr;
  return 0;
}

/* Called when a new process is created.  */

arch_process_info *
x86_target::low_new_process ()
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a process is being deleted.  */

void
x86_target::low_delete_process (arch_process_info *info)
{
  xfree (info);
}

void
x86_target::low_new_thread (lwp_info *lwp)
{
  /* This comes from nat/.  */
  x86_linux_new_thread (lwp);
}

void
x86_target::low_delete_thread (arch_lwp_info *alwp)
{
  /* This comes from nat/.  */
  x86_linux_delete_thread (alwp);
}

/* Target routine for new_fork.  */

void
x86_target::low_new_fork (process_info *parent, process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernel before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug registers mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug registers mirror will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

void
x86_target::low_prepare_to_resume (lwp_info *lwp)
{
  /* This comes from nat/.  */
  x86_linux_prepare_to_resume (lwp);
}

/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a ptrace/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
   INF.  */

bool
x86_target::low_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_X32);
#endif

  return false;
}

/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  extended state regions (AVX, etc.)
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464
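
/* For example, the OS-enabled feature mask (XCR0) can be read from an
   XSAVE buffer as the 64-bit value at byte offset 464:

     xcr0 = *(uint64_t *) (xsave_buf + I386_LINUX_XSAVE_XCR0_OFFSET);

   which is what x86_linux_read_description below does with the buffer
   returned by PTRACE_GETREGSET / NT_X86_XSTATE.  */
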
/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return i386_linux_read_description (X86_XSTATE_X87);
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml;
      else
	return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = X86_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (machine == EM_X86_64)
    {
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
					      !is_elf64);

      if (tdesc == NULL)
	tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
      return tdesc;
    }
  else
    {
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);

      if (tdesc == NULL)
	tdesc = i386_linux_read_description (X86_XSTATE_SSE);

      return tdesc;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}

/* Update the target description of all processes; a new GDB
   connected, and it may or may not support xml target descriptions.  */

void
x86_target::update_xmltarget ()
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_process ([this] (process_info *proc) {
    int pid = proc->pid;

    /* Look up any thread of this process.  */
    current_thread = find_any_thread_of_pid (pid);

    low_arch_setup ();
  });

  current_thread = saved_thread;
}

/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

void
x86_target::process_qsupported (char **features, int count)
{
  int i;

  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  for (i = 0; i < count; i++)
    {
      const char *feature = features[i];

      if (startswith (feature, "xmlRegisters="))
	{
	  char *copy = xstrdup (feature + 13);

	  char *saveptr;
	  for (char *p = strtok_r (copy, ",", &saveptr);
	       p != NULL;
	       p = strtok_r (NULL, ",", &saveptr))
	    {
	      if (strcmp (p, "i386") == 0)
		{
		  use_xml = 1;
		  break;
		}
	    }

	  free (copy);
	}
    }
  update_xmltarget ();
}

/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif

static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const regs_info *
x86_target::get_regs_info ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}

/* Initialize the target description for the architecture of the
   inferior.  */

void
x86_target::low_arch_setup ()
{
  current_process ()->tdesc = x86_linux_read_description ();
}

bool
x86_target::low_supports_catch_syscall ()
{
  return true;
}

/* Fill *SYSNO and *SYSRET with the syscall nr trapped and the syscall return
   code.  This should only be called if LWP got a SYSCALL_SIGTRAP.  */

void
x86_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}

bool
1149 append_insns (CORE_ADDR
*to
, size_t len
, const unsigned char *buf
)
1151 target_write_memory (*to
, buf
, len
);
1156 push_opcode (unsigned char *buf
, const char *op
)
1158 unsigned char *buf_org
= buf
;
1163 unsigned long ul
= strtoul (op
, &endptr
, 16);
1172 return buf
- buf_org
;
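
/* For example, push_opcode (buf, "48 83 ec 18") parses the four hex
   byte pairs, stores 0x48 0x83 0xec 0x18 into BUF, and returns 4; the
   jump pad builders below use it to assemble instruction sequences
   from readable opcode strings.  */
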
1177 /* Build a jump pad that saves registers and calls a collection
1178 function. Writes a jump instruction to the jump pad to
1179 JJUMPAD_INSN. The caller is responsible to write it in at the
1180 tracepoint address. */
1183 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1184 CORE_ADDR collector
,
1187 CORE_ADDR
*jump_entry
,
1188 CORE_ADDR
*trampoline
,
1189 ULONGEST
*trampoline_size
,
1190 unsigned char *jjump_pad_insn
,
1191 ULONGEST
*jjump_pad_insn_size
,
1192 CORE_ADDR
*adjusted_insn_addr
,
1193 CORE_ADDR
*adjusted_insn_addr_end
,
1196 unsigned char buf
[40];
1200 CORE_ADDR buildaddr
= *jump_entry
;
1202 /* Build the jump pad. */
1204 /* First, do tracepoint data collection. Save registers. */
1206 /* Need to ensure stack pointer saved first. */
1207 buf
[i
++] = 0x54; /* push %rsp */
1208 buf
[i
++] = 0x55; /* push %rbp */
1209 buf
[i
++] = 0x57; /* push %rdi */
1210 buf
[i
++] = 0x56; /* push %rsi */
1211 buf
[i
++] = 0x52; /* push %rdx */
1212 buf
[i
++] = 0x51; /* push %rcx */
1213 buf
[i
++] = 0x53; /* push %rbx */
1214 buf
[i
++] = 0x50; /* push %rax */
1215 buf
[i
++] = 0x41; buf
[i
++] = 0x57; /* push %r15 */
1216 buf
[i
++] = 0x41; buf
[i
++] = 0x56; /* push %r14 */
1217 buf
[i
++] = 0x41; buf
[i
++] = 0x55; /* push %r13 */
1218 buf
[i
++] = 0x41; buf
[i
++] = 0x54; /* push %r12 */
1219 buf
[i
++] = 0x41; buf
[i
++] = 0x53; /* push %r11 */
1220 buf
[i
++] = 0x41; buf
[i
++] = 0x52; /* push %r10 */
1221 buf
[i
++] = 0x41; buf
[i
++] = 0x51; /* push %r9 */
1222 buf
[i
++] = 0x41; buf
[i
++] = 0x50; /* push %r8 */
1223 buf
[i
++] = 0x9c; /* pushfq */
1224 buf
[i
++] = 0x48; /* movabs <addr>,%rdi */
1226 memcpy (buf
+ i
, &tpaddr
, 8);
1228 buf
[i
++] = 0x57; /* push %rdi */
1229 append_insns (&buildaddr
, i
, buf
);
1231 /* Stack space for the collecting_t object. */
1233 i
+= push_opcode (&buf
[i
], "48 83 ec 18"); /* sub $0x18,%rsp */
1234 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov <tpoint>,%rax */
1235 memcpy (buf
+ i
, &tpoint
, 8);
1237 i
+= push_opcode (&buf
[i
], "48 89 04 24"); /* mov %rax,(%rsp) */
1238 i
+= push_opcode (&buf
[i
],
1239 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1240 i
+= push_opcode (&buf
[i
], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1241 append_insns (&buildaddr
, i
, buf
);
1245 i
+= push_opcode (&buf
[i
], "48 be"); /* movl <lockaddr>,%rsi */
1246 memcpy (&buf
[i
], (void *) &lockaddr
, 8);
1248 i
+= push_opcode (&buf
[i
], "48 89 e1"); /* mov %rsp,%rcx */
1249 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1250 i
+= push_opcode (&buf
[i
], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1251 i
+= push_opcode (&buf
[i
], "48 85 c0"); /* test %rax,%rax */
1252 i
+= push_opcode (&buf
[i
], "75 f4"); /* jne <again> */
1253 append_insns (&buildaddr
, i
, buf
);
1255 /* Set up the gdb_collect call. */
1256 /* At this point, (stack pointer + 0x18) is the base of our saved
1260 i
+= push_opcode (&buf
[i
], "48 89 e6"); /* mov %rsp,%rsi */
1261 i
+= push_opcode (&buf
[i
], "48 83 c6 18"); /* add $0x18,%rsi */
1263 /* tpoint address may be 64-bit wide. */
1264 i
+= push_opcode (&buf
[i
], "48 bf"); /* movl <addr>,%rdi */
1265 memcpy (buf
+ i
, &tpoint
, 8);
1267 append_insns (&buildaddr
, i
, buf
);
1269 /* The collector function being in the shared library, may be
1270 >31-bits away off the jump pad. */
1272 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov $collector,%rax */
1273 memcpy (buf
+ i
, &collector
, 8);
1275 i
+= push_opcode (&buf
[i
], "ff d0"); /* callq *%rax */
1276 append_insns (&buildaddr
, i
, buf
);
1278 /* Clear the spin-lock. */
1280 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1281 i
+= push_opcode (&buf
[i
], "48 a3"); /* mov %rax, lockaddr */
1282 memcpy (buf
+ i
, &lockaddr
, 8);
1284 append_insns (&buildaddr
, i
, buf
);
1286 /* Remove stack that had been used for the collect_t object. */
1288 i
+= push_opcode (&buf
[i
], "48 83 c4 18"); /* add $0x18,%rsp */
1289 append_insns (&buildaddr
, i
, buf
);
1291 /* Restore register state. */
1293 buf
[i
++] = 0x48; /* add $0x8,%rsp */
1297 buf
[i
++] = 0x9d; /* popfq */
1298 buf
[i
++] = 0x41; buf
[i
++] = 0x58; /* pop %r8 */
1299 buf
[i
++] = 0x41; buf
[i
++] = 0x59; /* pop %r9 */
1300 buf
[i
++] = 0x41; buf
[i
++] = 0x5a; /* pop %r10 */
1301 buf
[i
++] = 0x41; buf
[i
++] = 0x5b; /* pop %r11 */
1302 buf
[i
++] = 0x41; buf
[i
++] = 0x5c; /* pop %r12 */
1303 buf
[i
++] = 0x41; buf
[i
++] = 0x5d; /* pop %r13 */
1304 buf
[i
++] = 0x41; buf
[i
++] = 0x5e; /* pop %r14 */
1305 buf
[i
++] = 0x41; buf
[i
++] = 0x5f; /* pop %r15 */
1306 buf
[i
++] = 0x58; /* pop %rax */
1307 buf
[i
++] = 0x5b; /* pop %rbx */
1308 buf
[i
++] = 0x59; /* pop %rcx */
1309 buf
[i
++] = 0x5a; /* pop %rdx */
1310 buf
[i
++] = 0x5e; /* pop %rsi */
1311 buf
[i
++] = 0x5f; /* pop %rdi */
1312 buf
[i
++] = 0x5d; /* pop %rbp */
1313 buf
[i
++] = 0x5c; /* pop %rsp */
1314 append_insns (&buildaddr
, i
, buf
);
1316 /* Now, adjust the original instruction to execute in the jump
1318 *adjusted_insn_addr
= buildaddr
;
1319 relocate_instruction (&buildaddr
, tpaddr
);
1320 *adjusted_insn_addr_end
= buildaddr
;
1322 /* Finally, write a jump back to the program. */
1324 loffset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1325 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1328 "E.Jump back from jump pad too far from tracepoint "
1329 "(offset 0x%" PRIx64
" > int32).", loffset
);
1333 offset
= (int) loffset
;
1334 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1335 memcpy (buf
+ 1, &offset
, 4);
1336 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1338 /* The jump pad is now built. Wire in a jump to our jump pad. This
1339 is always done last (by our caller actually), so that we can
1340 install fast tracepoints with threads running. This relies on
1341 the agent's atomic write support. */
1342 loffset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1343 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1346 "E.Jump pad too far from tracepoint "
1347 "(offset 0x%" PRIx64
" > int32).", loffset
);
1351 offset
= (int) loffset
;
1353 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1354 memcpy (buf
+ 1, &offset
, 4);
1355 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1356 *jjump_pad_insn_size
= sizeof (jump_insn
);
1358 /* Return the end address of our pad. */
1359 *jump_entry
= buildaddr
;
1364 #endif /* __x86_64__ */
1366 /* Build a jump pad that saves registers and calls a collection
1367 function. Writes a jump instruction to the jump pad to
1368 JJUMPAD_INSN. The caller is responsible to write it in at the
1369 tracepoint address. */
1372 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1373 CORE_ADDR collector
,
1376 CORE_ADDR
*jump_entry
,
1377 CORE_ADDR
*trampoline
,
1378 ULONGEST
*trampoline_size
,
1379 unsigned char *jjump_pad_insn
,
1380 ULONGEST
*jjump_pad_insn_size
,
1381 CORE_ADDR
*adjusted_insn_addr
,
1382 CORE_ADDR
*adjusted_insn_addr_end
,
1385 unsigned char buf
[0x100];
1387 CORE_ADDR buildaddr
= *jump_entry
;
1389 /* Build the jump pad. */
1391 /* First, do tracepoint data collection. Save registers. */
1393 buf
[i
++] = 0x60; /* pushad */
1394 buf
[i
++] = 0x68; /* push tpaddr aka $pc */
1395 *((int *)(buf
+ i
)) = (int) tpaddr
;
1397 buf
[i
++] = 0x9c; /* pushf */
1398 buf
[i
++] = 0x1e; /* push %ds */
1399 buf
[i
++] = 0x06; /* push %es */
1400 buf
[i
++] = 0x0f; /* push %fs */
1402 buf
[i
++] = 0x0f; /* push %gs */
1404 buf
[i
++] = 0x16; /* push %ss */
1405 buf
[i
++] = 0x0e; /* push %cs */
1406 append_insns (&buildaddr
, i
, buf
);
1408 /* Stack space for the collecting_t object. */
1410 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1412 /* Build the object. */
1413 i
+= push_opcode (&buf
[i
], "b8"); /* mov <tpoint>,%eax */
1414 memcpy (buf
+ i
, &tpoint
, 4);
1416 i
+= push_opcode (&buf
[i
], "89 04 24"); /* mov %eax,(%esp) */
1418 i
+= push_opcode (&buf
[i
], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1419 i
+= push_opcode (&buf
[i
], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1420 append_insns (&buildaddr
, i
, buf
);
1422 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1423 If we cared for it, this could be using xchg alternatively. */
1426 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1427 i
+= push_opcode (&buf
[i
], "f0 0f b1 25"); /* lock cmpxchg
1429 memcpy (&buf
[i
], (void *) &lockaddr
, 4);
1431 i
+= push_opcode (&buf
[i
], "85 c0"); /* test %eax,%eax */
1432 i
+= push_opcode (&buf
[i
], "75 f2"); /* jne <again> */
1433 append_insns (&buildaddr
, i
, buf
);
1436 /* Set up arguments to the gdb_collect call. */
1438 i
+= push_opcode (&buf
[i
], "89 e0"); /* mov %esp,%eax */
1439 i
+= push_opcode (&buf
[i
], "83 c0 08"); /* add $0x08,%eax */
1440 i
+= push_opcode (&buf
[i
], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1441 append_insns (&buildaddr
, i
, buf
);
1444 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1445 append_insns (&buildaddr
, i
, buf
);
1448 i
+= push_opcode (&buf
[i
], "c7 04 24"); /* movl <addr>,(%esp) */
1449 memcpy (&buf
[i
], (void *) &tpoint
, 4);
1451 append_insns (&buildaddr
, i
, buf
);
1453 buf
[0] = 0xe8; /* call <reladdr> */
1454 offset
= collector
- (buildaddr
+ sizeof (jump_insn
));
1455 memcpy (buf
+ 1, &offset
, 4);
1456 append_insns (&buildaddr
, 5, buf
);
1457 /* Clean up after the call. */
1458 buf
[0] = 0x83; /* add $0x8,%esp */
1461 append_insns (&buildaddr
, 3, buf
);
1464 /* Clear the spin-lock. This would need the LOCK prefix on older
1467 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1468 i
+= push_opcode (&buf
[i
], "a3"); /* mov %eax, lockaddr */
1469 memcpy (buf
+ i
, &lockaddr
, 4);
1471 append_insns (&buildaddr
, i
, buf
);
1474 /* Remove stack that had been used for the collect_t object. */
1476 i
+= push_opcode (&buf
[i
], "83 c4 08"); /* add $0x08,%esp */
1477 append_insns (&buildaddr
, i
, buf
);
1480 buf
[i
++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1483 buf
[i
++] = 0x17; /* pop %ss */
1484 buf
[i
++] = 0x0f; /* pop %gs */
1486 buf
[i
++] = 0x0f; /* pop %fs */
1488 buf
[i
++] = 0x07; /* pop %es */
1489 buf
[i
++] = 0x1f; /* pop %ds */
1490 buf
[i
++] = 0x9d; /* popf */
1491 buf
[i
++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1494 buf
[i
++] = 0x61; /* popad */
1495 append_insns (&buildaddr
, i
, buf
);
1497 /* Now, adjust the original instruction to execute in the jump
1499 *adjusted_insn_addr
= buildaddr
;
1500 relocate_instruction (&buildaddr
, tpaddr
);
1501 *adjusted_insn_addr_end
= buildaddr
;
1503 /* Write the jump back to the program. */
1504 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1505 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1506 memcpy (buf
+ 1, &offset
, 4);
1507 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1509 /* The jump pad is now built. Wire in a jump to our jump pad. This
1510 is always done last (by our caller actually), so that we can
1511 install fast tracepoints with threads running. This relies on
1512 the agent's atomic write support. */
1515 /* Create a trampoline. */
1516 *trampoline_size
= sizeof (jump_insn
);
1517 if (!claim_trampoline_space (*trampoline_size
, trampoline
))
1519 /* No trampoline space available. */
1521 "E.Cannot allocate trampoline space needed for fast "
1522 "tracepoints on 4-byte instructions.");
1526 offset
= *jump_entry
- (*trampoline
+ sizeof (jump_insn
));
1527 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1528 memcpy (buf
+ 1, &offset
, 4);
1529 target_write_memory (*trampoline
, buf
, sizeof (jump_insn
));
1531 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1532 offset
= (*trampoline
- (tpaddr
+ sizeof (small_jump_insn
))) & 0xffff;
1533 memcpy (buf
, small_jump_insn
, sizeof (small_jump_insn
));
1534 memcpy (buf
+ 2, &offset
, 2);
1535 memcpy (jjump_pad_insn
, buf
, sizeof (small_jump_insn
));
1536 *jjump_pad_insn_size
= sizeof (small_jump_insn
);
1540 /* Else use a 32-bit relative jump instruction. */
1541 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1542 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1543 memcpy (buf
+ 1, &offset
, 4);
1544 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1545 *jjump_pad_insn_size
= sizeof (jump_insn
);
1548 /* Return the end address of our pad. */
1549 *jump_entry
= buildaddr
;
x86_target::supports_fast_tracepoints ()
{
  return true;
}

int
x86_target::install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
					      CORE_ADDR tpaddr,
					      CORE_ADDR collector,
					      CORE_ADDR lockaddr,
					      ULONGEST orig_size,
					      CORE_ADDR *jump_entry,
					      CORE_ADDR *trampoline,
					      ULONGEST *trampoline_size,
					      unsigned char *jjump_pad_insn,
					      ULONGEST *jjump_pad_insn_size,
					      CORE_ADDR *adjusted_insn_addr,
					      CORE_ADDR *adjusted_insn_addr_end,
					      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}

/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

int
x86_target::get_min_fast_tracepoint_insn_len ()
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /*  On x86-64, 5-byte jump instructions with a 4-byte offset are always
      used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = 0;

      /* On x86, if trampolines are available, then 4-byte jump instructions
	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
	 with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
	return 4;
      else
	{
	  /* GDB has no channel to explain to user why a shorter fast
	     tracepoint is not possible, but at least make GDBserver
	     mention that something has gone awry.  */
	  if (!warned_about_fast_tracepoints)
	    {
	      warning ("4-byte fast tracepoints not available; %s", errbuf);
	      warned_about_fast_tracepoints = 1;
	    }
	  return 5;
	}
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
	 has not loaded yet.  */
      return 0;
    }
}

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    debug_printf ("Adding %d bytes of insn at %s\n",
		  len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
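
/* For example, the EMIT_ASM (amd64_add, "add (%rsp),%rax\n\t"
   "lea 0x8(%rsp),%rsp") invocation used below assembles those two
   instructions into the current function between the start_amd64_add
   and end_amd64_add labels, and add_insns then copies the bytes
   between those labels into the compiled-bytecode buffer at
   current_insn_ptr.  */
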
1700 amd64_emit_prologue (void)
1702 EMIT_ASM (amd64_prologue
,
1704 "movq %rsp,%rbp\n\t"
1705 "sub $0x20,%rsp\n\t"
1706 "movq %rdi,-8(%rbp)\n\t"
1707 "movq %rsi,-16(%rbp)");
1712 amd64_emit_epilogue (void)
1714 EMIT_ASM (amd64_epilogue
,
1715 "movq -16(%rbp),%rdi\n\t"
1716 "movq %rax,(%rdi)\n\t"
1723 amd64_emit_add (void)
1725 EMIT_ASM (amd64_add
,
1726 "add (%rsp),%rax\n\t"
1727 "lea 0x8(%rsp),%rsp");
1731 amd64_emit_sub (void)
1733 EMIT_ASM (amd64_sub
,
1734 "sub %rax,(%rsp)\n\t"
1739 amd64_emit_mul (void)
1745 amd64_emit_lsh (void)
1751 amd64_emit_rsh_signed (void)
1757 amd64_emit_rsh_unsigned (void)
1763 amd64_emit_ext (int arg
)
1768 EMIT_ASM (amd64_ext_8
,
1774 EMIT_ASM (amd64_ext_16
,
1779 EMIT_ASM (amd64_ext_32
,
1788 amd64_emit_log_not (void)
1790 EMIT_ASM (amd64_log_not
,
1791 "test %rax,%rax\n\t"
1797 amd64_emit_bit_and (void)
1799 EMIT_ASM (amd64_and
,
1800 "and (%rsp),%rax\n\t"
1801 "lea 0x8(%rsp),%rsp");
1805 amd64_emit_bit_or (void)
1808 "or (%rsp),%rax\n\t"
1809 "lea 0x8(%rsp),%rsp");
1813 amd64_emit_bit_xor (void)
1815 EMIT_ASM (amd64_xor
,
1816 "xor (%rsp),%rax\n\t"
1817 "lea 0x8(%rsp),%rsp");
1821 amd64_emit_bit_not (void)
1823 EMIT_ASM (amd64_bit_not
,
1824 "xorq $0xffffffffffffffff,%rax");
1828 amd64_emit_equal (void)
1830 EMIT_ASM (amd64_equal
,
1831 "cmp %rax,(%rsp)\n\t"
1832 "je .Lamd64_equal_true\n\t"
1834 "jmp .Lamd64_equal_end\n\t"
1835 ".Lamd64_equal_true:\n\t"
1837 ".Lamd64_equal_end:\n\t"
1838 "lea 0x8(%rsp),%rsp");
1842 amd64_emit_less_signed (void)
1844 EMIT_ASM (amd64_less_signed
,
1845 "cmp %rax,(%rsp)\n\t"
1846 "jl .Lamd64_less_signed_true\n\t"
1848 "jmp .Lamd64_less_signed_end\n\t"
1849 ".Lamd64_less_signed_true:\n\t"
1851 ".Lamd64_less_signed_end:\n\t"
1852 "lea 0x8(%rsp),%rsp");
1856 amd64_emit_less_unsigned (void)
1858 EMIT_ASM (amd64_less_unsigned
,
1859 "cmp %rax,(%rsp)\n\t"
1860 "jb .Lamd64_less_unsigned_true\n\t"
1862 "jmp .Lamd64_less_unsigned_end\n\t"
1863 ".Lamd64_less_unsigned_true:\n\t"
1865 ".Lamd64_less_unsigned_end:\n\t"
1866 "lea 0x8(%rsp),%rsp");
1870 amd64_emit_ref (int size
)
1875 EMIT_ASM (amd64_ref1
,
1879 EMIT_ASM (amd64_ref2
,
1883 EMIT_ASM (amd64_ref4
,
1884 "movl (%rax),%eax");
1887 EMIT_ASM (amd64_ref8
,
1888 "movq (%rax),%rax");
1894 amd64_emit_if_goto (int *offset_p
, int *size_p
)
1896 EMIT_ASM (amd64_if_goto
,
1900 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1908 amd64_emit_goto (int *offset_p
, int *size_p
)
1910 EMIT_ASM (amd64_goto
,
1911 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1919 amd64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
1921 int diff
= (to
- (from
+ size
));
1922 unsigned char buf
[sizeof (int)];
1930 memcpy (buf
, &diff
, sizeof (int));
1931 target_write_memory (from
, buf
, sizeof (int));
1935 amd64_emit_const (LONGEST num
)
1937 unsigned char buf
[16];
1939 CORE_ADDR buildaddr
= current_insn_ptr
;
1942 buf
[i
++] = 0x48; buf
[i
++] = 0xb8; /* mov $<n>,%rax */
1943 memcpy (&buf
[i
], &num
, sizeof (num
));
1945 append_insns (&buildaddr
, i
, buf
);
1946 current_insn_ptr
= buildaddr
;
1950 amd64_emit_call (CORE_ADDR fn
)
1952 unsigned char buf
[16];
1954 CORE_ADDR buildaddr
;
1957 /* The destination function being in the shared library, may be
1958 >31-bits away off the compiled code pad. */
1960 buildaddr
= current_insn_ptr
;
1962 offset64
= fn
- (buildaddr
+ 1 /* call op */ + 4 /* 32-bit offset */);
1966 if (offset64
> INT_MAX
|| offset64
< INT_MIN
)
1968 /* Offset is too large for a call. Use callq, but that requires
1969 a register, so avoid it if possible. Use r10, since it is
1970 call-clobbered, we don't have to push/pop it. */
1971 buf
[i
++] = 0x48; /* mov $fn,%r10 */
1973 memcpy (buf
+ i
, &fn
, 8);
1975 buf
[i
++] = 0xff; /* callq *%r10 */
1980 int offset32
= offset64
; /* we know we can't overflow here. */
1982 buf
[i
++] = 0xe8; /* call <reladdr> */
1983 memcpy (buf
+ i
, &offset32
, 4);
1987 append_insns (&buildaddr
, i
, buf
);
1988 current_insn_ptr
= buildaddr
;
1992 amd64_emit_reg (int reg
)
1994 unsigned char buf
[16];
1996 CORE_ADDR buildaddr
;
1998 /* Assume raw_regs is still in %rdi. */
1999 buildaddr
= current_insn_ptr
;
2001 buf
[i
++] = 0xbe; /* mov $<n>,%esi */
2002 memcpy (&buf
[i
], ®
, sizeof (reg
));
2004 append_insns (&buildaddr
, i
, buf
);
2005 current_insn_ptr
= buildaddr
;
2006 amd64_emit_call (get_raw_reg_func_addr ());
2010 amd64_emit_pop (void)
2012 EMIT_ASM (amd64_pop
,
2017 amd64_emit_stack_flush (void)
2019 EMIT_ASM (amd64_stack_flush
,
2024 amd64_emit_zero_ext (int arg
)
2029 EMIT_ASM (amd64_zero_ext_8
,
2033 EMIT_ASM (amd64_zero_ext_16
,
2034 "and $0xffff,%rax");
2037 EMIT_ASM (amd64_zero_ext_32
,
2038 "mov $0xffffffff,%rcx\n\t"
2047 amd64_emit_swap (void)
2049 EMIT_ASM (amd64_swap
,
2056 amd64_emit_stack_adjust (int n
)
2058 unsigned char buf
[16];
2060 CORE_ADDR buildaddr
= current_insn_ptr
;
2063 buf
[i
++] = 0x48; /* lea $<n>(%rsp),%rsp */
2067 /* This only handles adjustments up to 16, but we don't expect any more. */
2069 append_insns (&buildaddr
, i
, buf
);
2070 current_insn_ptr
= buildaddr
;
2073 /* FN's prototype is `LONGEST(*fn)(int)'. */
2076 amd64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2078 unsigned char buf
[16];
2080 CORE_ADDR buildaddr
;
2082 buildaddr
= current_insn_ptr
;
2084 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2085 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2087 append_insns (&buildaddr
, i
, buf
);
2088 current_insn_ptr
= buildaddr
;
2089 amd64_emit_call (fn
);
2092 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2095 amd64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2097 unsigned char buf
[16];
2099 CORE_ADDR buildaddr
;
2101 buildaddr
= current_insn_ptr
;
2103 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2104 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2106 append_insns (&buildaddr
, i
, buf
);
2107 current_insn_ptr
= buildaddr
;
2108 EMIT_ASM (amd64_void_call_2_a
,
2109 /* Save away a copy of the stack top. */
2111 /* Also pass top as the second argument. */
2113 amd64_emit_call (fn
);
2114 EMIT_ASM (amd64_void_call_2_b
,
2115 /* Restore the stack top, %rax may have been trashed. */
2120 amd64_emit_eq_goto (int *offset_p
, int *size_p
)
2123 "cmp %rax,(%rsp)\n\t"
2124 "jne .Lamd64_eq_fallthru\n\t"
2125 "lea 0x8(%rsp),%rsp\n\t"
2127 /* jmp, but don't trust the assembler to choose the right jump */
2128 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2129 ".Lamd64_eq_fallthru:\n\t"
2130 "lea 0x8(%rsp),%rsp\n\t"
2140 amd64_emit_ne_goto (int *offset_p
, int *size_p
)
2143 "cmp %rax,(%rsp)\n\t"
2144 "je .Lamd64_ne_fallthru\n\t"
2145 "lea 0x8(%rsp),%rsp\n\t"
2147 /* jmp, but don't trust the assembler to choose the right jump */
2148 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2149 ".Lamd64_ne_fallthru:\n\t"
2150 "lea 0x8(%rsp),%rsp\n\t"
2160 amd64_emit_lt_goto (int *offset_p
, int *size_p
)
2163 "cmp %rax,(%rsp)\n\t"
2164 "jnl .Lamd64_lt_fallthru\n\t"
2165 "lea 0x8(%rsp),%rsp\n\t"
2167 /* jmp, but don't trust the assembler to choose the right jump */
2168 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2169 ".Lamd64_lt_fallthru:\n\t"
2170 "lea 0x8(%rsp),%rsp\n\t"
2180 amd64_emit_le_goto (int *offset_p
, int *size_p
)
2183 "cmp %rax,(%rsp)\n\t"
2184 "jnle .Lamd64_le_fallthru\n\t"
2185 "lea 0x8(%rsp),%rsp\n\t"
2187 /* jmp, but don't trust the assembler to choose the right jump */
2188 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2189 ".Lamd64_le_fallthru:\n\t"
2190 "lea 0x8(%rsp),%rsp\n\t"
2200 amd64_emit_gt_goto (int *offset_p
, int *size_p
)
2203 "cmp %rax,(%rsp)\n\t"
2204 "jng .Lamd64_gt_fallthru\n\t"
2205 "lea 0x8(%rsp),%rsp\n\t"
2207 /* jmp, but don't trust the assembler to choose the right jump */
2208 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2209 ".Lamd64_gt_fallthru:\n\t"
2210 "lea 0x8(%rsp),%rsp\n\t"
2220 amd64_emit_ge_goto (int *offset_p
, int *size_p
)
2223 "cmp %rax,(%rsp)\n\t"
2224 "jnge .Lamd64_ge_fallthru\n\t"
2225 ".Lamd64_ge_jump:\n\t"
2226 "lea 0x8(%rsp),%rsp\n\t"
2228 /* jmp, but don't trust the assembler to choose the right jump */
2229 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2230 ".Lamd64_ge_fallthru:\n\t"
2231 "lea 0x8(%rsp),%rsp\n\t"
2240 struct emit_ops amd64_emit_ops
=
2242 amd64_emit_prologue
,
2243 amd64_emit_epilogue
,
2248 amd64_emit_rsh_signed
,
2249 amd64_emit_rsh_unsigned
,
2257 amd64_emit_less_signed
,
2258 amd64_emit_less_unsigned
,
2262 amd64_write_goto_address
,
2267 amd64_emit_stack_flush
,
2268 amd64_emit_zero_ext
,
2270 amd64_emit_stack_adjust
,
2271 amd64_emit_int_call_1
,
2272 amd64_emit_void_call_2
,
2281 #endif /* __x86_64__ */
2284 i386_emit_prologue (void)
2286 EMIT_ASM32 (i386_prologue
,
2290 /* At this point, the raw regs base address is at 8(%ebp), and the
2291 value pointer is at 12(%ebp). */
2295 i386_emit_epilogue (void)
2297 EMIT_ASM32 (i386_epilogue
,
2298 "mov 12(%ebp),%ecx\n\t"
2299 "mov %eax,(%ecx)\n\t"
2300 "mov %ebx,0x4(%ecx)\n\t"
2308 i386_emit_add (void)
2310 EMIT_ASM32 (i386_add
,
2311 "add (%esp),%eax\n\t"
2312 "adc 0x4(%esp),%ebx\n\t"
2313 "lea 0x8(%esp),%esp");
2317 i386_emit_sub (void)
2319 EMIT_ASM32 (i386_sub
,
2320 "subl %eax,(%esp)\n\t"
2321 "sbbl %ebx,4(%esp)\n\t"
2327 i386_emit_mul (void)
2333 i386_emit_lsh (void)
2339 i386_emit_rsh_signed (void)
2345 i386_emit_rsh_unsigned (void)
2351 i386_emit_ext (int arg
)
2356 EMIT_ASM32 (i386_ext_8
,
2359 "movl %eax,%ebx\n\t"
2363 EMIT_ASM32 (i386_ext_16
,
2365 "movl %eax,%ebx\n\t"
2369 EMIT_ASM32 (i386_ext_32
,
2370 "movl %eax,%ebx\n\t"
2379 i386_emit_log_not (void)
2381 EMIT_ASM32 (i386_log_not
,
2383 "test %eax,%eax\n\t"
2390 i386_emit_bit_and (void)
2392 EMIT_ASM32 (i386_and
,
2393 "and (%esp),%eax\n\t"
2394 "and 0x4(%esp),%ebx\n\t"
2395 "lea 0x8(%esp),%esp");
2399 i386_emit_bit_or (void)
2401 EMIT_ASM32 (i386_or
,
2402 "or (%esp),%eax\n\t"
2403 "or 0x4(%esp),%ebx\n\t"
2404 "lea 0x8(%esp),%esp");
2408 i386_emit_bit_xor (void)
2410 EMIT_ASM32 (i386_xor
,
2411 "xor (%esp),%eax\n\t"
2412 "xor 0x4(%esp),%ebx\n\t"
2413 "lea 0x8(%esp),%esp");
2417 i386_emit_bit_not (void)
2419 EMIT_ASM32 (i386_bit_not
,
2420 "xor $0xffffffff,%eax\n\t"
2421 "xor $0xffffffff,%ebx\n\t");
2425 i386_emit_equal (void)
2427 EMIT_ASM32 (i386_equal
,
2428 "cmpl %ebx,4(%esp)\n\t"
2429 "jne .Li386_equal_false\n\t"
2430 "cmpl %eax,(%esp)\n\t"
2431 "je .Li386_equal_true\n\t"
2432 ".Li386_equal_false:\n\t"
2434 "jmp .Li386_equal_end\n\t"
2435 ".Li386_equal_true:\n\t"
2437 ".Li386_equal_end:\n\t"
2439 "lea 0x8(%esp),%esp");
2443 i386_emit_less_signed (void)
2445 EMIT_ASM32 (i386_less_signed
,
2446 "cmpl %ebx,4(%esp)\n\t"
2447 "jl .Li386_less_signed_true\n\t"
2448 "jne .Li386_less_signed_false\n\t"
2449 "cmpl %eax,(%esp)\n\t"
2450 "jl .Li386_less_signed_true\n\t"
2451 ".Li386_less_signed_false:\n\t"
2453 "jmp .Li386_less_signed_end\n\t"
2454 ".Li386_less_signed_true:\n\t"
2456 ".Li386_less_signed_end:\n\t"
2458 "lea 0x8(%esp),%esp");
2462 i386_emit_less_unsigned (void)
2464 EMIT_ASM32 (i386_less_unsigned
,
2465 "cmpl %ebx,4(%esp)\n\t"
2466 "jb .Li386_less_unsigned_true\n\t"
2467 "jne .Li386_less_unsigned_false\n\t"
2468 "cmpl %eax,(%esp)\n\t"
2469 "jb .Li386_less_unsigned_true\n\t"
2470 ".Li386_less_unsigned_false:\n\t"
2472 "jmp .Li386_less_unsigned_end\n\t"
2473 ".Li386_less_unsigned_true:\n\t"
2475 ".Li386_less_unsigned_end:\n\t"
2477 "lea 0x8(%esp),%esp");
2481 i386_emit_ref (int size
)
2486 EMIT_ASM32 (i386_ref1
,
2490 EMIT_ASM32 (i386_ref2
,
2494 EMIT_ASM32 (i386_ref4
,
2495 "movl (%eax),%eax");
2498 EMIT_ASM32 (i386_ref8
,
2499 "movl 4(%eax),%ebx\n\t"
2500 "movl (%eax),%eax");
2506 i386_emit_if_goto (int *offset_p
, int *size_p
)
2508 EMIT_ASM32 (i386_if_goto
,
2514 /* Don't trust the assembler to choose the right jump */
2515 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2518 *offset_p
= 11; /* be sure that this matches the sequence above */
2524 i386_emit_goto (int *offset_p
, int *size_p
)
2526 EMIT_ASM32 (i386_goto
,
2527 /* Don't trust the assembler to choose the right jump */
2528 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2536 i386_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2538 int diff
= (to
- (from
+ size
));
2539 unsigned char buf
[sizeof (int)];
2541 /* We're only doing 4-byte sizes at the moment. */
2548 memcpy (buf
, &diff
, sizeof (int));
2549 target_write_memory (from
, buf
, sizeof (int));
2553 i386_emit_const (LONGEST num
)
2555 unsigned char buf
[16];
2557 CORE_ADDR buildaddr
= current_insn_ptr
;
2560 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2561 lo
= num
& 0xffffffff;
2562 memcpy (&buf
[i
], &lo
, sizeof (lo
));
2564 hi
= ((num
>> 32) & 0xffffffff);
2567 buf
[i
++] = 0xbb; /* mov $<n>,%ebx */
2568 memcpy (&buf
[i
], &hi
, sizeof (hi
));
2573 buf
[i
++] = 0x31; buf
[i
++] = 0xdb; /* xor %ebx,%ebx */
2575 append_insns (&buildaddr
, i
, buf
);
2576 current_insn_ptr
= buildaddr
;
2580 i386_emit_call (CORE_ADDR fn
)
2582 unsigned char buf
[16];
2584 CORE_ADDR buildaddr
;
2586 buildaddr
= current_insn_ptr
;
2588 buf
[i
++] = 0xe8; /* call <reladdr> */
2589 offset
= ((int) fn
) - (buildaddr
+ 5);
2590 memcpy (buf
+ 1, &offset
, 4);
2591 append_insns (&buildaddr
, 5, buf
);
2592 current_insn_ptr
= buildaddr
;
2596 i386_emit_reg (int reg
)
2598 unsigned char buf
[16];
2600 CORE_ADDR buildaddr
;
2602 EMIT_ASM32 (i386_reg_a
,
2604 buildaddr
= current_insn_ptr
;
2606 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2607 memcpy (&buf
[i
], ®
, sizeof (reg
));
2609 append_insns (&buildaddr
, i
, buf
);
2610 current_insn_ptr
= buildaddr
;
2611 EMIT_ASM32 (i386_reg_b
,
2612 "mov %eax,4(%esp)\n\t"
2613 "mov 8(%ebp),%eax\n\t"
2615 i386_emit_call (get_raw_reg_func_addr ());
2616 EMIT_ASM32 (i386_reg_c
,
2618 "lea 0x8(%esp),%esp");
2622 i386_emit_pop (void)
2624 EMIT_ASM32 (i386_pop
,
2630 i386_emit_stack_flush (void)
2632 EMIT_ASM32 (i386_stack_flush
,
2638 i386_emit_zero_ext (int arg
)
2643 EMIT_ASM32 (i386_zero_ext_8
,
2644 "and $0xff,%eax\n\t"
2648 EMIT_ASM32 (i386_zero_ext_16
,
2649 "and $0xffff,%eax\n\t"
2653 EMIT_ASM32 (i386_zero_ext_32
,
2662 i386_emit_swap (void)
2664 EMIT_ASM32 (i386_swap
,
2674 i386_emit_stack_adjust (int n
)
2676 unsigned char buf
[16];
2678 CORE_ADDR buildaddr
= current_insn_ptr
;
2681 buf
[i
++] = 0x8d; /* lea $<n>(%esp),%esp */
2685 append_insns (&buildaddr
, i
, buf
);
2686 current_insn_ptr
= buildaddr
;
2689 /* FN's prototype is `LONGEST(*fn)(int)'. */
2692 i386_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2694 unsigned char buf
[16];
2696 CORE_ADDR buildaddr
;
2698 EMIT_ASM32 (i386_int_call_1_a
,
2699 /* Reserve a bit of stack space. */
2701 /* Put the one argument on the stack. */
2702 buildaddr
= current_insn_ptr
;
2704 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
2707 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2709 append_insns (&buildaddr
, i
, buf
);
2710 current_insn_ptr
= buildaddr
;
2711 i386_emit_call (fn
);
2712 EMIT_ASM32 (i386_int_call_1_c
,
2714 "lea 0x8(%esp),%esp");
2717 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2720 i386_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2722 unsigned char buf
[16];
2724 CORE_ADDR buildaddr
;
2726 EMIT_ASM32 (i386_void_call_2_a
,
2727 /* Preserve %eax only; we don't have to worry about %ebx. */
2729 /* Reserve a bit of stack space for arguments. */
2730 "sub $0x10,%esp\n\t"
2731 /* Copy "top" to the second argument position. (Note that
2732 we can't assume function won't scribble on its
2733 arguments, so don't try to restore from this.) */
2734 "mov %eax,4(%esp)\n\t"
2735 "mov %ebx,8(%esp)");
2736 /* Put the first argument on the stack. */
2737 buildaddr
= current_insn_ptr
;
2739 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
2742 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2744 append_insns (&buildaddr
, i
, buf
);
2745 current_insn_ptr
= buildaddr
;
2746 i386_emit_call (fn
);
2747 EMIT_ASM32 (i386_void_call_2_b
,
2748 "lea 0x10(%esp),%esp\n\t"
2749 /* Restore original stack top. */
2755 i386_emit_eq_goto (int *offset_p
, int *size_p
)
2758 /* Check low half first, more likely to be decider */
2759 "cmpl %eax,(%esp)\n\t"
2760 "jne .Leq_fallthru\n\t"
2761 "cmpl %ebx,4(%esp)\n\t"
2762 "jne .Leq_fallthru\n\t"
2763 "lea 0x8(%esp),%esp\n\t"
2766 /* jmp, but don't trust the assembler to choose the right jump */
2767 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2768 ".Leq_fallthru:\n\t"
2769 "lea 0x8(%esp),%esp\n\t"
2780 i386_emit_ne_goto (int *offset_p
, int *size_p
)
2783 /* Check low half first, more likely to be decider */
2784 "cmpl %eax,(%esp)\n\t"
2786 "cmpl %ebx,4(%esp)\n\t"
2787 "je .Lne_fallthru\n\t"
2789 "lea 0x8(%esp),%esp\n\t"
2792 /* jmp, but don't trust the assembler to choose the right jump */
2793 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2794 ".Lne_fallthru:\n\t"
2795 "lea 0x8(%esp),%esp\n\t"
2806 i386_emit_lt_goto (int *offset_p
, int *size_p
)
2809 "cmpl %ebx,4(%esp)\n\t"
2811 "jne .Llt_fallthru\n\t"
2812 "cmpl %eax,(%esp)\n\t"
2813 "jnl .Llt_fallthru\n\t"
2815 "lea 0x8(%esp),%esp\n\t"
2818 /* jmp, but don't trust the assembler to choose the right jump */
2819 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2820 ".Llt_fallthru:\n\t"
2821 "lea 0x8(%esp),%esp\n\t"
2832 i386_emit_le_goto (int *offset_p
, int *size_p
)
2835 "cmpl %ebx,4(%esp)\n\t"
2837 "jne .Lle_fallthru\n\t"
2838 "cmpl %eax,(%esp)\n\t"
2839 "jnle .Lle_fallthru\n\t"
2841 "lea 0x8(%esp),%esp\n\t"
2844 /* jmp, but don't trust the assembler to choose the right jump */
2845 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2846 ".Lle_fallthru:\n\t"
2847 "lea 0x8(%esp),%esp\n\t"
2858 i386_emit_gt_goto (int *offset_p
, int *size_p
)
2861 "cmpl %ebx,4(%esp)\n\t"
2863 "jne .Lgt_fallthru\n\t"
2864 "cmpl %eax,(%esp)\n\t"
2865 "jng .Lgt_fallthru\n\t"
2867 "lea 0x8(%esp),%esp\n\t"
2870 /* jmp, but don't trust the assembler to choose the right jump */
2871 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2872 ".Lgt_fallthru:\n\t"
2873 "lea 0x8(%esp),%esp\n\t"
2884 i386_emit_ge_goto (int *offset_p
, int *size_p
)
2887 "cmpl %ebx,4(%esp)\n\t"
2889 "jne .Lge_fallthru\n\t"
2890 "cmpl %eax,(%esp)\n\t"
2891 "jnge .Lge_fallthru\n\t"
2893 "lea 0x8(%esp),%esp\n\t"
2896 /* jmp, but don't trust the assembler to choose the right jump */
2897 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2898 ".Lge_fallthru:\n\t"
2899 "lea 0x8(%esp),%esp\n\t"
2909 struct emit_ops i386_emit_ops
=
2917 i386_emit_rsh_signed
,
2918 i386_emit_rsh_unsigned
,
2926 i386_emit_less_signed
,
2927 i386_emit_less_unsigned
,
2931 i386_write_goto_address
,
2936 i386_emit_stack_flush
,
2939 i386_emit_stack_adjust
,
2940 i386_emit_int_call_1
,
2941 i386_emit_void_call_2
,
2952 x86_target::emit_ops ()
2955 if (is_64bit_tdesc ())
2956 return &amd64_emit_ops
;
2959 return &i386_emit_ops
;
/* Implementation of target ops method "sw_breakpoint_from_kind".  */

const gdb_byte *
x86_target::sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}

bool
x86_target::low_supports_range_stepping ()
{
  return true;
}

static int
x86_get_ipa_tdesc_idx (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);
  const struct target_desc *tdesc = regcache->tdesc;

#ifdef __x86_64__
  return amd64_get_ipa_tdesc_idx (tdesc);
#endif

  if (tdesc == tdesc_i386_linux_no_xml)
    return X86_TDESC_SSE;

  return i386_get_ipa_tdesc_idx (tdesc);
}

/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.  */

struct linux_target_ops the_low_target =
  {
    x86_get_ipa_tdesc_idx,
  };

/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_x86_target;

void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
  tdesc_amd64_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_amd64_linux_no_xml,
			   amd64_linux_read_description (X86_XSTATE_SSE_MASK,
							 false));
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;

  tdesc_i386_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_i386_linux_no_xml,
			   i386_linux_read_description (X86_XSTATE_SSE_MASK));
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}