/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.

   Copyright (C) 2002-2024 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "linux-low.h"

#include "gdbsupport/x86-xstate.h"
#include "nat/x86-xstate.h"
#include "nat/gdb_ptrace.h"
#include "nat/amd64-linux-siginfo.h"

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "gdbsupport/agent.h"
#include "tracepoint.h"

#include "nat/linux-nat.h"
#include "nat/x86-linux.h"
#include "nat/x86-linux-dregs.h"
#include "linux-x86-tdesc.h"
static target_desc_up tdesc_amd64_linux_no_xml;
static target_desc_up tdesc_i386_linux_no_xml;

static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
/* Backward compatibility for gdb without XML support.  */

static const char xmltarget_i386_linux_no_xml[] = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

static const char xmltarget_amd64_linux_no_xml[] = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#include <sys/procfs.h>
#include <sys/uio.h>

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
/* Linux target op definitions for the x86 architecture.
   This is initialized assuming an amd64 target.
   'low_arch_setup' will correct it for i386 or amd64 targets.  */

class x86_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

  void process_qsupported (gdb::array_view<const char * const> features)
    override;

  bool supports_tracepoints () override;

  bool supports_fast_tracepoints () override;

  int install_fast_tracepoint_jump_pad
    (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
     CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
     CORE_ADDR *trampoline, ULONGEST *trampoline_size,
     unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
     CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
     char *err) override;

  int get_min_fast_tracepoint_insn_len () override;

  struct emit_ops *emit_ops () override;

  int get_ipa_tdesc_idx () override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  int low_decr_pc_after_break () override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
                        int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
                        int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;

  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */

  /* Need to fix up i386 siginfo if host is amd64.  */
  bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
                          int direction) override;

  arch_process_info *low_new_process () override;

  void low_delete_process (arch_process_info *info) override;

  void low_new_thread (lwp_info *) override;

  void low_delete_thread (arch_lwp_info *) override;

  void low_new_fork (process_info *parent, process_info *child) override;

  void low_prepare_to_resume (lwp_info *lwp) override;

  int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;

  bool low_supports_range_stepping () override;

  bool low_supports_catch_syscall () override;

  void low_get_syscall_trapinfo (regcache *regcache, int *sysno) override;

private:

  /* Update all the target description of all processes; a new GDB
     connected, and it may or may not support xml target descriptions.  */
  void update_xmltarget ();
};

/* The singleton target ops object.  */

static x86_target the_x86_target;
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};
#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8
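
/* Likewise for the amd64 register array layout.  Entries that are -1
   have no slot in `struct user'; those registers are transferred
   through the floating-point and xstate regsets rather than the
   usrregs interface.  */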
static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1,                       /* MPX registers BND0 ... BND3.  */
  -1, -1,                               /* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1,       /* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,       /* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,       /* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,       /* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)
#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif
/* Returns true if THREAD belongs to a x86-64 process, per the tdesc.  */

static int
is_64bit_tdesc (thread_info *thread)
{
  struct regcache *regcache = get_thread_regcache (thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
  gdb_assert (lwp != nullptr);
  int use_64bit = is_64bit_tdesc (get_lwp_thread (lwp));

  if (use_64bit)
    {
      switch (idx)
        {
        case FS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
            return PS_OK;
          break;
        case GS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
            return PS_OK;
          break;
        default:
          return PS_BADADDR;
        }
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
                (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

int
x86_target::low_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
  gdb_assert (lwp != nullptr);
  int use_64bit = is_64bit_tdesc (get_lwp_thread (lwp));

  if (use_64bit)
    {
      void *base;

      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
        {
          *addr = (CORE_ADDR) (uintptr_t) base;
          return 0;
        }

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
                lwpid_of (thr),
                (void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}
bool
x86_target::low_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc (current_thread))
    return false;
#endif

  return regno >= I386_NUM_REGS;
}

bool
x86_target::low_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc (current_thread))
    return false;
#endif

  return regno >= I386_NUM_REGS;
}
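
/* Collect register REGNO from REGCACHE into BUF.  When a 64-bit
   gdbserver debugs a 32-bit inferior, the transfer slot reserved for
   the register is 8 bytes wide, so the 4-byte value is sign- or
   zero-extended to fill it.  */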
static void
collect_register_i386 (struct regcache *regcache, int regno, void *buf)
{
  collect_register (regcache, regno, buf);

#ifdef __x86_64__
  /* In case of x86_64 -m32, collect_register only writes 4 bytes, but the
     space reserved in buf for the register is 8 bytes.  Make sure the entire
     reserved space is initialized.  */

  gdb_assert (register_size (regcache->tdesc, regno) == 4);

  if (regno == RAX)
    {
      /* Sign extend EAX value to avoid potential syscall restart
         problems.

         See amd64_linux_collect_native_gregset() in
         gdb/amd64-linux-nat.c for a detailed explanation.  */
      *(int64_t *) buf = *(int32_t *) buf;
    }
  else
    *(uint64_t *) buf = *(uint32_t *) buf;
#endif
}
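
/* Fill the general-purpose register buffer BUF (`struct user' layout)
   from REGCACHE.  */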
static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register_i386 (regcache, i, ((char *) buf) + i386_regmap[i]);

  /* Handle ORIG_EAX, which is not in i386_regmap.  */
  collect_register_i386 (regcache, find_regno (regcache->tdesc, "orig_eax"),
                         ((char *) buf) + ORIG_EAX * REGSIZE);
}
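
/* Supply the general-purpose registers in BUF (`struct user' layout)
   to REGCACHE.  */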
static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
                           ((char *) buf) + ORIG_EAX * REGSIZE);
}
static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};
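
/* Software breakpoints (the int3 instruction) are always available
   on x86.  */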
bool
x86_target::low_supports_breakpoints ()
{
  return true;
}

CORE_ADDR
x86_target::low_get_pc (regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      uint32_t pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

void
x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      uint32_t newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}

int
x86_target::low_decr_pc_after_break ()
{
  return 1;
}

static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

bool
x86_target::low_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  read_memory (pc, &c, 1);
  if (c == 0xCC)
    return true;

  return false;
}
/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };

/* Breakpoint/Watchpoint support.  */

bool
x86_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}
int
x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
                              int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
        enum target_hw_bp_type hw_type
          = raw_bkpt_type_to_target_hw_bp_type (type);
        struct x86_debug_reg_state *state
          = &proc->priv->arch_private->debug_reg_state;

        return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

int
x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
                              int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
        enum target_hw_bp_type hw_type
          = raw_bkpt_type_to_target_hw_bp_type (type);
        struct x86_debug_reg_state *state
          = &proc->priv->arch_private->debug_reg_state;

        return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

bool
x86_target::low_stopped_by_watchpoint ()
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

CORE_ADDR
x86_target::low_stopped_data_address ()
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;

  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
                                   &addr))
    return addr;
  return 0;
}
/* Called when a new process is created.  */

arch_process_info *
x86_target::low_new_process ()
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a process is being deleted.  */

void
x86_target::low_delete_process (arch_process_info *info)
{
  xfree (info);
}

void
x86_target::low_new_thread (lwp_info *lwp)
{
  /* This comes from nat/.  */
  x86_linux_new_thread (lwp);
}

void
x86_target::low_delete_thread (arch_lwp_info *alwp)
{
  /* This comes from nat/.  */
  x86_linux_delete_thread (alwp);
}

/* Target routine for new_fork.  */

void
x86_target::low_new_fork (process_info *parent, process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
              && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
              && child->priv->arch_private != NULL);

  /* Linux kernel before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug registers mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug registers mirror will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

void
x86_target::low_prepare_to_resume (lwp_info *lwp)
{
  /* This comes from nat/.  */
  x86_linux_prepare_to_resume (lwp);
}
/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */
/* Convert a ptrace/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
   INF.  */

bool
x86_target::low_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc (current_thread))
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
                                             FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
                                             FIXUP_X32);
#endif

  return false;
}
/* Set to nonzero by process_qsupported if the connected GDB supports
   XML target descriptions.  */
static int use_xml;

/* Format of XSAVE extended state is:
        struct
        {
          fxsave_bytes[0..463]
          sw_usable_bytes[464..511]
          xstate_hdr_bytes[512..575]
          avx_bytes[576..831]
          future_state etc
        };

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;
/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  static int xsave_len;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
        error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
        error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
        {
          have_ptrace_getfpxregs = 0;
          have_ptrace_getregset = TRIBOOL_FALSE;
          return i386_linux_read_description (X86_XSTATE_X87);
        }
      else
        have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
        return tdesc_amd64_linux_no_xml.get ();
      else
#endif
        return tdesc_i386_linux_no_xml.get ();
    }

  if (have_ptrace_getregset == TRIBOOL_UNKNOWN)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
                  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
        have_ptrace_getregset = TRIBOOL_FALSE;
      else
        {
          have_ptrace_getregset = TRIBOOL_TRUE;

          /* Get XCR0 from XSAVE extended state.  */
          xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
                             / sizeof (uint64_t))];

          /* No MPX on x32.  */
          if (machine == EM_X86_64 && !is_elf64)
            xcr0 &= ~X86_XSTATE_MPX;

          xsave_len = x86_xsave_length ();

          /* Use PTRACE_GETREGSET if it is available.  */
          for (regset = x86_regsets;
               regset->fill_function != NULL; regset++)
            if (regset->get_request == PTRACE_GETREGSET)
              regset->size = xsave_len;
            else if (regset->type != GENERAL_REGS)
              regset->size = 0;
        }
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset == TRIBOOL_TRUE
                   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    i387_set_xsave_mask (xcr0, xsave_len);

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      const target_desc *tdesc = NULL;

      if (xcr0_features)
        tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
                                              !is_elf64);

      if (tdesc == NULL)
        tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
      return tdesc;
#endif
    }
  else
    {
      const target_desc *tdesc = NULL;

      if (xcr0_features)
        tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);

      if (tdesc == NULL)
        tdesc = i386_linux_read_description (X86_XSTATE_SSE);

      return tdesc;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}
/* Update all the target description of all processes; a new GDB
   connected, and it may or may not support xml target descriptions.  */

void
x86_target::update_xmltarget ()
{
  scoped_restore_current_thread restore_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_process ([this] (process_info *proc) {
    int pid = proc->pid;

    /* Look up any thread of this process.  */
    switch_to_thread (find_any_thread_of_pid (pid));

    low_arch_setup ();
  });
}
/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

void
x86_target::process_qsupported (gdb::array_view<const char * const> features)
{
  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;

  for (const char *feature : features)
    {
      if (startswith (feature, "xmlRegisters="))
        {
          char *copy = xstrdup (feature + 13);

          char *saveptr;
          for (char *p = strtok_r (copy, ",", &saveptr);
               p != NULL;
               p = strtok_r (NULL, ",", &saveptr))
            {
              if (strcmp (p, "i386") == 0)
                {
                  use_xml = 1;
                  break;
                }
            }

          free (copy);
        }
    }

  update_xmltarget ();
}
/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif

static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const regs_info *
x86_target::get_regs_info ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc (current_thread))
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}
/* Initialize the target description for the architecture of the
   inferior.  */

void
x86_target::low_arch_setup ()
{
  current_process ()->tdesc = x86_linux_read_description ();
}

bool
x86_target::low_supports_catch_syscall ()
{
  return true;
}

/* Fill *SYSNO and *SYSRET with the syscall nr trapped and the syscall return
   code.  This should only be called if LWP got a SYSCALL_SIGTRAP.  */

void
x86_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}

bool
x86_target::supports_tracepoints ()
{
  return true;
}
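
/* Write the LEN bytes at BUF into inferior memory at *TO and advance
   *TO past them.  Used while assembling jump pads and compiled agent
   expressions in the inferior.  */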
static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}
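
/* Decode OP, a string of whitespace-separated hexadecimal byte values
   such as "48 89 e6", into raw bytes at BUF.  Returns the number of
   bytes written.  */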
static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
        break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
1164 /* Build a jump pad that saves registers and calls a collection
1165 function. Writes a jump instruction to the jump pad to
1166 JJUMPAD_INSN. The caller is responsible to write it in at the
1167 tracepoint address. */
1170 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1171 CORE_ADDR collector
,
1174 CORE_ADDR
*jump_entry
,
1175 CORE_ADDR
*trampoline
,
1176 ULONGEST
*trampoline_size
,
1177 unsigned char *jjump_pad_insn
,
1178 ULONGEST
*jjump_pad_insn_size
,
1179 CORE_ADDR
*adjusted_insn_addr
,
1180 CORE_ADDR
*adjusted_insn_addr_end
,
1183 unsigned char buf
[40];
1187 CORE_ADDR buildaddr
= *jump_entry
;
1189 /* Build the jump pad. */
1191 /* First, do tracepoint data collection. Save registers. */
1193 /* Need to ensure stack pointer saved first. */
1194 buf
[i
++] = 0x54; /* push %rsp */
1195 buf
[i
++] = 0x55; /* push %rbp */
1196 buf
[i
++] = 0x57; /* push %rdi */
1197 buf
[i
++] = 0x56; /* push %rsi */
1198 buf
[i
++] = 0x52; /* push %rdx */
1199 buf
[i
++] = 0x51; /* push %rcx */
1200 buf
[i
++] = 0x53; /* push %rbx */
1201 buf
[i
++] = 0x50; /* push %rax */
1202 buf
[i
++] = 0x41; buf
[i
++] = 0x57; /* push %r15 */
1203 buf
[i
++] = 0x41; buf
[i
++] = 0x56; /* push %r14 */
1204 buf
[i
++] = 0x41; buf
[i
++] = 0x55; /* push %r13 */
1205 buf
[i
++] = 0x41; buf
[i
++] = 0x54; /* push %r12 */
1206 buf
[i
++] = 0x41; buf
[i
++] = 0x53; /* push %r11 */
1207 buf
[i
++] = 0x41; buf
[i
++] = 0x52; /* push %r10 */
1208 buf
[i
++] = 0x41; buf
[i
++] = 0x51; /* push %r9 */
1209 buf
[i
++] = 0x41; buf
[i
++] = 0x50; /* push %r8 */
1210 buf
[i
++] = 0x9c; /* pushfq */
1211 buf
[i
++] = 0x48; /* movabs <addr>,%rdi */
1213 memcpy (buf
+ i
, &tpaddr
, 8);
1215 buf
[i
++] = 0x57; /* push %rdi */
1216 append_insns (&buildaddr
, i
, buf
);
1218 /* Stack space for the collecting_t object. */
1220 i
+= push_opcode (&buf
[i
], "48 83 ec 18"); /* sub $0x18,%rsp */
1221 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov <tpoint>,%rax */
1222 memcpy (buf
+ i
, &tpoint
, 8);
1224 i
+= push_opcode (&buf
[i
], "48 89 04 24"); /* mov %rax,(%rsp) */
1225 i
+= push_opcode (&buf
[i
],
1226 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1227 i
+= push_opcode (&buf
[i
], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1228 append_insns (&buildaddr
, i
, buf
);
1232 i
+= push_opcode (&buf
[i
], "48 be"); /* movl <lockaddr>,%rsi */
1233 memcpy (&buf
[i
], (void *) &lockaddr
, 8);
1235 i
+= push_opcode (&buf
[i
], "48 89 e1"); /* mov %rsp,%rcx */
1236 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1237 i
+= push_opcode (&buf
[i
], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1238 i
+= push_opcode (&buf
[i
], "48 85 c0"); /* test %rax,%rax */
1239 i
+= push_opcode (&buf
[i
], "75 f4"); /* jne <again> */
1240 append_insns (&buildaddr
, i
, buf
);
1242 /* Set up the gdb_collect call. */
1243 /* At this point, (stack pointer + 0x18) is the base of our saved
1247 i
+= push_opcode (&buf
[i
], "48 89 e6"); /* mov %rsp,%rsi */
1248 i
+= push_opcode (&buf
[i
], "48 83 c6 18"); /* add $0x18,%rsi */
1250 /* tpoint address may be 64-bit wide. */
1251 i
+= push_opcode (&buf
[i
], "48 bf"); /* movl <addr>,%rdi */
1252 memcpy (buf
+ i
, &tpoint
, 8);
1254 append_insns (&buildaddr
, i
, buf
);
1256 /* The collector function being in the shared library, may be
1257 >31-bits away off the jump pad. */
1259 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov $collector,%rax */
1260 memcpy (buf
+ i
, &collector
, 8);
1262 i
+= push_opcode (&buf
[i
], "ff d0"); /* callq *%rax */
1263 append_insns (&buildaddr
, i
, buf
);
1265 /* Clear the spin-lock. */
1267 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1268 i
+= push_opcode (&buf
[i
], "48 a3"); /* mov %rax, lockaddr */
1269 memcpy (buf
+ i
, &lockaddr
, 8);
1271 append_insns (&buildaddr
, i
, buf
);
1273 /* Remove stack that had been used for the collect_t object. */
1275 i
+= push_opcode (&buf
[i
], "48 83 c4 18"); /* add $0x18,%rsp */
1276 append_insns (&buildaddr
, i
, buf
);
1278 /* Restore register state. */
1280 buf
[i
++] = 0x48; /* add $0x8,%rsp */
1284 buf
[i
++] = 0x9d; /* popfq */
1285 buf
[i
++] = 0x41; buf
[i
++] = 0x58; /* pop %r8 */
1286 buf
[i
++] = 0x41; buf
[i
++] = 0x59; /* pop %r9 */
1287 buf
[i
++] = 0x41; buf
[i
++] = 0x5a; /* pop %r10 */
1288 buf
[i
++] = 0x41; buf
[i
++] = 0x5b; /* pop %r11 */
1289 buf
[i
++] = 0x41; buf
[i
++] = 0x5c; /* pop %r12 */
1290 buf
[i
++] = 0x41; buf
[i
++] = 0x5d; /* pop %r13 */
1291 buf
[i
++] = 0x41; buf
[i
++] = 0x5e; /* pop %r14 */
1292 buf
[i
++] = 0x41; buf
[i
++] = 0x5f; /* pop %r15 */
1293 buf
[i
++] = 0x58; /* pop %rax */
1294 buf
[i
++] = 0x5b; /* pop %rbx */
1295 buf
[i
++] = 0x59; /* pop %rcx */
1296 buf
[i
++] = 0x5a; /* pop %rdx */
1297 buf
[i
++] = 0x5e; /* pop %rsi */
1298 buf
[i
++] = 0x5f; /* pop %rdi */
1299 buf
[i
++] = 0x5d; /* pop %rbp */
1300 buf
[i
++] = 0x5c; /* pop %rsp */
1301 append_insns (&buildaddr
, i
, buf
);
1303 /* Now, adjust the original instruction to execute in the jump
1305 *adjusted_insn_addr
= buildaddr
;
1306 relocate_instruction (&buildaddr
, tpaddr
);
1307 *adjusted_insn_addr_end
= buildaddr
;
1309 /* Finally, write a jump back to the program. */
1311 loffset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1312 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1315 "E.Jump back from jump pad too far from tracepoint "
1316 "(offset 0x%" PRIx64
" > int32).", loffset
);
1320 offset
= (int) loffset
;
1321 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1322 memcpy (buf
+ 1, &offset
, 4);
1323 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1325 /* The jump pad is now built. Wire in a jump to our jump pad. This
1326 is always done last (by our caller actually), so that we can
1327 install fast tracepoints with threads running. This relies on
1328 the agent's atomic write support. */
1329 loffset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1330 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1333 "E.Jump pad too far from tracepoint "
1334 "(offset 0x%" PRIx64
" > int32).", loffset
);
1338 offset
= (int) loffset
;
1340 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1341 memcpy (buf
+ 1, &offset
, 4);
1342 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1343 *jjump_pad_insn_size
= sizeof (jump_insn
);
1345 /* Return the end address of our pad. */
1346 *jump_entry
= buildaddr
;
1351 #endif /* __x86_64__ */
1353 /* Build a jump pad that saves registers and calls a collection
1354 function. Writes a jump instruction to the jump pad to
1355 JJUMPAD_INSN. The caller is responsible to write it in at the
1356 tracepoint address. */
1359 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1360 CORE_ADDR collector
,
1363 CORE_ADDR
*jump_entry
,
1364 CORE_ADDR
*trampoline
,
1365 ULONGEST
*trampoline_size
,
1366 unsigned char *jjump_pad_insn
,
1367 ULONGEST
*jjump_pad_insn_size
,
1368 CORE_ADDR
*adjusted_insn_addr
,
1369 CORE_ADDR
*adjusted_insn_addr_end
,
1372 unsigned char buf
[0x100];
1374 CORE_ADDR buildaddr
= *jump_entry
;
1376 /* Build the jump pad. */
1378 /* First, do tracepoint data collection. Save registers. */
1380 buf
[i
++] = 0x60; /* pushad */
1381 buf
[i
++] = 0x68; /* push tpaddr aka $pc */
1382 *((int *)(buf
+ i
)) = (int) tpaddr
;
1384 buf
[i
++] = 0x9c; /* pushf */
1385 buf
[i
++] = 0x1e; /* push %ds */
1386 buf
[i
++] = 0x06; /* push %es */
1387 buf
[i
++] = 0x0f; /* push %fs */
1389 buf
[i
++] = 0x0f; /* push %gs */
1391 buf
[i
++] = 0x16; /* push %ss */
1392 buf
[i
++] = 0x0e; /* push %cs */
1393 append_insns (&buildaddr
, i
, buf
);
1395 /* Stack space for the collecting_t object. */
1397 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1399 /* Build the object. */
1400 i
+= push_opcode (&buf
[i
], "b8"); /* mov <tpoint>,%eax */
1401 memcpy (buf
+ i
, &tpoint
, 4);
1403 i
+= push_opcode (&buf
[i
], "89 04 24"); /* mov %eax,(%esp) */
1405 i
+= push_opcode (&buf
[i
], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1406 i
+= push_opcode (&buf
[i
], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1407 append_insns (&buildaddr
, i
, buf
);
1409 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1410 If we cared for it, this could be using xchg alternatively. */
1413 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1414 i
+= push_opcode (&buf
[i
], "f0 0f b1 25"); /* lock cmpxchg
1416 memcpy (&buf
[i
], (void *) &lockaddr
, 4);
1418 i
+= push_opcode (&buf
[i
], "85 c0"); /* test %eax,%eax */
1419 i
+= push_opcode (&buf
[i
], "75 f2"); /* jne <again> */
1420 append_insns (&buildaddr
, i
, buf
);
1423 /* Set up arguments to the gdb_collect call. */
1425 i
+= push_opcode (&buf
[i
], "89 e0"); /* mov %esp,%eax */
1426 i
+= push_opcode (&buf
[i
], "83 c0 08"); /* add $0x08,%eax */
1427 i
+= push_opcode (&buf
[i
], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1428 append_insns (&buildaddr
, i
, buf
);
1431 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1432 append_insns (&buildaddr
, i
, buf
);
1435 i
+= push_opcode (&buf
[i
], "c7 04 24"); /* movl <addr>,(%esp) */
1436 memcpy (&buf
[i
], (void *) &tpoint
, 4);
1438 append_insns (&buildaddr
, i
, buf
);
1440 buf
[0] = 0xe8; /* call <reladdr> */
1441 offset
= collector
- (buildaddr
+ sizeof (jump_insn
));
1442 memcpy (buf
+ 1, &offset
, 4);
1443 append_insns (&buildaddr
, 5, buf
);
1444 /* Clean up after the call. */
1445 buf
[0] = 0x83; /* add $0x8,%esp */
1448 append_insns (&buildaddr
, 3, buf
);
1451 /* Clear the spin-lock. This would need the LOCK prefix on older
1454 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1455 i
+= push_opcode (&buf
[i
], "a3"); /* mov %eax, lockaddr */
1456 memcpy (buf
+ i
, &lockaddr
, 4);
1458 append_insns (&buildaddr
, i
, buf
);
1461 /* Remove stack that had been used for the collect_t object. */
1463 i
+= push_opcode (&buf
[i
], "83 c4 08"); /* add $0x08,%esp */
1464 append_insns (&buildaddr
, i
, buf
);
1467 buf
[i
++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1470 buf
[i
++] = 0x17; /* pop %ss */
1471 buf
[i
++] = 0x0f; /* pop %gs */
1473 buf
[i
++] = 0x0f; /* pop %fs */
1475 buf
[i
++] = 0x07; /* pop %es */
1476 buf
[i
++] = 0x1f; /* pop %ds */
1477 buf
[i
++] = 0x9d; /* popf */
1478 buf
[i
++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1481 buf
[i
++] = 0x61; /* popad */
1482 append_insns (&buildaddr
, i
, buf
);
1484 /* Now, adjust the original instruction to execute in the jump
1486 *adjusted_insn_addr
= buildaddr
;
1487 relocate_instruction (&buildaddr
, tpaddr
);
1488 *adjusted_insn_addr_end
= buildaddr
;
1490 /* Write the jump back to the program. */
1491 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1492 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1493 memcpy (buf
+ 1, &offset
, 4);
1494 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1496 /* The jump pad is now built. Wire in a jump to our jump pad. This
1497 is always done last (by our caller actually), so that we can
1498 install fast tracepoints with threads running. This relies on
1499 the agent's atomic write support. */
1502 /* Create a trampoline. */
1503 *trampoline_size
= sizeof (jump_insn
);
1504 if (!claim_trampoline_space (*trampoline_size
, trampoline
))
1506 /* No trampoline space available. */
1508 "E.Cannot allocate trampoline space needed for fast "
1509 "tracepoints on 4-byte instructions.");
1513 offset
= *jump_entry
- (*trampoline
+ sizeof (jump_insn
));
1514 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1515 memcpy (buf
+ 1, &offset
, 4);
1516 target_write_memory (*trampoline
, buf
, sizeof (jump_insn
));
1518 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1519 offset
= (*trampoline
- (tpaddr
+ sizeof (small_jump_insn
))) & 0xffff;
1520 memcpy (buf
, small_jump_insn
, sizeof (small_jump_insn
));
1521 memcpy (buf
+ 2, &offset
, 2);
1522 memcpy (jjump_pad_insn
, buf
, sizeof (small_jump_insn
));
1523 *jjump_pad_insn_size
= sizeof (small_jump_insn
);
1527 /* Else use a 32-bit relative jump instruction. */
1528 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1529 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1530 memcpy (buf
+ 1, &offset
, 4);
1531 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1532 *jjump_pad_insn_size
= sizeof (jump_insn
);
1535 /* Return the end address of our pad. */
1536 *jump_entry
= buildaddr
;
1542 x86_target::supports_fast_tracepoints ()
1548 x86_target::install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
,
1550 CORE_ADDR collector
,
1553 CORE_ADDR
*jump_entry
,
1554 CORE_ADDR
*trampoline
,
1555 ULONGEST
*trampoline_size
,
1556 unsigned char *jjump_pad_insn
,
1557 ULONGEST
*jjump_pad_insn_size
,
1558 CORE_ADDR
*adjusted_insn_addr
,
1559 CORE_ADDR
*adjusted_insn_addr_end
,
1563 if (is_64bit_tdesc (current_thread
))
1564 return amd64_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1565 collector
, lockaddr
,
1566 orig_size
, jump_entry
,
1567 trampoline
, trampoline_size
,
1569 jjump_pad_insn_size
,
1571 adjusted_insn_addr_end
,
1575 return i386_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1576 collector
, lockaddr
,
1577 orig_size
, jump_entry
,
1578 trampoline
, trampoline_size
,
1580 jjump_pad_insn_size
,
1582 adjusted_insn_addr_end
,
1586 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1590 x86_target::get_min_fast_tracepoint_insn_len ()
1592 static int warned_about_fast_tracepoints
= 0;
1595 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1596 used for fast tracepoints. */
1597 if (is_64bit_tdesc (current_thread
))
1601 if (agent_loaded_p ())
1603 char errbuf
[IPA_BUFSIZ
];
1607 /* On x86, if trampolines are available, then 4-byte jump instructions
1608 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1609 with a 4-byte offset are used instead. */
1610 if (have_fast_tracepoint_trampoline_buffer (errbuf
))
1614 /* GDB has no channel to explain to user why a shorter fast
1615 tracepoint is not possible, but at least make GDBserver
1616 mention that something has gone awry. */
1617 if (!warned_about_fast_tracepoints
)
1619 warning ("4-byte fast tracepoints not available; %s", errbuf
);
1620 warned_about_fast_tracepoints
= 1;
1627 /* Indicate that the minimum length is currently unknown since the IPA
1628 has not loaded yet. */
1634 add_insns (unsigned char *start
, int len
)
1636 CORE_ADDR buildaddr
= current_insn_ptr
;
1638 threads_debug_printf ("Adding %d bytes of insn at %s",
1639 len
, paddress (buildaddr
));
1641 append_insns (&buildaddr
, len
, start
);
1642 current_insn_ptr
= buildaddr
;
1645 /* Our general strategy for emitting code is to avoid specifying raw
1646 bytes whenever possible, and instead copy a block of inline asm
1647 that is embedded in the function. This is a little messy, because
1648 we need to keep the compiler from discarding what looks like dead
1649 code, plus suppress various warnings. */
1651 #define EMIT_ASM(NAME, INSNS) \
1654 extern unsigned char start_ ## NAME, end_ ## NAME; \
1655 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1656 __asm__ ("jmp end_" #NAME "\n" \
1657 "\t" "start_" #NAME ":" \
1659 "\t" "end_" #NAME ":"); \
1664 #define EMIT_ASM32(NAME,INSNS) \
1667 extern unsigned char start_ ## NAME, end_ ## NAME; \
1668 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1669 __asm__ (".code32\n" \
1670 "\t" "jmp end_" #NAME "\n" \
1671 "\t" "start_" #NAME ":\n" \
1673 "\t" "end_" #NAME ":\n" \
1679 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
1686 amd64_emit_prologue (void)
1688 EMIT_ASM (amd64_prologue
,
1690 "movq %rsp,%rbp\n\t"
1691 "sub $0x20,%rsp\n\t"
1692 "movq %rdi,-8(%rbp)\n\t"
1693 "movq %rsi,-16(%rbp)");
1698 amd64_emit_epilogue (void)
1700 EMIT_ASM (amd64_epilogue
,
1701 "movq -16(%rbp),%rdi\n\t"
1702 "movq %rax,(%rdi)\n\t"
1709 amd64_emit_add (void)
1711 EMIT_ASM (amd64_add
,
1712 "add (%rsp),%rax\n\t"
1713 "lea 0x8(%rsp),%rsp");
1717 amd64_emit_sub (void)
1719 EMIT_ASM (amd64_sub
,
1720 "sub %rax,(%rsp)\n\t"
1725 amd64_emit_mul (void)
1731 amd64_emit_lsh (void)
1737 amd64_emit_rsh_signed (void)
1743 amd64_emit_rsh_unsigned (void)
1749 amd64_emit_ext (int arg
)
1754 EMIT_ASM (amd64_ext_8
,
1760 EMIT_ASM (amd64_ext_16
,
1765 EMIT_ASM (amd64_ext_32
,
1774 amd64_emit_log_not (void)
1776 EMIT_ASM (amd64_log_not
,
1777 "test %rax,%rax\n\t"
1783 amd64_emit_bit_and (void)
1785 EMIT_ASM (amd64_and
,
1786 "and (%rsp),%rax\n\t"
1787 "lea 0x8(%rsp),%rsp");
1791 amd64_emit_bit_or (void)
1794 "or (%rsp),%rax\n\t"
1795 "lea 0x8(%rsp),%rsp");
1799 amd64_emit_bit_xor (void)
1801 EMIT_ASM (amd64_xor
,
1802 "xor (%rsp),%rax\n\t"
1803 "lea 0x8(%rsp),%rsp");
1807 amd64_emit_bit_not (void)
1809 EMIT_ASM (amd64_bit_not
,
1810 "xorq $0xffffffffffffffff,%rax");
1814 amd64_emit_equal (void)
1816 EMIT_ASM (amd64_equal
,
1817 "cmp %rax,(%rsp)\n\t"
1818 "je .Lamd64_equal_true\n\t"
1820 "jmp .Lamd64_equal_end\n\t"
1821 ".Lamd64_equal_true:\n\t"
1823 ".Lamd64_equal_end:\n\t"
1824 "lea 0x8(%rsp),%rsp");
1828 amd64_emit_less_signed (void)
1830 EMIT_ASM (amd64_less_signed
,
1831 "cmp %rax,(%rsp)\n\t"
1832 "jl .Lamd64_less_signed_true\n\t"
1834 "jmp .Lamd64_less_signed_end\n\t"
1835 ".Lamd64_less_signed_true:\n\t"
1837 ".Lamd64_less_signed_end:\n\t"
1838 "lea 0x8(%rsp),%rsp");
1842 amd64_emit_less_unsigned (void)
1844 EMIT_ASM (amd64_less_unsigned
,
1845 "cmp %rax,(%rsp)\n\t"
1846 "jb .Lamd64_less_unsigned_true\n\t"
1848 "jmp .Lamd64_less_unsigned_end\n\t"
1849 ".Lamd64_less_unsigned_true:\n\t"
1851 ".Lamd64_less_unsigned_end:\n\t"
1852 "lea 0x8(%rsp),%rsp");
1856 amd64_emit_ref (int size
)
1861 EMIT_ASM (amd64_ref1
,
1865 EMIT_ASM (amd64_ref2
,
1869 EMIT_ASM (amd64_ref4
,
1870 "movl (%rax),%eax");
1873 EMIT_ASM (amd64_ref8
,
1874 "movq (%rax),%rax");
1880 amd64_emit_if_goto (int *offset_p
, int *size_p
)
1882 EMIT_ASM (amd64_if_goto
,
1886 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1894 amd64_emit_goto (int *offset_p
, int *size_p
)
1896 EMIT_ASM (amd64_goto
,
1897 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1905 amd64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
1907 int diff
= (to
- (from
+ size
));
1908 unsigned char buf
[sizeof (int)];
1916 memcpy (buf
, &diff
, sizeof (int));
1917 target_write_memory (from
, buf
, sizeof (int));
1921 amd64_emit_const (LONGEST num
)
1923 unsigned char buf
[16];
1925 CORE_ADDR buildaddr
= current_insn_ptr
;
1928 buf
[i
++] = 0x48; buf
[i
++] = 0xb8; /* mov $<n>,%rax */
1929 memcpy (&buf
[i
], &num
, sizeof (num
));
1931 append_insns (&buildaddr
, i
, buf
);
1932 current_insn_ptr
= buildaddr
;
1936 amd64_emit_call (CORE_ADDR fn
)
1938 unsigned char buf
[16];
1940 CORE_ADDR buildaddr
;
1943 /* The destination function being in the shared library, may be
1944 >31-bits away off the compiled code pad. */
1946 buildaddr
= current_insn_ptr
;
1948 offset64
= fn
- (buildaddr
+ 1 /* call op */ + 4 /* 32-bit offset */);
1952 if (offset64
> INT_MAX
|| offset64
< INT_MIN
)
1954 /* Offset is too large for a call. Use callq, but that requires
1955 a register, so avoid it if possible. Use r10, since it is
1956 call-clobbered, we don't have to push/pop it. */
1957 buf
[i
++] = 0x48; /* mov $fn,%r10 */
1959 memcpy (buf
+ i
, &fn
, 8);
1961 buf
[i
++] = 0xff; /* callq *%r10 */
1966 int offset32
= offset64
; /* we know we can't overflow here. */
1968 buf
[i
++] = 0xe8; /* call <reladdr> */
1969 memcpy (buf
+ i
, &offset32
, 4);
1973 append_insns (&buildaddr
, i
, buf
);
1974 current_insn_ptr
= buildaddr
;
1978 amd64_emit_reg (int reg
)
1980 unsigned char buf
[16];
1982 CORE_ADDR buildaddr
;
1984 /* Assume raw_regs is still in %rdi. */
1985 buildaddr
= current_insn_ptr
;
1987 buf
[i
++] = 0xbe; /* mov $<n>,%esi */
1988 memcpy (&buf
[i
], ®
, sizeof (reg
));
1990 append_insns (&buildaddr
, i
, buf
);
1991 current_insn_ptr
= buildaddr
;
1992 amd64_emit_call (get_raw_reg_func_addr ());
1996 amd64_emit_pop (void)
1998 EMIT_ASM (amd64_pop
,
2003 amd64_emit_stack_flush (void)
2005 EMIT_ASM (amd64_stack_flush
,
2010 amd64_emit_zero_ext (int arg
)
2015 EMIT_ASM (amd64_zero_ext_8
,
2019 EMIT_ASM (amd64_zero_ext_16
,
2020 "and $0xffff,%rax");
2023 EMIT_ASM (amd64_zero_ext_32
,
2024 "mov $0xffffffff,%rcx\n\t"
2033 amd64_emit_swap (void)
2035 EMIT_ASM (amd64_swap
,
2042 amd64_emit_stack_adjust (int n
)
2044 unsigned char buf
[16];
2046 CORE_ADDR buildaddr
= current_insn_ptr
;
2049 buf
[i
++] = 0x48; /* lea $<n>(%rsp),%rsp */
2053 /* This only handles adjustments up to 16, but we don't expect any more. */
2055 append_insns (&buildaddr
, i
, buf
);
2056 current_insn_ptr
= buildaddr
;
2059 /* FN's prototype is `LONGEST(*fn)(int)'. */
2062 amd64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2064 unsigned char buf
[16];
2066 CORE_ADDR buildaddr
;
2068 buildaddr
= current_insn_ptr
;
2070 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2071 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2073 append_insns (&buildaddr
, i
, buf
);
2074 current_insn_ptr
= buildaddr
;
2075 amd64_emit_call (fn
);
2078 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2081 amd64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2083 unsigned char buf
[16];
2085 CORE_ADDR buildaddr
;
2087 buildaddr
= current_insn_ptr
;
2089 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2090 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2092 append_insns (&buildaddr
, i
, buf
);
2093 current_insn_ptr
= buildaddr
;
2094 EMIT_ASM (amd64_void_call_2_a
,
2095 /* Save away a copy of the stack top. */
2097 /* Also pass top as the second argument. */
2099 amd64_emit_call (fn
);
2100 EMIT_ASM (amd64_void_call_2_b
,
2101 /* Restore the stack top, %rax may have been trashed. */
2106 amd64_emit_eq_goto (int *offset_p
, int *size_p
)
2109 "cmp %rax,(%rsp)\n\t"
2110 "jne .Lamd64_eq_fallthru\n\t"
2111 "lea 0x8(%rsp),%rsp\n\t"
2113 /* jmp, but don't trust the assembler to choose the right jump */
2114 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2115 ".Lamd64_eq_fallthru:\n\t"
2116 "lea 0x8(%rsp),%rsp\n\t"
2126 amd64_emit_ne_goto (int *offset_p
, int *size_p
)
2129 "cmp %rax,(%rsp)\n\t"
2130 "je .Lamd64_ne_fallthru\n\t"
2131 "lea 0x8(%rsp),%rsp\n\t"
2133 /* jmp, but don't trust the assembler to choose the right jump */
2134 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2135 ".Lamd64_ne_fallthru:\n\t"
2136 "lea 0x8(%rsp),%rsp\n\t"
2146 amd64_emit_lt_goto (int *offset_p
, int *size_p
)
2149 "cmp %rax,(%rsp)\n\t"
2150 "jnl .Lamd64_lt_fallthru\n\t"
2151 "lea 0x8(%rsp),%rsp\n\t"
2153 /* jmp, but don't trust the assembler to choose the right jump */
2154 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2155 ".Lamd64_lt_fallthru:\n\t"
2156 "lea 0x8(%rsp),%rsp\n\t"
2166 amd64_emit_le_goto (int *offset_p
, int *size_p
)
2169 "cmp %rax,(%rsp)\n\t"
2170 "jnle .Lamd64_le_fallthru\n\t"
2171 "lea 0x8(%rsp),%rsp\n\t"
2173 /* jmp, but don't trust the assembler to choose the right jump */
2174 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2175 ".Lamd64_le_fallthru:\n\t"
2176 "lea 0x8(%rsp),%rsp\n\t"
2186 amd64_emit_gt_goto (int *offset_p
, int *size_p
)
2189 "cmp %rax,(%rsp)\n\t"
2190 "jng .Lamd64_gt_fallthru\n\t"
2191 "lea 0x8(%rsp),%rsp\n\t"
2193 /* jmp, but don't trust the assembler to choose the right jump */
2194 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2195 ".Lamd64_gt_fallthru:\n\t"
2196 "lea 0x8(%rsp),%rsp\n\t"
2206 amd64_emit_ge_goto (int *offset_p
, int *size_p
)
2209 "cmp %rax,(%rsp)\n\t"
2210 "jnge .Lamd64_ge_fallthru\n\t"
2211 ".Lamd64_ge_jump:\n\t"
2212 "lea 0x8(%rsp),%rsp\n\t"
2214 /* jmp, but don't trust the assembler to choose the right jump */
2215 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2216 ".Lamd64_ge_fallthru:\n\t"
2217 "lea 0x8(%rsp),%rsp\n\t"
2226 static emit_ops amd64_emit_ops
=
2228 amd64_emit_prologue
,
2229 amd64_emit_epilogue
,
2234 amd64_emit_rsh_signed
,
2235 amd64_emit_rsh_unsigned
,
2243 amd64_emit_less_signed
,
2244 amd64_emit_less_unsigned
,
2248 amd64_write_goto_address
,
2253 amd64_emit_stack_flush
,
2254 amd64_emit_zero_ext
,
2256 amd64_emit_stack_adjust
,
2257 amd64_emit_int_call_1
,
2258 amd64_emit_void_call_2
,
2267 #endif /* __x86_64__ */
2270 i386_emit_prologue (void)
2272 EMIT_ASM32 (i386_prologue
,
2276 /* At this point, the raw regs base address is at 8(%ebp), and the
2277 value pointer is at 12(%ebp). */
2281 i386_emit_epilogue (void)
2283 EMIT_ASM32 (i386_epilogue
,
2284 "mov 12(%ebp),%ecx\n\t"
2285 "mov %eax,(%ecx)\n\t"
2286 "mov %ebx,0x4(%ecx)\n\t"
2294 i386_emit_add (void)
2296 EMIT_ASM32 (i386_add
,
2297 "add (%esp),%eax\n\t"
2298 "adc 0x4(%esp),%ebx\n\t"
2299 "lea 0x8(%esp),%esp");
2303 i386_emit_sub (void)
2305 EMIT_ASM32 (i386_sub
,
2306 "subl %eax,(%esp)\n\t"
2307 "sbbl %ebx,4(%esp)\n\t"
2313 i386_emit_mul (void)
2319 i386_emit_lsh (void)
2325 i386_emit_rsh_signed (void)
2331 i386_emit_rsh_unsigned (void)
2337 i386_emit_ext (int arg
)
2342 EMIT_ASM32 (i386_ext_8
,
2345 "movl %eax,%ebx\n\t"
2349 EMIT_ASM32 (i386_ext_16
,
2351 "movl %eax,%ebx\n\t"
2355 EMIT_ASM32 (i386_ext_32
,
2356 "movl %eax,%ebx\n\t"
2365 i386_emit_log_not (void)
2367 EMIT_ASM32 (i386_log_not
,
2369 "test %eax,%eax\n\t"
2376 i386_emit_bit_and (void)
2378 EMIT_ASM32 (i386_and
,
2379 "and (%esp),%eax\n\t"
2380 "and 0x4(%esp),%ebx\n\t"
2381 "lea 0x8(%esp),%esp");
2385 i386_emit_bit_or (void)
2387 EMIT_ASM32 (i386_or
,
2388 "or (%esp),%eax\n\t"
2389 "or 0x4(%esp),%ebx\n\t"
2390 "lea 0x8(%esp),%esp");
2394 i386_emit_bit_xor (void)
2396 EMIT_ASM32 (i386_xor
,
2397 "xor (%esp),%eax\n\t"
2398 "xor 0x4(%esp),%ebx\n\t"
2399 "lea 0x8(%esp),%esp");
2403 i386_emit_bit_not (void)
2405 EMIT_ASM32 (i386_bit_not
,
2406 "xor $0xffffffff,%eax\n\t"
2407 "xor $0xffffffff,%ebx\n\t");
2411 i386_emit_equal (void)
2413 EMIT_ASM32 (i386_equal
,
2414 "cmpl %ebx,4(%esp)\n\t"
2415 "jne .Li386_equal_false\n\t"
2416 "cmpl %eax,(%esp)\n\t"
2417 "je .Li386_equal_true\n\t"
2418 ".Li386_equal_false:\n\t"
2420 "jmp .Li386_equal_end\n\t"
2421 ".Li386_equal_true:\n\t"
2423 ".Li386_equal_end:\n\t"
2425 "lea 0x8(%esp),%esp");
2429 i386_emit_less_signed (void)
2431 EMIT_ASM32 (i386_less_signed
,
2432 "cmpl %ebx,4(%esp)\n\t"
2433 "jl .Li386_less_signed_true\n\t"
2434 "jne .Li386_less_signed_false\n\t"
2435 "cmpl %eax,(%esp)\n\t"
2436 "jl .Li386_less_signed_true\n\t"
2437 ".Li386_less_signed_false:\n\t"
2439 "jmp .Li386_less_signed_end\n\t"
2440 ".Li386_less_signed_true:\n\t"
2442 ".Li386_less_signed_end:\n\t"
2444 "lea 0x8(%esp),%esp");
2448 i386_emit_less_unsigned (void)
2450 EMIT_ASM32 (i386_less_unsigned
,
2451 "cmpl %ebx,4(%esp)\n\t"
2452 "jb .Li386_less_unsigned_true\n\t"
2453 "jne .Li386_less_unsigned_false\n\t"
2454 "cmpl %eax,(%esp)\n\t"
2455 "jb .Li386_less_unsigned_true\n\t"
2456 ".Li386_less_unsigned_false:\n\t"
2458 "jmp .Li386_less_unsigned_end\n\t"
2459 ".Li386_less_unsigned_true:\n\t"
2461 ".Li386_less_unsigned_end:\n\t"
2463 "lea 0x8(%esp),%esp");
2467 i386_emit_ref (int size
)
2472 EMIT_ASM32 (i386_ref1
,
2476 EMIT_ASM32 (i386_ref2
,
2480 EMIT_ASM32 (i386_ref4
,
2481 "movl (%eax),%eax");
2484 EMIT_ASM32 (i386_ref8
,
2485 "movl 4(%eax),%ebx\n\t"
2486 "movl (%eax),%eax");
2492 i386_emit_if_goto (int *offset_p
, int *size_p
)
2494 EMIT_ASM32 (i386_if_goto
,
2500 /* Don't trust the assembler to choose the right jump */
2501 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2504 *offset_p
= 11; /* be sure that this matches the sequence above */
2510 i386_emit_goto (int *offset_p
, int *size_p
)
2512 EMIT_ASM32 (i386_goto
,
2513 /* Don't trust the assembler to choose the right jump */
2514 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2522 i386_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2524 int diff
= (to
- (from
+ size
));
2525 unsigned char buf
[sizeof (int)];
2527 /* We're only doing 4-byte sizes at the moment. */
2534 memcpy (buf
, &diff
, sizeof (int));
2535 target_write_memory (from
, buf
, sizeof (int));
2539 i386_emit_const (LONGEST num
)
2541 unsigned char buf
[16];
2543 CORE_ADDR buildaddr
= current_insn_ptr
;
2546 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2547 lo
= num
& 0xffffffff;
2548 memcpy (&buf
[i
], &lo
, sizeof (lo
));
2550 hi
= ((num
>> 32) & 0xffffffff);
2553 buf
[i
++] = 0xbb; /* mov $<n>,%ebx */
2554 memcpy (&buf
[i
], &hi
, sizeof (hi
));
2559 buf
[i
++] = 0x31; buf
[i
++] = 0xdb; /* xor %ebx,%ebx */
2561 append_insns (&buildaddr
, i
, buf
);
2562 current_insn_ptr
= buildaddr
;
2566 i386_emit_call (CORE_ADDR fn
)
2568 unsigned char buf
[16];
2570 CORE_ADDR buildaddr
;
2572 buildaddr
= current_insn_ptr
;
2574 buf
[i
++] = 0xe8; /* call <reladdr> */
2575 offset
= ((int) fn
) - (buildaddr
+ 5);
2576 memcpy (buf
+ 1, &offset
, 4);
2577 append_insns (&buildaddr
, 5, buf
);
2578 current_insn_ptr
= buildaddr
;
2582 i386_emit_reg (int reg
)
2584 unsigned char buf
[16];
2586 CORE_ADDR buildaddr
;
2588 EMIT_ASM32 (i386_reg_a
,
2590 buildaddr
= current_insn_ptr
;
2592 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2593 memcpy (&buf
[i
], ®
, sizeof (reg
));
2595 append_insns (&buildaddr
, i
, buf
);
2596 current_insn_ptr
= buildaddr
;
2597 EMIT_ASM32 (i386_reg_b
,
2598 "mov %eax,4(%esp)\n\t"
2599 "mov 8(%ebp),%eax\n\t"
2601 i386_emit_call (get_raw_reg_func_addr ());
2602 EMIT_ASM32 (i386_reg_c
,
2604 "lea 0x8(%esp),%esp");
2608 i386_emit_pop (void)
2610 EMIT_ASM32 (i386_pop
,
2616 i386_emit_stack_flush (void)
2618 EMIT_ASM32 (i386_stack_flush
,
2624 i386_emit_zero_ext (int arg
)
2629 EMIT_ASM32 (i386_zero_ext_8
,
2630 "and $0xff,%eax\n\t"
2634 EMIT_ASM32 (i386_zero_ext_16
,
2635 "and $0xffff,%eax\n\t"
2639 EMIT_ASM32 (i386_zero_ext_32
,
2648 i386_emit_swap (void)
2650 EMIT_ASM32 (i386_swap
,
2660 i386_emit_stack_adjust (int n
)
2662 unsigned char buf
[16];
2664 CORE_ADDR buildaddr
= current_insn_ptr
;
2667 buf
[i
++] = 0x8d; /* lea $<n>(%esp),%esp */
2671 append_insns (&buildaddr
, i
, buf
);
2672 current_insn_ptr
= buildaddr
;
2675 /* FN's prototype is `LONGEST(*fn)(int)'. */
2678 i386_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2680 unsigned char buf
[16];
2682 CORE_ADDR buildaddr
;
2684 EMIT_ASM32 (i386_int_call_1_a
,
2685 /* Reserve a bit of stack space. */
2687 /* Put the one argument on the stack. */
2688 buildaddr
= current_insn_ptr
;
2690 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
2693 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2695 append_insns (&buildaddr
, i
, buf
);
2696 current_insn_ptr
= buildaddr
;
2697 i386_emit_call (fn
);
2698 EMIT_ASM32 (i386_int_call_1_c
,
2700 "lea 0x8(%esp),%esp");
2703 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2706 i386_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2708 unsigned char buf
[16];
2710 CORE_ADDR buildaddr
;
2712 EMIT_ASM32 (i386_void_call_2_a
,
2713 /* Preserve %eax only; we don't have to worry about %ebx. */
2715 /* Reserve a bit of stack space for arguments. */
2716 "sub $0x10,%esp\n\t"
2717 /* Copy "top" to the second argument position. (Note that
2718 we can't assume function won't scribble on its
2719 arguments, so don't try to restore from this.) */
2720 "mov %eax,4(%esp)\n\t"
2721 "mov %ebx,8(%esp)");
2722 /* Put the first argument on the stack. */
2723 buildaddr
= current_insn_ptr
;
2725 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
2728 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2730 append_insns (&buildaddr
, i
, buf
);
2731 current_insn_ptr
= buildaddr
;
2732 i386_emit_call (fn
);
2733 EMIT_ASM32 (i386_void_call_2_b
,
2734 "lea 0x10(%esp),%esp\n\t"
2735 /* Restore original stack top. */
2741 i386_emit_eq_goto (int *offset_p
, int *size_p
)
2744 /* Check low half first, more likely to be decider */
2745 "cmpl %eax,(%esp)\n\t"
2746 "jne .Leq_fallthru\n\t"
2747 "cmpl %ebx,4(%esp)\n\t"
2748 "jne .Leq_fallthru\n\t"
2749 "lea 0x8(%esp),%esp\n\t"
2752 /* jmp, but don't trust the assembler to choose the right jump */
2753 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2754 ".Leq_fallthru:\n\t"
2755 "lea 0x8(%esp),%esp\n\t"
2766 i386_emit_ne_goto (int *offset_p
, int *size_p
)
2769 /* Check low half first, more likely to be decider */
2770 "cmpl %eax,(%esp)\n\t"
2772 "cmpl %ebx,4(%esp)\n\t"
2773 "je .Lne_fallthru\n\t"
2775 "lea 0x8(%esp),%esp\n\t"
2778 /* jmp, but don't trust the assembler to choose the right jump */
2779 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2780 ".Lne_fallthru:\n\t"
2781 "lea 0x8(%esp),%esp\n\t"
2792 i386_emit_lt_goto (int *offset_p
, int *size_p
)
2795 "cmpl %ebx,4(%esp)\n\t"
2797 "jne .Llt_fallthru\n\t"
2798 "cmpl %eax,(%esp)\n\t"
2799 "jnl .Llt_fallthru\n\t"
2801 "lea 0x8(%esp),%esp\n\t"
2804 /* jmp, but don't trust the assembler to choose the right jump */
2805 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2806 ".Llt_fallthru:\n\t"
2807 "lea 0x8(%esp),%esp\n\t"
2818 i386_emit_le_goto (int *offset_p
, int *size_p
)
2821 "cmpl %ebx,4(%esp)\n\t"
2823 "jne .Lle_fallthru\n\t"
2824 "cmpl %eax,(%esp)\n\t"
2825 "jnle .Lle_fallthru\n\t"
2827 "lea 0x8(%esp),%esp\n\t"
2830 /* jmp, but don't trust the assembler to choose the right jump */
2831 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2832 ".Lle_fallthru:\n\t"
2833 "lea 0x8(%esp),%esp\n\t"
2844 i386_emit_gt_goto (int *offset_p
, int *size_p
)
2847 "cmpl %ebx,4(%esp)\n\t"
2849 "jne .Lgt_fallthru\n\t"
2850 "cmpl %eax,(%esp)\n\t"
2851 "jng .Lgt_fallthru\n\t"
2853 "lea 0x8(%esp),%esp\n\t"
2856 /* jmp, but don't trust the assembler to choose the right jump */
2857 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2858 ".Lgt_fallthru:\n\t"
2859 "lea 0x8(%esp),%esp\n\t"
2870 i386_emit_ge_goto (int *offset_p
, int *size_p
)
2873 "cmpl %ebx,4(%esp)\n\t"
2875 "jne .Lge_fallthru\n\t"
2876 "cmpl %eax,(%esp)\n\t"
2877 "jnge .Lge_fallthru\n\t"
2879 "lea 0x8(%esp),%esp\n\t"
2882 /* jmp, but don't trust the assembler to choose the right jump */
2883 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2884 ".Lge_fallthru:\n\t"
2885 "lea 0x8(%esp),%esp\n\t"
2895 static emit_ops i386_emit_ops
=
2903 i386_emit_rsh_signed
,
2904 i386_emit_rsh_unsigned
,
2912 i386_emit_less_signed
,
2913 i386_emit_less_unsigned
,
2917 i386_write_goto_address
,
2922 i386_emit_stack_flush
,
2925 i386_emit_stack_adjust
,
2926 i386_emit_int_call_1
,
2927 i386_emit_void_call_2
,
emit_ops *
x86_target::emit_ops ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc (current_thread))
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}

/* Implementation of target ops method "sw_breakpoint_from_kind".  */

const gdb_byte *
x86_target::sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}

bool
x86_target::low_supports_range_stepping ()
{
  return true;
}

int
x86_target::get_ipa_tdesc_idx ()
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);
  const struct target_desc *tdesc = regcache->tdesc;

#ifdef __x86_64__
  return amd64_get_ipa_tdesc_idx (tdesc);
#endif

  if (tdesc == tdesc_i386_linux_no_xml.get ())
    return X86_TDESC_SSE;

  return i386_get_ipa_tdesc_idx (tdesc);
}

/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_x86_target;
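
/* One-time initialization for this module, run at gdbserver startup.
   Sets up the fallback (no-XML) target descriptions and the regset
   machinery.  */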
void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  tdesc_amd64_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_amd64_linux_no_xml.get (),
                           amd64_linux_read_description (X86_XSTATE_SSE_MASK,
                                                         false));
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif

  tdesc_i386_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_i386_linux_no_xml.get (),
                           i386_linux_read_description (X86_XSTATE_SSE_MASK));
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}