1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
3 Copyright (C) 2002-2015 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "linux-low.h"
27 #include "x86-xstate.h"
29 #include "gdb_proc_service.h"
30 /* Don't include elf/common.h if linux/elf.h got included by
31 gdb_proc_service.h. */
33 #include "elf/common.h"
38 #include "tracepoint.h"
40 #include "nat/linux-nat.h"
41 #include "nat/x86-linux.h"
42 #include "nat/x86-linux-dregs.h"
45 /* Defined in auto-generated file amd64-linux.c. */
46 void init_registers_amd64_linux (void);
47 extern const struct target_desc
*tdesc_amd64_linux
;
49 /* Defined in auto-generated file amd64-avx-linux.c. */
50 void init_registers_amd64_avx_linux (void);
51 extern const struct target_desc
*tdesc_amd64_avx_linux
;
53 /* Defined in auto-generated file amd64-avx512-linux.c. */
54 void init_registers_amd64_avx512_linux (void);
55 extern const struct target_desc
*tdesc_amd64_avx512_linux
;
57 /* Defined in auto-generated file amd64-mpx-linux.c. */
58 void init_registers_amd64_mpx_linux (void);
59 extern const struct target_desc
*tdesc_amd64_mpx_linux
;
61 /* Defined in auto-generated file x32-linux.c. */
62 void init_registers_x32_linux (void);
63 extern const struct target_desc
*tdesc_x32_linux
;
65 /* Defined in auto-generated file x32-avx-linux.c. */
66 void init_registers_x32_avx_linux (void);
67 extern const struct target_desc
*tdesc_x32_avx_linux
;
69 /* Defined in auto-generated file x32-avx512-linux.c. */
70 void init_registers_x32_avx512_linux (void);
71 extern const struct target_desc
*tdesc_x32_avx512_linux
;
75 /* Defined in auto-generated file i386-linux.c. */
76 void init_registers_i386_linux (void);
77 extern const struct target_desc
*tdesc_i386_linux
;
79 /* Defined in auto-generated file i386-mmx-linux.c. */
80 void init_registers_i386_mmx_linux (void);
81 extern const struct target_desc
*tdesc_i386_mmx_linux
;
83 /* Defined in auto-generated file i386-avx-linux.c. */
84 void init_registers_i386_avx_linux (void);
85 extern const struct target_desc
*tdesc_i386_avx_linux
;
87 /* Defined in auto-generated file i386-avx512-linux.c. */
88 void init_registers_i386_avx512_linux (void);
89 extern const struct target_desc
*tdesc_i386_avx512_linux
;
91 /* Defined in auto-generated file i386-mpx-linux.c. */
92 void init_registers_i386_mpx_linux (void);
93 extern const struct target_desc
*tdesc_i386_mpx_linux
;
96 static struct target_desc
*tdesc_amd64_linux_no_xml
;
98 static struct target_desc
*tdesc_i386_linux_no_xml
;
/* Canonical jump instruction templates used when building jump pads:
   a 5-byte rel32 `jmp` and a 4-byte 16-bit-operand `jmp`.  The zeroed
   displacement bytes are patched in by the jump-pad builders.  */
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
104 /* Backward compatibility for gdb without XML support. */
/* Backward compatibility for gdb without XML support.  These are the
   minimal fixed target descriptions sent to such a GDB.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
119 #include <sys/procfs.h>
120 #include <sys/ptrace.h>
123 #ifndef PTRACE_GETREGSET
124 #define PTRACE_GETREGSET 0x4204
127 #ifndef PTRACE_SETREGSET
128 #define PTRACE_SETREGSET 0x4205
132 #ifndef PTRACE_GET_THREAD_AREA
133 #define PTRACE_GET_THREAD_AREA 25
136 /* This definition comes from prctl.h, but some kernels may not have it. */
137 #ifndef PTRACE_ARCH_PRCTL
138 #define PTRACE_ARCH_PRCTL 30
141 /* The following definitions come from prctl.h, but may be absent
142 for certain configurations. */
144 #define ARCH_SET_GS 0x1001
145 #define ARCH_SET_FS 0x1002
146 #define ARCH_GET_FS 0x1003
147 #define ARCH_GET_GS 0x1004
150 /* Per-process arch-specific data we want to keep. */
152 struct arch_process_info
154 struct x86_debug_reg_state debug_reg_state
;
159 /* Mapping between the general-purpose registers in `struct user'
160 format and GDB's register array layout.
161 Note that the transfer layout uses 64-bit regs. */
/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
172 /* So code below doesn't have to care, i386 or amd64. */
173 #define ORIG_EAX ORIG_RAX
/* Mapping from GDB's amd64 register numbers to `struct user' offsets.
   Registers not fetched via PTRACE_PEEKUSER (FP/SSE/AVX/MPX/AVX512)
   are marked -1.  */
static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
  -1, -1, -1, -1,		  /* MPX registers BND0 ... BND3.  */
  -1, -1,			  /* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)
206 #else /* ! __x86_64__ */
208 /* Mapping between the general-purpose registers in `struct user'
209 format and GDB's register array layout. */
210 static /*const*/ int i386_regmap
[] =
212 EAX
* 4, ECX
* 4, EDX
* 4, EBX
* 4,
213 UESP
* 4, EBP
* 4, ESI
* 4, EDI
* 4,
214 EIP
* 4, EFL
* 4, CS
* 4, SS
* 4,
215 DS
* 4, ES
* 4, FS
* 4, GS
* 4
218 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
226 /* Returns true if the current inferior belongs to a x86-64 process,
230 is_64bit_tdesc (void)
232 struct regcache
*regcache
= get_thread_regcache (current_thread
, 0);
234 return register_size (regcache
->tdesc
, 0) == 8;
240 /* Called by libthread_db. */
243 ps_get_thread_area (const struct ps_prochandle
*ph
,
244 lwpid_t lwpid
, int idx
, void **base
)
247 int use_64bit
= is_64bit_tdesc ();
254 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_FS
) == 0)
258 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_GS
) == 0)
269 unsigned int desc
[4];
271 if (ptrace (PTRACE_GET_THREAD_AREA
, lwpid
,
272 (void *) (intptr_t) idx
, (unsigned long) &desc
) < 0)
275 /* Ensure we properly extend the value to 64-bits for x86_64. */
276 *base
= (void *) (uintptr_t) desc
[1];
281 /* Get the thread area address. This is used to recognize which
282 thread is which when tracing with the in-process agent library. We
283 don't read anything from the address, and treat it as opaque; it's
284 the address itself that we assume is unique per-thread. */
287 x86_get_thread_area (int lwpid
, CORE_ADDR
*addr
)
290 int use_64bit
= is_64bit_tdesc ();
295 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_FS
) == 0)
297 *addr
= (CORE_ADDR
) (uintptr_t) base
;
306 struct lwp_info
*lwp
= find_lwp_pid (pid_to_ptid (lwpid
));
307 struct thread_info
*thr
= get_lwp_thread (lwp
);
308 struct regcache
*regcache
= get_thread_regcache (thr
, 1);
309 unsigned int desc
[4];
311 const int reg_thread_area
= 3; /* bits to scale down register value. */
314 collect_register_by_name (regcache
, "gs", &gs
);
316 idx
= gs
>> reg_thread_area
;
318 if (ptrace (PTRACE_GET_THREAD_AREA
,
320 (void *) (long) idx
, (unsigned long) &desc
) < 0)
331 x86_cannot_store_register (int regno
)
334 if (is_64bit_tdesc ())
338 return regno
>= I386_NUM_REGS
;
342 x86_cannot_fetch_register (int regno
)
345 if (is_64bit_tdesc ())
349 return regno
>= I386_NUM_REGS
;
353 x86_fill_gregset (struct regcache
*regcache
, void *buf
)
358 if (register_size (regcache
->tdesc
, 0) == 8)
360 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
361 if (x86_64_regmap
[i
] != -1)
362 collect_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
366 /* 32-bit inferior registers need to be zero-extended.
367 Callers would read uninitialized memory otherwise. */
368 memset (buf
, 0x00, X86_64_USER_REGS
* 8);
371 for (i
= 0; i
< I386_NUM_REGS
; i
++)
372 collect_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
374 collect_register_by_name (regcache
, "orig_eax",
375 ((char *) buf
) + ORIG_EAX
* REGSIZE
);
379 x86_store_gregset (struct regcache
*regcache
, const void *buf
)
384 if (register_size (regcache
->tdesc
, 0) == 8)
386 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
387 if (x86_64_regmap
[i
] != -1)
388 supply_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
393 for (i
= 0; i
< I386_NUM_REGS
; i
++)
394 supply_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
396 supply_register_by_name (regcache
, "orig_eax",
397 ((char *) buf
) + ORIG_EAX
* REGSIZE
);
/* Transfer helpers between the regcache and the kernel FP/FPX/XSTATE
   register blocks, delegating to the i387 format converters.  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
448 /* ??? The non-biarch i386 case stores all the i387 regs twice.
449 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
450 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
451 doesn't work. IWBN to avoid the duplication in the case where it
452 does work. Maybe the arch_setup routine could check whether it works
453 and update the supported regsets accordingly. */
455 static struct regset_info x86_regsets
[] =
457 #ifdef HAVE_PTRACE_GETREGS
458 { PTRACE_GETREGS
, PTRACE_SETREGS
, 0, sizeof (elf_gregset_t
),
460 x86_fill_gregset
, x86_store_gregset
},
461 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_X86_XSTATE
, 0,
462 EXTENDED_REGS
, x86_fill_xstateregset
, x86_store_xstateregset
},
464 # ifdef HAVE_PTRACE_GETFPXREGS
465 { PTRACE_GETFPXREGS
, PTRACE_SETFPXREGS
, 0, sizeof (elf_fpxregset_t
),
467 x86_fill_fpxregset
, x86_store_fpxregset
},
470 { PTRACE_GETFPREGS
, PTRACE_SETFPREGS
, 0, sizeof (elf_fpregset_t
),
472 x86_fill_fpregset
, x86_store_fpregset
},
473 #endif /* HAVE_PTRACE_GETREGS */
474 { 0, 0, 0, -1, -1, NULL
, NULL
}
478 x86_get_pc (struct regcache
*regcache
)
480 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
485 collect_register_by_name (regcache
, "rip", &pc
);
486 return (CORE_ADDR
) pc
;
491 collect_register_by_name (regcache
, "eip", &pc
);
492 return (CORE_ADDR
) pc
;
497 x86_set_pc (struct regcache
*regcache
, CORE_ADDR pc
)
499 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
503 unsigned long newpc
= pc
;
504 supply_register_by_name (regcache
, "rip", &newpc
);
508 unsigned int newpc
= pc
;
509 supply_register_by_name (regcache
, "eip", &newpc
);
/* The software breakpoint instruction: int3 (0xCC).  */
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1
517 x86_breakpoint_at (CORE_ADDR pc
)
521 (*the_target
->read_memory
) (pc
, &c
, 1);
528 /* Low-level function vector. */
529 struct x86_dr_low_type x86_dr_low
=
531 x86_linux_dr_set_control
,
532 x86_linux_dr_set_addr
,
533 x86_linux_dr_get_addr
,
534 x86_linux_dr_get_status
,
535 x86_linux_dr_get_control
,
539 /* Breakpoint/Watchpoint support. */
542 x86_supports_z_point_type (char z_type
)
548 case Z_PACKET_WRITE_WP
:
549 case Z_PACKET_ACCESS_WP
:
557 x86_insert_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
558 int size
, struct raw_breakpoint
*bp
)
560 struct process_info
*proc
= current_process ();
564 case raw_bkpt_type_hw
:
565 case raw_bkpt_type_write_wp
:
566 case raw_bkpt_type_access_wp
:
568 enum target_hw_bp_type hw_type
569 = raw_bkpt_type_to_target_hw_bp_type (type
);
570 struct x86_debug_reg_state
*state
571 = &proc
->priv
->arch_private
->debug_reg_state
;
573 return x86_dr_insert_watchpoint (state
, hw_type
, addr
, size
);
583 x86_remove_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
584 int size
, struct raw_breakpoint
*bp
)
586 struct process_info
*proc
= current_process ();
590 case raw_bkpt_type_hw
:
591 case raw_bkpt_type_write_wp
:
592 case raw_bkpt_type_access_wp
:
594 enum target_hw_bp_type hw_type
595 = raw_bkpt_type_to_target_hw_bp_type (type
);
596 struct x86_debug_reg_state
*state
597 = &proc
->priv
->arch_private
->debug_reg_state
;
599 return x86_dr_remove_watchpoint (state
, hw_type
, addr
, size
);
608 x86_stopped_by_watchpoint (void)
610 struct process_info
*proc
= current_process ();
611 return x86_dr_stopped_by_watchpoint (&proc
->priv
->arch_private
->debug_reg_state
);
615 x86_stopped_data_address (void)
617 struct process_info
*proc
= current_process ();
619 if (x86_dr_stopped_data_address (&proc
->priv
->arch_private
->debug_reg_state
,
625 /* Called when a new process is created. */
627 static struct arch_process_info
*
628 x86_linux_new_process (void)
630 struct arch_process_info
*info
= XCNEW (struct arch_process_info
);
632 x86_low_init_dregs (&info
->debug_reg_state
);
637 /* Target routine for linux_new_fork. */
640 x86_linux_new_fork (struct process_info
*parent
, struct process_info
*child
)
642 /* These are allocated by linux_add_process. */
643 gdb_assert (parent
->priv
!= NULL
644 && parent
->priv
->arch_private
!= NULL
);
645 gdb_assert (child
->priv
!= NULL
646 && child
->priv
->arch_private
!= NULL
);
648 /* Linux kernel before 2.6.33 commit
649 72f674d203cd230426437cdcf7dd6f681dad8b0d
650 will inherit hardware debug registers from parent
651 on fork/vfork/clone. Newer Linux kernels create such tasks with
652 zeroed debug registers.
654 GDB core assumes the child inherits the watchpoints/hw
655 breakpoints of the parent, and will remove them all from the
656 forked off process. Copy the debug registers mirrors into the
657 new process so that all breakpoints and watchpoints can be
658 removed together. The debug registers mirror will become zeroed
659 in the end before detaching the forked off process, thus making
660 this compatible with older Linux kernels too. */
662 *child
->priv
->arch_private
= *parent
->priv
->arch_private
;
665 /* See nat/x86-dregs.h. */
667 struct x86_debug_reg_state
*
668 x86_debug_reg_state (pid_t pid
)
670 struct process_info
*proc
= find_process_pid (pid
);
672 return &proc
->priv
->arch_private
->debug_reg_state
;
675 /* When GDBSERVER is built as a 64-bit application on linux, the
676 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
677 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
678 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
679 conversion in-place ourselves. */
681 /* These types below (compat_*) define a siginfo type that is layout
682 compatible with the siginfo type exported by the 32-bit userspace
687 typedef int compat_int_t
;
688 typedef unsigned int compat_uptr_t
;
690 typedef int compat_time_t
;
691 typedef int compat_timer_t
;
692 typedef int compat_clock_t
;
694 struct compat_timeval
696 compat_time_t tv_sec
;
700 typedef union compat_sigval
702 compat_int_t sival_int
;
703 compat_uptr_t sival_ptr
;
706 typedef struct compat_siginfo
714 int _pad
[((128 / sizeof (int)) - 3)];
723 /* POSIX.1b timers */
728 compat_sigval_t _sigval
;
731 /* POSIX.1b signals */
736 compat_sigval_t _sigval
;
745 compat_clock_t _utime
;
746 compat_clock_t _stime
;
749 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
764 /* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes. */
765 typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t
;
767 typedef struct compat_x32_siginfo
775 int _pad
[((128 / sizeof (int)) - 3)];
784 /* POSIX.1b timers */
789 compat_sigval_t _sigval
;
792 /* POSIX.1b signals */
797 compat_sigval_t _sigval
;
806 compat_x32_clock_t _utime
;
807 compat_x32_clock_t _stime
;
810 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
823 } compat_x32_siginfo_t
__attribute__ ((__aligned__ (8)));
825 #define cpt_si_pid _sifields._kill._pid
826 #define cpt_si_uid _sifields._kill._uid
827 #define cpt_si_timerid _sifields._timer._tid
828 #define cpt_si_overrun _sifields._timer._overrun
829 #define cpt_si_status _sifields._sigchld._status
830 #define cpt_si_utime _sifields._sigchld._utime
831 #define cpt_si_stime _sifields._sigchld._stime
832 #define cpt_si_ptr _sifields._rt._sigval.sival_ptr
833 #define cpt_si_addr _sifields._sigfault._addr
834 #define cpt_si_band _sifields._sigpoll._band
835 #define cpt_si_fd _sifields._sigpoll._fd
837 /* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
838 In their place is si_timer1,si_timer2. */
840 #define si_timerid si_timer1
843 #define si_overrun si_timer2
847 compat_siginfo_from_siginfo (compat_siginfo_t
*to
, siginfo_t
*from
)
849 memset (to
, 0, sizeof (*to
));
851 to
->si_signo
= from
->si_signo
;
852 to
->si_errno
= from
->si_errno
;
853 to
->si_code
= from
->si_code
;
855 if (to
->si_code
== SI_TIMER
)
857 to
->cpt_si_timerid
= from
->si_timerid
;
858 to
->cpt_si_overrun
= from
->si_overrun
;
859 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
861 else if (to
->si_code
== SI_USER
)
863 to
->cpt_si_pid
= from
->si_pid
;
864 to
->cpt_si_uid
= from
->si_uid
;
866 else if (to
->si_code
< 0)
868 to
->cpt_si_pid
= from
->si_pid
;
869 to
->cpt_si_uid
= from
->si_uid
;
870 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
874 switch (to
->si_signo
)
877 to
->cpt_si_pid
= from
->si_pid
;
878 to
->cpt_si_uid
= from
->si_uid
;
879 to
->cpt_si_status
= from
->si_status
;
880 to
->cpt_si_utime
= from
->si_utime
;
881 to
->cpt_si_stime
= from
->si_stime
;
887 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
890 to
->cpt_si_band
= from
->si_band
;
891 to
->cpt_si_fd
= from
->si_fd
;
894 to
->cpt_si_pid
= from
->si_pid
;
895 to
->cpt_si_uid
= from
->si_uid
;
896 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
903 siginfo_from_compat_siginfo (siginfo_t
*to
, compat_siginfo_t
*from
)
905 memset (to
, 0, sizeof (*to
));
907 to
->si_signo
= from
->si_signo
;
908 to
->si_errno
= from
->si_errno
;
909 to
->si_code
= from
->si_code
;
911 if (to
->si_code
== SI_TIMER
)
913 to
->si_timerid
= from
->cpt_si_timerid
;
914 to
->si_overrun
= from
->cpt_si_overrun
;
915 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
917 else if (to
->si_code
== SI_USER
)
919 to
->si_pid
= from
->cpt_si_pid
;
920 to
->si_uid
= from
->cpt_si_uid
;
922 else if (to
->si_code
< 0)
924 to
->si_pid
= from
->cpt_si_pid
;
925 to
->si_uid
= from
->cpt_si_uid
;
926 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
930 switch (to
->si_signo
)
933 to
->si_pid
= from
->cpt_si_pid
;
934 to
->si_uid
= from
->cpt_si_uid
;
935 to
->si_status
= from
->cpt_si_status
;
936 to
->si_utime
= from
->cpt_si_utime
;
937 to
->si_stime
= from
->cpt_si_stime
;
943 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
946 to
->si_band
= from
->cpt_si_band
;
947 to
->si_fd
= from
->cpt_si_fd
;
950 to
->si_pid
= from
->cpt_si_pid
;
951 to
->si_uid
= from
->cpt_si_uid
;
952 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
959 compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t
*to
,
962 memset (to
, 0, sizeof (*to
));
964 to
->si_signo
= from
->si_signo
;
965 to
->si_errno
= from
->si_errno
;
966 to
->si_code
= from
->si_code
;
968 if (to
->si_code
== SI_TIMER
)
970 to
->cpt_si_timerid
= from
->si_timerid
;
971 to
->cpt_si_overrun
= from
->si_overrun
;
972 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
974 else if (to
->si_code
== SI_USER
)
976 to
->cpt_si_pid
= from
->si_pid
;
977 to
->cpt_si_uid
= from
->si_uid
;
979 else if (to
->si_code
< 0)
981 to
->cpt_si_pid
= from
->si_pid
;
982 to
->cpt_si_uid
= from
->si_uid
;
983 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
987 switch (to
->si_signo
)
990 to
->cpt_si_pid
= from
->si_pid
;
991 to
->cpt_si_uid
= from
->si_uid
;
992 to
->cpt_si_status
= from
->si_status
;
993 to
->cpt_si_utime
= from
->si_utime
;
994 to
->cpt_si_stime
= from
->si_stime
;
1000 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
1003 to
->cpt_si_band
= from
->si_band
;
1004 to
->cpt_si_fd
= from
->si_fd
;
1007 to
->cpt_si_pid
= from
->si_pid
;
1008 to
->cpt_si_uid
= from
->si_uid
;
1009 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1016 siginfo_from_compat_x32_siginfo (siginfo_t
*to
,
1017 compat_x32_siginfo_t
*from
)
1019 memset (to
, 0, sizeof (*to
));
1021 to
->si_signo
= from
->si_signo
;
1022 to
->si_errno
= from
->si_errno
;
1023 to
->si_code
= from
->si_code
;
1025 if (to
->si_code
== SI_TIMER
)
1027 to
->si_timerid
= from
->cpt_si_timerid
;
1028 to
->si_overrun
= from
->cpt_si_overrun
;
1029 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1031 else if (to
->si_code
== SI_USER
)
1033 to
->si_pid
= from
->cpt_si_pid
;
1034 to
->si_uid
= from
->cpt_si_uid
;
1036 else if (to
->si_code
< 0)
1038 to
->si_pid
= from
->cpt_si_pid
;
1039 to
->si_uid
= from
->cpt_si_uid
;
1040 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1044 switch (to
->si_signo
)
1047 to
->si_pid
= from
->cpt_si_pid
;
1048 to
->si_uid
= from
->cpt_si_uid
;
1049 to
->si_status
= from
->cpt_si_status
;
1050 to
->si_utime
= from
->cpt_si_utime
;
1051 to
->si_stime
= from
->cpt_si_stime
;
1057 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1060 to
->si_band
= from
->cpt_si_band
;
1061 to
->si_fd
= from
->cpt_si_fd
;
1064 to
->si_pid
= from
->cpt_si_pid
;
1065 to
->si_uid
= from
->cpt_si_uid
;
1066 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1072 #endif /* __x86_64__ */
1074 /* Convert a native/host siginfo object, into/from the siginfo in the
1075 layout of the inferiors' architecture. Returns true if any
1076 conversion was done; false otherwise. If DIRECTION is 1, then copy
1077 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
1081 x86_siginfo_fixup (siginfo_t
*native
, void *inf
, int direction
)
1084 unsigned int machine
;
1085 int tid
= lwpid_of (current_thread
);
1086 int is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1088 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
1089 if (!is_64bit_tdesc ())
1091 gdb_assert (sizeof (siginfo_t
) == sizeof (compat_siginfo_t
));
1094 compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
, native
);
1096 siginfo_from_compat_siginfo (native
, (struct compat_siginfo
*) inf
);
1100 /* No fixup for native x32 GDB. */
1101 else if (!is_elf64
&& sizeof (void *) == 8)
1103 gdb_assert (sizeof (siginfo_t
) == sizeof (compat_x32_siginfo_t
));
1106 compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo
*) inf
,
1109 siginfo_from_compat_x32_siginfo (native
,
1110 (struct compat_x32_siginfo
*) inf
);
1121 /* Format of XSAVE extended state is:
1124 fxsave_bytes[0..463]
1125 sw_usable_bytes[464..511]
1126 xstate_hdr_bytes[512..575]
1131 Same memory layout will be used for the coredump NT_X86_XSTATE
1132 representing the XSAVE extended state registers.
1134 The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
1135 extended state mask, which is the same as the extended control register
1136 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
1137 together with the mask saved in the xstate_hdr_bytes to determine what
1138 states the processor/OS supports and what state, used or initialized,
1139 the process/thread is in. */
1140 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
1142 /* Does the current host support the GETFPXREGS request? The header
1143 file may or may not define it, and even if it is defined, the
1144 kernel will return EIO if it's running on a pre-SSE processor. */
/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.
   -1 means "not probed yet".  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Does the current host support PTRACE_GETREGSET?  -1 means "not
   probed yet".  */
static int have_ptrace_getregset = -1;
1156 /* Get Linux/x86 target description from running target. */
1158 static const struct target_desc
*
1159 x86_linux_read_description (void)
1161 unsigned int machine
;
1165 static uint64_t xcr0
;
1166 struct regset_info
*regset
;
1168 tid
= lwpid_of (current_thread
);
1170 is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1172 if (sizeof (void *) == 4)
1175 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
1177 else if (machine
== EM_X86_64
)
1178 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
1182 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
1183 if (machine
== EM_386
&& have_ptrace_getfpxregs
== -1)
1185 elf_fpxregset_t fpxregs
;
1187 if (ptrace (PTRACE_GETFPXREGS
, tid
, 0, (long) &fpxregs
) < 0)
1189 have_ptrace_getfpxregs
= 0;
1190 have_ptrace_getregset
= 0;
1191 return tdesc_i386_mmx_linux
;
1194 have_ptrace_getfpxregs
= 1;
1200 x86_xcr0
= X86_XSTATE_SSE_MASK
;
1202 /* Don't use XML. */
1204 if (machine
== EM_X86_64
)
1205 return tdesc_amd64_linux_no_xml
;
1208 return tdesc_i386_linux_no_xml
;
1211 if (have_ptrace_getregset
== -1)
1213 uint64_t xstateregs
[(X86_XSTATE_SSE_SIZE
/ sizeof (uint64_t))];
1216 iov
.iov_base
= xstateregs
;
1217 iov
.iov_len
= sizeof (xstateregs
);
1219 /* Check if PTRACE_GETREGSET works. */
1220 if (ptrace (PTRACE_GETREGSET
, tid
,
1221 (unsigned int) NT_X86_XSTATE
, (long) &iov
) < 0)
1222 have_ptrace_getregset
= 0;
1225 have_ptrace_getregset
= 1;
1227 /* Get XCR0 from XSAVE extended state. */
1228 xcr0
= xstateregs
[(I386_LINUX_XSAVE_XCR0_OFFSET
1229 / sizeof (uint64_t))];
1231 /* Use PTRACE_GETREGSET if it is available. */
1232 for (regset
= x86_regsets
;
1233 regset
->fill_function
!= NULL
; regset
++)
1234 if (regset
->get_request
== PTRACE_GETREGSET
)
1235 regset
->size
= X86_XSTATE_SIZE (xcr0
);
1236 else if (regset
->type
!= GENERAL_REGS
)
1241 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
1242 xcr0_features
= (have_ptrace_getregset
1243 && (xcr0
& X86_XSTATE_ALL_MASK
));
1248 if (machine
== EM_X86_64
)
1255 switch (xcr0
& X86_XSTATE_ALL_MASK
)
1257 case X86_XSTATE_AVX512_MASK
:
1258 return tdesc_amd64_avx512_linux
;
1260 case X86_XSTATE_MPX_MASK
:
1261 return tdesc_amd64_mpx_linux
;
1263 case X86_XSTATE_AVX_MASK
:
1264 return tdesc_amd64_avx_linux
;
1267 return tdesc_amd64_linux
;
1271 return tdesc_amd64_linux
;
1277 switch (xcr0
& X86_XSTATE_ALL_MASK
)
1279 case X86_XSTATE_AVX512_MASK
:
1280 return tdesc_x32_avx512_linux
;
1282 case X86_XSTATE_MPX_MASK
: /* No MPX on x32. */
1283 case X86_XSTATE_AVX_MASK
:
1284 return tdesc_x32_avx_linux
;
1287 return tdesc_x32_linux
;
1291 return tdesc_x32_linux
;
1299 switch (xcr0
& X86_XSTATE_ALL_MASK
)
1301 case (X86_XSTATE_AVX512_MASK
):
1302 return tdesc_i386_avx512_linux
;
1304 case (X86_XSTATE_MPX_MASK
):
1305 return tdesc_i386_mpx_linux
;
1307 case (X86_XSTATE_AVX_MASK
):
1308 return tdesc_i386_avx_linux
;
1311 return tdesc_i386_linux
;
1315 return tdesc_i386_linux
;
1318 gdb_assert_not_reached ("failed to return tdesc");
1321 /* Callback for find_inferior. Stops iteration when a thread with a
1322 given PID is found. */
1325 same_process_callback (struct inferior_list_entry
*entry
, void *data
)
1327 int pid
= *(int *) data
;
1329 return (ptid_get_pid (entry
->id
) == pid
);
1332 /* Callback for for_each_inferior. Calls the arch_setup routine for
1336 x86_arch_setup_process_callback (struct inferior_list_entry
*entry
)
1338 int pid
= ptid_get_pid (entry
->id
);
1340 /* Look up any thread of this processes. */
1342 = (struct thread_info
*) find_inferior (&all_threads
,
1343 same_process_callback
, &pid
);
1345 the_low_target
.arch_setup ();
1348 /* Update all the target description of all processes; a new GDB
1349 connected, and it may or not support xml target descriptions. */
1352 x86_linux_update_xmltarget (void)
1354 struct thread_info
*saved_thread
= current_thread
;
1356 /* Before changing the register cache's internal layout, flush the
1357 contents of the current valid caches back to the threads, and
1358 release the current regcache objects. */
1359 regcache_release ();
1361 for_each_inferior (&all_processes
, x86_arch_setup_process_callback
);
1363 current_thread
= saved_thread
;
1366 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1367 PTRACE_GETREGSET. */
1370 x86_linux_process_qsupported (const char *query
)
1372 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1373 with "i386" in qSupported query, it supports x86 XML target
1376 if (query
!= NULL
&& startswith (query
, "xmlRegisters="))
1378 char *copy
= xstrdup (query
+ 13);
1381 for (p
= strtok (copy
, ","); p
!= NULL
; p
= strtok (NULL
, ","))
1383 if (strcmp (p
, "i386") == 0)
1393 x86_linux_update_xmltarget ();
1396 /* Common for x86/x86-64. */
1398 static struct regsets_info x86_regsets_info
=
1400 x86_regsets
, /* regsets */
1401 0, /* num_regsets */
1402 NULL
, /* disabled_regsets */
1406 static struct regs_info amd64_linux_regs_info
=
1408 NULL
, /* regset_bitmap */
1409 NULL
, /* usrregs_info */
1413 static struct usrregs_info i386_linux_usrregs_info
=
1419 static struct regs_info i386_linux_regs_info
=
1421 NULL
, /* regset_bitmap */
1422 &i386_linux_usrregs_info
,
1426 const struct regs_info
*
1427 x86_linux_regs_info (void)
1430 if (is_64bit_tdesc ())
1431 return &amd64_linux_regs_info
;
1434 return &i386_linux_regs_info
;
1437 /* Initialize the target description for the architecture of the
1441 x86_arch_setup (void)
1443 current_process ()->tdesc
= x86_linux_read_description ();
1447 x86_supports_tracepoints (void)
1453 append_insns (CORE_ADDR
*to
, size_t len
, const unsigned char *buf
)
1455 write_inferior_memory (*to
, buf
, len
);
/* Decode OP, a string of whitespace-separated hex byte values, into
   BUF.  Return the number of bytes written.  Parsing stops at the
   first token that is not a hex number.  */

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
/* NOTE(review): lossy extraction -- gaps in the embedded original line
   numbering mark elided lines; code below is preserved byte-identical.
   amd64 jump pad layout, as visible here: push all GPRs + flags + the
   tracepoint PC; reserve 0x18 bytes for the collecting_t object and
   fill it (tpoint, %fs:0x0 thread area); take the collector spin-lock
   with lock cmpxchg on *lockaddr; call the collector through %rax
   (collector may be >31 bits away); clear the lock; pop everything;
   relocate the original instruction into the pad; then emit 32-bit
   relative jumps back to the program and (returned via JJUMP_PAD_INSN)
   from the tracepoint into the pad, failing if either offset exceeds
   int32 range.  */
1481 /* Build a jump pad that saves registers and calls a collection
1482 function. Writes a jump instruction to the jump pad to
1483 JJUMPAD_INSN. The caller is responsible to write it in at the
1484 tracepoint address. */
1487 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1488 CORE_ADDR collector
,
1491 CORE_ADDR
*jump_entry
,
1492 CORE_ADDR
*trampoline
,
1493 ULONGEST
*trampoline_size
,
1494 unsigned char *jjump_pad_insn
,
1495 ULONGEST
*jjump_pad_insn_size
,
1496 CORE_ADDR
*adjusted_insn_addr
,
1497 CORE_ADDR
*adjusted_insn_addr_end
,
1500 unsigned char buf
[40];
1504 CORE_ADDR buildaddr
= *jump_entry
;
1506 /* Build the jump pad. */
1508 /* First, do tracepoint data collection. Save registers. */
1510 /* Need to ensure stack pointer saved first. */
1511 buf
[i
++] = 0x54; /* push %rsp */
1512 buf
[i
++] = 0x55; /* push %rbp */
1513 buf
[i
++] = 0x57; /* push %rdi */
1514 buf
[i
++] = 0x56; /* push %rsi */
1515 buf
[i
++] = 0x52; /* push %rdx */
1516 buf
[i
++] = 0x51; /* push %rcx */
1517 buf
[i
++] = 0x53; /* push %rbx */
1518 buf
[i
++] = 0x50; /* push %rax */
1519 buf
[i
++] = 0x41; buf
[i
++] = 0x57; /* push %r15 */
1520 buf
[i
++] = 0x41; buf
[i
++] = 0x56; /* push %r14 */
1521 buf
[i
++] = 0x41; buf
[i
++] = 0x55; /* push %r13 */
1522 buf
[i
++] = 0x41; buf
[i
++] = 0x54; /* push %r12 */
1523 buf
[i
++] = 0x41; buf
[i
++] = 0x53; /* push %r11 */
1524 buf
[i
++] = 0x41; buf
[i
++] = 0x52; /* push %r10 */
1525 buf
[i
++] = 0x41; buf
[i
++] = 0x51; /* push %r9 */
1526 buf
[i
++] = 0x41; buf
[i
++] = 0x50; /* push %r8 */
1527 buf
[i
++] = 0x9c; /* pushfq */
1528 buf
[i
++] = 0x48; /* movl <addr>,%rdi */
1530 *((unsigned long *)(buf
+ i
)) = (unsigned long) tpaddr
;
1531 i
+= sizeof (unsigned long);
1532 buf
[i
++] = 0x57; /* push %rdi */
1533 append_insns (&buildaddr
, i
, buf
);
1535 /* Stack space for the collecting_t object. */
1537 i
+= push_opcode (&buf
[i
], "48 83 ec 18"); /* sub $0x18,%rsp */
1538 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov <tpoint>,%rax */
1539 memcpy (buf
+ i
, &tpoint
, 8);
1541 i
+= push_opcode (&buf
[i
], "48 89 04 24"); /* mov %rax,(%rsp) */
1542 i
+= push_opcode (&buf
[i
],
1543 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1544 i
+= push_opcode (&buf
[i
], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1545 append_insns (&buildaddr
, i
, buf
);
1549 i
+= push_opcode (&buf
[i
], "48 be"); /* movl <lockaddr>,%rsi */
1550 memcpy (&buf
[i
], (void *) &lockaddr
, 8);
1552 i
+= push_opcode (&buf
[i
], "48 89 e1"); /* mov %rsp,%rcx */
1553 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1554 i
+= push_opcode (&buf
[i
], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1555 i
+= push_opcode (&buf
[i
], "48 85 c0"); /* test %rax,%rax */
1556 i
+= push_opcode (&buf
[i
], "75 f4"); /* jne <again> */
1557 append_insns (&buildaddr
, i
, buf
);
1559 /* Set up the gdb_collect call. */
1560 /* At this point, (stack pointer + 0x18) is the base of our saved
1564 i
+= push_opcode (&buf
[i
], "48 89 e6"); /* mov %rsp,%rsi */
1565 i
+= push_opcode (&buf
[i
], "48 83 c6 18"); /* add $0x18,%rsi */
1567 /* tpoint address may be 64-bit wide. */
1568 i
+= push_opcode (&buf
[i
], "48 bf"); /* movl <addr>,%rdi */
1569 memcpy (buf
+ i
, &tpoint
, 8);
1571 append_insns (&buildaddr
, i
, buf
);
1573 /* The collector function being in the shared library, may be
1574 >31-bits away off the jump pad. */
1576 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov $collector,%rax */
1577 memcpy (buf
+ i
, &collector
, 8);
1579 i
+= push_opcode (&buf
[i
], "ff d0"); /* callq *%rax */
1580 append_insns (&buildaddr
, i
, buf
);
1582 /* Clear the spin-lock. */
1584 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1585 i
+= push_opcode (&buf
[i
], "48 a3"); /* mov %rax, lockaddr */
1586 memcpy (buf
+ i
, &lockaddr
, 8);
1588 append_insns (&buildaddr
, i
, buf
);
1590 /* Remove stack that had been used for the collect_t object. */
1592 i
+= push_opcode (&buf
[i
], "48 83 c4 18"); /* add $0x18,%rsp */
1593 append_insns (&buildaddr
, i
, buf
);
1595 /* Restore register state. */
1597 buf
[i
++] = 0x48; /* add $0x8,%rsp */
1601 buf
[i
++] = 0x9d; /* popfq */
1602 buf
[i
++] = 0x41; buf
[i
++] = 0x58; /* pop %r8 */
1603 buf
[i
++] = 0x41; buf
[i
++] = 0x59; /* pop %r9 */
1604 buf
[i
++] = 0x41; buf
[i
++] = 0x5a; /* pop %r10 */
1605 buf
[i
++] = 0x41; buf
[i
++] = 0x5b; /* pop %r11 */
1606 buf
[i
++] = 0x41; buf
[i
++] = 0x5c; /* pop %r12 */
1607 buf
[i
++] = 0x41; buf
[i
++] = 0x5d; /* pop %r13 */
1608 buf
[i
++] = 0x41; buf
[i
++] = 0x5e; /* pop %r14 */
1609 buf
[i
++] = 0x41; buf
[i
++] = 0x5f; /* pop %r15 */
1610 buf
[i
++] = 0x58; /* pop %rax */
1611 buf
[i
++] = 0x5b; /* pop %rbx */
1612 buf
[i
++] = 0x59; /* pop %rcx */
1613 buf
[i
++] = 0x5a; /* pop %rdx */
1614 buf
[i
++] = 0x5e; /* pop %rsi */
1615 buf
[i
++] = 0x5f; /* pop %rdi */
1616 buf
[i
++] = 0x5d; /* pop %rbp */
1617 buf
[i
++] = 0x5c; /* pop %rsp */
1618 append_insns (&buildaddr
, i
, buf
);
1620 /* Now, adjust the original instruction to execute in the jump
1622 *adjusted_insn_addr
= buildaddr
;
1623 relocate_instruction (&buildaddr
, tpaddr
);
1624 *adjusted_insn_addr_end
= buildaddr
;
1626 /* Finally, write a jump back to the program. */
1628 loffset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1629 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1632 "E.Jump back from jump pad too far from tracepoint "
1633 "(offset 0x%" PRIx64
" > int32).", loffset
);
1637 offset
= (int) loffset
;
1638 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1639 memcpy (buf
+ 1, &offset
, 4);
1640 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1642 /* The jump pad is now built. Wire in a jump to our jump pad. This
1643 is always done last (by our caller actually), so that we can
1644 install fast tracepoints with threads running. This relies on
1645 the agent's atomic write support. */
1646 loffset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1647 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1650 "E.Jump pad too far from tracepoint "
1651 "(offset 0x%" PRIx64
" > int32).", loffset
);
1655 offset
= (int) loffset
;
1657 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1658 memcpy (buf
+ 1, &offset
, 4);
1659 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1660 *jjump_pad_insn_size
= sizeof (jump_insn
);
1662 /* Return the end address of our pad. */
1663 *jump_entry
= buildaddr
;
1668 #endif /* __x86_64__ */
/* NOTE(review): lossy extraction -- gaps in the embedded original line
   numbering mark elided lines; code below is preserved byte-identical.
   i386 jump pad, as visible here: pushad + push tpaddr + pushf + segment
   registers; build the collecting_t object on the stack; take the
   spin-lock with lock cmpxchg on *lockaddr; call the collector with a
   32-bit relative call; clear the lock; restore segments/flags/GPRs;
   relocate the original instruction; then either allocate a trampoline
   (claim_trampoline_space) reached by a 16-bit relative jump, or fall
   back to a 32-bit relative jump from the tracepoint to the pad.  */
1670 /* Build a jump pad that saves registers and calls a collection
1671 function. Writes a jump instruction to the jump pad to
1672 JJUMPAD_INSN. The caller is responsible to write it in at the
1673 tracepoint address. */
1676 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1677 CORE_ADDR collector
,
1680 CORE_ADDR
*jump_entry
,
1681 CORE_ADDR
*trampoline
,
1682 ULONGEST
*trampoline_size
,
1683 unsigned char *jjump_pad_insn
,
1684 ULONGEST
*jjump_pad_insn_size
,
1685 CORE_ADDR
*adjusted_insn_addr
,
1686 CORE_ADDR
*adjusted_insn_addr_end
,
1689 unsigned char buf
[0x100];
1691 CORE_ADDR buildaddr
= *jump_entry
;
1693 /* Build the jump pad. */
1695 /* First, do tracepoint data collection. Save registers. */
1697 buf
[i
++] = 0x60; /* pushad */
1698 buf
[i
++] = 0x68; /* push tpaddr aka $pc */
1699 *((int *)(buf
+ i
)) = (int) tpaddr
;
1701 buf
[i
++] = 0x9c; /* pushf */
1702 buf
[i
++] = 0x1e; /* push %ds */
1703 buf
[i
++] = 0x06; /* push %es */
1704 buf
[i
++] = 0x0f; /* push %fs */
1706 buf
[i
++] = 0x0f; /* push %gs */
1708 buf
[i
++] = 0x16; /* push %ss */
1709 buf
[i
++] = 0x0e; /* push %cs */
1710 append_insns (&buildaddr
, i
, buf
);
1712 /* Stack space for the collecting_t object. */
1714 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1716 /* Build the object. */
1717 i
+= push_opcode (&buf
[i
], "b8"); /* mov <tpoint>,%eax */
1718 memcpy (buf
+ i
, &tpoint
, 4);
1720 i
+= push_opcode (&buf
[i
], "89 04 24"); /* mov %eax,(%esp) */
1722 i
+= push_opcode (&buf
[i
], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1723 i
+= push_opcode (&buf
[i
], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1724 append_insns (&buildaddr
, i
, buf
);
1726 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1727 If we cared for it, this could be using xchg alternatively. */
1730 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1731 i
+= push_opcode (&buf
[i
], "f0 0f b1 25"); /* lock cmpxchg
1733 memcpy (&buf
[i
], (void *) &lockaddr
, 4);
1735 i
+= push_opcode (&buf
[i
], "85 c0"); /* test %eax,%eax */
1736 i
+= push_opcode (&buf
[i
], "75 f2"); /* jne <again> */
1737 append_insns (&buildaddr
, i
, buf
);
1740 /* Set up arguments to the gdb_collect call. */
1742 i
+= push_opcode (&buf
[i
], "89 e0"); /* mov %esp,%eax */
1743 i
+= push_opcode (&buf
[i
], "83 c0 08"); /* add $0x08,%eax */
1744 i
+= push_opcode (&buf
[i
], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1745 append_insns (&buildaddr
, i
, buf
);
1748 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1749 append_insns (&buildaddr
, i
, buf
);
1752 i
+= push_opcode (&buf
[i
], "c7 04 24"); /* movl <addr>,(%esp) */
1753 memcpy (&buf
[i
], (void *) &tpoint
, 4);
1755 append_insns (&buildaddr
, i
, buf
);
1757 buf
[0] = 0xe8; /* call <reladdr> */
1758 offset
= collector
- (buildaddr
+ sizeof (jump_insn
));
1759 memcpy (buf
+ 1, &offset
, 4);
1760 append_insns (&buildaddr
, 5, buf
);
1761 /* Clean up after the call. */
1762 buf
[0] = 0x83; /* add $0x8,%esp */
1765 append_insns (&buildaddr
, 3, buf
);
1768 /* Clear the spin-lock. This would need the LOCK prefix on older
1771 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1772 i
+= push_opcode (&buf
[i
], "a3"); /* mov %eax, lockaddr */
1773 memcpy (buf
+ i
, &lockaddr
, 4);
1775 append_insns (&buildaddr
, i
, buf
);
1778 /* Remove stack that had been used for the collect_t object. */
1780 i
+= push_opcode (&buf
[i
], "83 c4 08"); /* add $0x08,%esp */
1781 append_insns (&buildaddr
, i
, buf
);
1784 buf
[i
++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1787 buf
[i
++] = 0x17; /* pop %ss */
1788 buf
[i
++] = 0x0f; /* pop %gs */
1790 buf
[i
++] = 0x0f; /* pop %fs */
1792 buf
[i
++] = 0x07; /* pop %es */
1793 buf
[i
++] = 0x1f; /* pop %ds */
1794 buf
[i
++] = 0x9d; /* popf */
1795 buf
[i
++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1798 buf
[i
++] = 0x61; /* popad */
1799 append_insns (&buildaddr
, i
, buf
);
1801 /* Now, adjust the original instruction to execute in the jump
1803 *adjusted_insn_addr
= buildaddr
;
1804 relocate_instruction (&buildaddr
, tpaddr
);
1805 *adjusted_insn_addr_end
= buildaddr
;
1807 /* Write the jump back to the program. */
1808 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1809 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1810 memcpy (buf
+ 1, &offset
, 4);
1811 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1813 /* The jump pad is now built. Wire in a jump to our jump pad. This
1814 is always done last (by our caller actually), so that we can
1815 install fast tracepoints with threads running. This relies on
1816 the agent's atomic write support. */
1819 /* Create a trampoline. */
1820 *trampoline_size
= sizeof (jump_insn
);
1821 if (!claim_trampoline_space (*trampoline_size
, trampoline
))
1823 /* No trampoline space available. */
1825 "E.Cannot allocate trampoline space needed for fast "
1826 "tracepoints on 4-byte instructions.");
1830 offset
= *jump_entry
- (*trampoline
+ sizeof (jump_insn
));
1831 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1832 memcpy (buf
+ 1, &offset
, 4);
1833 write_inferior_memory (*trampoline
, buf
, sizeof (jump_insn
));
1835 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1836 offset
= (*trampoline
- (tpaddr
+ sizeof (small_jump_insn
))) & 0xffff;
1837 memcpy (buf
, small_jump_insn
, sizeof (small_jump_insn
));
1838 memcpy (buf
+ 2, &offset
, 2);
1839 memcpy (jjump_pad_insn
, buf
, sizeof (small_jump_insn
));
1840 *jjump_pad_insn_size
= sizeof (small_jump_insn
);
1844 /* Else use a 32-bit relative jump instruction. */
1845 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1846 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1847 memcpy (buf
+ 1, &offset
, 4);
1848 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1849 *jjump_pad_insn_size
= sizeof (jump_insn
);
1852 /* Return the end address of our pad. */
1853 *jump_entry
= buildaddr
;
/* Dispatcher: forwards all jump-pad arguments to the amd64 builder when
   is_64bit_tdesc () is true, otherwise to the i386 builder.
   NOTE(review): some argument lines are elided in this extraction.  */
1859 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1860 CORE_ADDR collector
,
1863 CORE_ADDR
*jump_entry
,
1864 CORE_ADDR
*trampoline
,
1865 ULONGEST
*trampoline_size
,
1866 unsigned char *jjump_pad_insn
,
1867 ULONGEST
*jjump_pad_insn_size
,
1868 CORE_ADDR
*adjusted_insn_addr
,
1869 CORE_ADDR
*adjusted_insn_addr_end
,
1873 if (is_64bit_tdesc ())
1874 return amd64_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1875 collector
, lockaddr
,
1876 orig_size
, jump_entry
,
1877 trampoline
, trampoline_size
,
1879 jjump_pad_insn_size
,
1881 adjusted_insn_addr_end
,
1885 return i386_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1886 collector
, lockaddr
,
1887 orig_size
, jump_entry
,
1888 trampoline
, trampoline_size
,
1890 jjump_pad_insn_size
,
1892 adjusted_insn_addr_end
,
/* Minimum instruction length usable for a fast tracepoint: always the
   5-byte jump on x86-64; on i386, 4 bytes if the IPA reports a
   trampoline buffer, else 5; warns once (via the static flag) when the
   shorter form is unavailable.  NOTE(review): the literal return values
   are on lines elided from this extraction.  */
1896 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1900 x86_get_min_fast_tracepoint_insn_len (void)
1902 static int warned_about_fast_tracepoints
= 0;
1905 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1906 used for fast tracepoints. */
1907 if (is_64bit_tdesc ())
1911 if (agent_loaded_p ())
1913 char errbuf
[IPA_BUFSIZ
];
1917 /* On x86, if trampolines are available, then 4-byte jump instructions
1918 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1919 with a 4-byte offset are used instead. */
1920 if (have_fast_tracepoint_trampoline_buffer (errbuf
))
1924 /* GDB has no channel to explain to user why a shorter fast
1925 tracepoint is not possible, but at least make GDBserver
1926 mention that something has gone awry. */
1927 if (!warned_about_fast_tracepoints
)
1929 warning ("4-byte fast tracepoints not available; %s\n", errbuf
)
;
1930 warned_about_fast_tracepoints
= 1;
1937 /* Indicate that the minimum length is currently unknown since the IPA
1938 has not loaded yet. */
/* Append LEN raw instruction bytes from START at current_insn_ptr in
   the inferior (optionally tracing via debug_printf), then advance
   current_insn_ptr past them.  */
1944 add_insns (unsigned char *start
, int len
)
1946 CORE_ADDR buildaddr
= current_insn_ptr
;
1949 debug_printf ("Adding %d bytes of insn at %s\n",
1950 len
, paddress (buildaddr
));
1952 append_insns (&buildaddr
, len
, start
);
1953 current_insn_ptr
= buildaddr
;
/* EMIT_ASM / EMIT_ASM32 embed a named inline-asm region between
   start_NAME/end_NAME labels and copy its bytes into the inferior with
   add_insns; the jmp over the region keeps it from executing in
   gdbserver itself.  EMIT_ASM32 wraps the region in .code32 on 64-bit
   builds and falls back to plain EMIT_ASM otherwise.
   NOTE(review): some macro continuation lines are elided here.  */
1956 /* Our general strategy for emitting code is to avoid specifying raw
1957 bytes whenever possible, and instead copy a block of inline asm
1958 that is embedded in the function. This is a little messy, because
1959 we need to keep the compiler from discarding what looks like dead
1960 code, plus suppress various warnings. */
1962 #define EMIT_ASM(NAME, INSNS) \
1965 extern unsigned char start_ ## NAME, end_ ## NAME; \
1966 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1967 __asm__ ("jmp end_" #NAME "\n" \
1968 "\t" "start_" #NAME ":" \
1970 "\t" "end_" #NAME ":"); \
1975 #define EMIT_ASM32(NAME,INSNS) \
1978 extern unsigned char start_ ## NAME, end_ ## NAME; \
1979 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1980 __asm__ (".code32\n" \
1981 "\t" "jmp end_" #NAME "\n" \
1982 "\t" "start_" #NAME ":\n" \
1984 "\t" "end_" #NAME ":\n" \
1990 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
/* NOTE(review): amd64 agent-expression bytecode emitters (the backing
   functions for amd64_emit_ops).  Lossy extraction -- many bodies are
   partially elided; code below kept byte-identical.  Convention visible
   here: top of stack lives in %rax, the rest on the machine stack;
   helpers emit either inline-asm copies (EMIT_ASM) or raw opcode bytes
   via append_insns at current_insn_ptr.  */
1997 amd64_emit_prologue (void)
1999 EMIT_ASM (amd64_prologue
,
2001 "movq %rsp,%rbp\n\t"
2002 "sub $0x20,%rsp\n\t"
2003 "movq %rdi,-8(%rbp)\n\t"
2004 "movq %rsi,-16(%rbp)");
2009 amd64_emit_epilogue (void)
2011 EMIT_ASM (amd64_epilogue
,
2012 "movq -16(%rbp),%rdi\n\t"
2013 "movq %rax,(%rdi)\n\t"
2020 amd64_emit_add (void)
2022 EMIT_ASM (amd64_add
,
2023 "add (%rsp),%rax\n\t"
2024 "lea 0x8(%rsp),%rsp");
2028 amd64_emit_sub (void)
2030 EMIT_ASM (amd64_sub
,
2031 "sub %rax,(%rsp)\n\t"
2036 amd64_emit_mul (void)
2042 amd64_emit_lsh (void)
2048 amd64_emit_rsh_signed (void)
2054 amd64_emit_rsh_unsigned (void)
2060 amd64_emit_ext (int arg
)
2065 EMIT_ASM (amd64_ext_8
,
2071 EMIT_ASM (amd64_ext_16
,
2076 EMIT_ASM (amd64_ext_32
,
2085 amd64_emit_log_not (void)
2087 EMIT_ASM (amd64_log_not
,
2088 "test %rax,%rax\n\t"
2094 amd64_emit_bit_and (void)
2096 EMIT_ASM (amd64_and
,
2097 "and (%rsp),%rax\n\t"
2098 "lea 0x8(%rsp),%rsp");
2102 amd64_emit_bit_or (void)
2105 "or (%rsp),%rax\n\t"
2106 "lea 0x8(%rsp),%rsp");
2110 amd64_emit_bit_xor (void)
2112 EMIT_ASM (amd64_xor
,
2113 "xor (%rsp),%rax\n\t"
2114 "lea 0x8(%rsp),%rsp");
2118 amd64_emit_bit_not (void)
2120 EMIT_ASM (amd64_bit_not
,
2121 "xorq $0xffffffffffffffff,%rax");
2125 amd64_emit_equal (void)
2127 EMIT_ASM (amd64_equal
,
2128 "cmp %rax,(%rsp)\n\t"
2129 "je .Lamd64_equal_true\n\t"
2131 "jmp .Lamd64_equal_end\n\t"
2132 ".Lamd64_equal_true:\n\t"
2134 ".Lamd64_equal_end:\n\t"
2135 "lea 0x8(%rsp),%rsp");
2139 amd64_emit_less_signed (void)
2141 EMIT_ASM (amd64_less_signed
,
2142 "cmp %rax,(%rsp)\n\t"
2143 "jl .Lamd64_less_signed_true\n\t"
2145 "jmp .Lamd64_less_signed_end\n\t"
2146 ".Lamd64_less_signed_true:\n\t"
2148 ".Lamd64_less_signed_end:\n\t"
2149 "lea 0x8(%rsp),%rsp");
2153 amd64_emit_less_unsigned (void)
2155 EMIT_ASM (amd64_less_unsigned
,
2156 "cmp %rax,(%rsp)\n\t"
2157 "jb .Lamd64_less_unsigned_true\n\t"
2159 "jmp .Lamd64_less_unsigned_end\n\t"
2160 ".Lamd64_less_unsigned_true:\n\t"
2162 ".Lamd64_less_unsigned_end:\n\t"
2163 "lea 0x8(%rsp),%rsp");
2167 amd64_emit_ref (int size
)
2172 EMIT_ASM (amd64_ref1
,
2176 EMIT_ASM (amd64_ref2
,
2180 EMIT_ASM (amd64_ref4
,
2181 "movl (%rax),%eax");
2184 EMIT_ASM (amd64_ref8
,
2185 "movq (%rax),%rax");
2191 amd64_emit_if_goto (int *offset_p
, int *size_p
)
2193 EMIT_ASM (amd64_if_goto
,
2197 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2205 amd64_emit_goto (int *offset_p
, int *size_p
)
2207 EMIT_ASM (amd64_goto
,
2208 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2216 amd64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2218 int diff
= (to
- (from
+ size
));
2219 unsigned char buf
[sizeof (int)];
2227 memcpy (buf
, &diff
, sizeof (int));
2228 write_inferior_memory (from
, buf
, sizeof (int));
2232 amd64_emit_const (LONGEST num
)
2234 unsigned char buf
[16];
2236 CORE_ADDR buildaddr
= current_insn_ptr
;
2239 buf
[i
++] = 0x48; buf
[i
++] = 0xb8; /* mov $<n>,%rax */
2240 memcpy (&buf
[i
], &num
, sizeof (num
));
2242 append_insns (&buildaddr
, i
, buf
);
2243 current_insn_ptr
= buildaddr
;
2247 amd64_emit_call (CORE_ADDR fn
)
2249 unsigned char buf
[16];
2251 CORE_ADDR buildaddr
;
2254 /* The destination function being in the shared library, may be
2255 >31-bits away off the compiled code pad. */
2257 buildaddr
= current_insn_ptr
;
2259 offset64
= fn
- (buildaddr
+ 1 /* call op */ + 4 /* 32-bit offset */);
2263 if (offset64
> INT_MAX
|| offset64
< INT_MIN
)
2265 /* Offset is too large for a call. Use callq, but that requires
2266 a register, so avoid it if possible. Use r10, since it is
2267 call-clobbered, we don't have to push/pop it. */
2268 buf
[i
++] = 0x48; /* mov $fn,%r10 */
2270 memcpy (buf
+ i
, &fn
, 8);
2272 buf
[i
++] = 0xff; /* callq *%r10 */
2277 int offset32
= offset64
; /* we know we can't overflow here. */
2278 memcpy (buf
+ i
, &offset32
, 4);
2282 append_insns (&buildaddr
, i
, buf
);
2283 current_insn_ptr
= buildaddr
;
2287 amd64_emit_reg (int reg
)
2289 unsigned char buf
[16];
2291 CORE_ADDR buildaddr
;
2293 /* Assume raw_regs is still in %rdi. */
2294 buildaddr
= current_insn_ptr
;
2296 buf
[i
++] = 0xbe; /* mov $<n>,%esi */
2297 memcpy (&buf
[i
], &reg
, sizeof (reg
));
2299 append_insns (&buildaddr
, i
, buf
);
2300 current_insn_ptr
= buildaddr
;
2301 amd64_emit_call (get_raw_reg_func_addr ());
2305 amd64_emit_pop (void)
2307 EMIT_ASM (amd64_pop
,
2312 amd64_emit_stack_flush (void)
2314 EMIT_ASM (amd64_stack_flush
,
2319 amd64_emit_zero_ext (int arg
)
2324 EMIT_ASM (amd64_zero_ext_8
,
2328 EMIT_ASM (amd64_zero_ext_16
,
2329 "and $0xffff,%rax");
2332 EMIT_ASM (amd64_zero_ext_32
,
2333 "mov $0xffffffff,%rcx\n\t"
2342 amd64_emit_swap (void)
2344 EMIT_ASM (amd64_swap
,
2351 amd64_emit_stack_adjust (int n
)
2353 unsigned char buf
[16];
2355 CORE_ADDR buildaddr
= current_insn_ptr
;
2358 buf
[i
++] = 0x48; /* lea $<n>(%rsp),%rsp */
2362 /* This only handles adjustments up to 16, but we don't expect any more. */
2364 append_insns (&buildaddr
, i
, buf
);
2365 current_insn_ptr
= buildaddr
;
2368 /* FN's prototype is `LONGEST(*fn)(int)'. */
2371 amd64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2373 unsigned char buf
[16];
2375 CORE_ADDR buildaddr
;
2377 buildaddr
= current_insn_ptr
;
2379 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2380 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2382 append_insns (&buildaddr
, i
, buf
);
2383 current_insn_ptr
= buildaddr
;
2384 amd64_emit_call (fn
);
2387 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2390 amd64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2392 unsigned char buf
[16];
2394 CORE_ADDR buildaddr
;
2396 buildaddr
= current_insn_ptr
;
2398 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2399 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2401 append_insns (&buildaddr
, i
, buf
);
2402 current_insn_ptr
= buildaddr
;
2403 EMIT_ASM (amd64_void_call_2_a
,
2404 /* Save away a copy of the stack top. */
2406 /* Also pass top as the second argument. */
2408 amd64_emit_call (fn
);
2409 EMIT_ASM (amd64_void_call_2_b
,
2410 /* Restore the stack top, %rax may have been trashed. */
2415 amd64_emit_eq_goto (int *offset_p
, int *size_p
)
2418 "cmp %rax,(%rsp)\n\t"
2419 "jne .Lamd64_eq_fallthru\n\t"
2420 "lea 0x8(%rsp),%rsp\n\t"
2422 /* jmp, but don't trust the assembler to choose the right jump */
2423 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2424 ".Lamd64_eq_fallthru:\n\t"
2425 "lea 0x8(%rsp),%rsp\n\t"
2435 amd64_emit_ne_goto (int *offset_p
, int *size_p
)
2438 "cmp %rax,(%rsp)\n\t"
2439 "je .Lamd64_ne_fallthru\n\t"
2440 "lea 0x8(%rsp),%rsp\n\t"
2442 /* jmp, but don't trust the assembler to choose the right jump */
2443 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2444 ".Lamd64_ne_fallthru:\n\t"
2445 "lea 0x8(%rsp),%rsp\n\t"
2455 amd64_emit_lt_goto (int *offset_p
, int *size_p
)
2458 "cmp %rax,(%rsp)\n\t"
2459 "jnl .Lamd64_lt_fallthru\n\t"
2460 "lea 0x8(%rsp),%rsp\n\t"
2462 /* jmp, but don't trust the assembler to choose the right jump */
2463 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2464 ".Lamd64_lt_fallthru:\n\t"
2465 "lea 0x8(%rsp),%rsp\n\t"
2475 amd64_emit_le_goto (int *offset_p
, int *size_p
)
2478 "cmp %rax,(%rsp)\n\t"
2479 "jnle .Lamd64_le_fallthru\n\t"
2480 "lea 0x8(%rsp),%rsp\n\t"
2482 /* jmp, but don't trust the assembler to choose the right jump */
2483 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2484 ".Lamd64_le_fallthru:\n\t"
2485 "lea 0x8(%rsp),%rsp\n\t"
2495 amd64_emit_gt_goto (int *offset_p
, int *size_p
)
2498 "cmp %rax,(%rsp)\n\t"
2499 "jng .Lamd64_gt_fallthru\n\t"
2500 "lea 0x8(%rsp),%rsp\n\t"
2502 /* jmp, but don't trust the assembler to choose the right jump */
2503 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2504 ".Lamd64_gt_fallthru:\n\t"
2505 "lea 0x8(%rsp),%rsp\n\t"
2515 amd64_emit_ge_goto (int *offset_p
, int *size_p
)
2518 "cmp %rax,(%rsp)\n\t"
2519 "jnge .Lamd64_ge_fallthru\n\t"
2520 ".Lamd64_ge_jump:\n\t"
2521 "lea 0x8(%rsp),%rsp\n\t"
2523 /* jmp, but don't trust the assembler to choose the right jump */
2524 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2525 ".Lamd64_ge_fallthru:\n\t"
2526 "lea 0x8(%rsp),%rsp\n\t"
/* vtable wiring the amd64_emit_* helpers into the generic bytecode
   compiler.  NOTE(review): several member initializers are elided in
   this extraction.  */
2535 struct emit_ops amd64_emit_ops
=
2537 amd64_emit_prologue
,
2538 amd64_emit_epilogue
,
2543 amd64_emit_rsh_signed
,
2544 amd64_emit_rsh_unsigned
,
2552 amd64_emit_less_signed
,
2553 amd64_emit_less_unsigned
,
2557 amd64_write_goto_address
,
2562 amd64_emit_stack_flush
,
2563 amd64_emit_zero_ext
,
2565 amd64_emit_stack_adjust
,
2566 amd64_emit_int_call_1
,
2567 amd64_emit_void_call_2
,
2576 #endif /* __x86_64__ */
/* NOTE(review): i386 agent-expression bytecode emitters (backing
   i386_emit_ops).  Lossy extraction -- many bodies partially elided;
   code kept byte-identical.  Convention visible here: 64-bit top of
   stack is split across %eax (low half) and %ebx (high half), the rest
   on the machine stack.  */
2579 i386_emit_prologue (void)
2581 EMIT_ASM32 (i386_prologue
,
2585 /* At this point, the raw regs base address is at 8(%ebp), and the
2586 value pointer is at 12(%ebp). */
2590 i386_emit_epilogue (void)
2592 EMIT_ASM32 (i386_epilogue
,
2593 "mov 12(%ebp),%ecx\n\t"
2594 "mov %eax,(%ecx)\n\t"
2595 "mov %ebx,0x4(%ecx)\n\t"
2603 i386_emit_add (void)
2605 EMIT_ASM32 (i386_add
,
2606 "add (%esp),%eax\n\t"
2607 "adc 0x4(%esp),%ebx\n\t"
2608 "lea 0x8(%esp),%esp");
2612 i386_emit_sub (void)
2614 EMIT_ASM32 (i386_sub
,
2615 "subl %eax,(%esp)\n\t"
2616 "sbbl %ebx,4(%esp)\n\t"
2622 i386_emit_mul (void)
2628 i386_emit_lsh (void)
2634 i386_emit_rsh_signed (void)
2640 i386_emit_rsh_unsigned (void)
2646 i386_emit_ext (int arg
)
2651 EMIT_ASM32 (i386_ext_8
,
2654 "movl %eax,%ebx\n\t"
2658 EMIT_ASM32 (i386_ext_16
,
2660 "movl %eax,%ebx\n\t"
2664 EMIT_ASM32 (i386_ext_32
,
2665 "movl %eax,%ebx\n\t"
2674 i386_emit_log_not (void)
2676 EMIT_ASM32 (i386_log_not
,
2678 "test %eax,%eax\n\t"
2685 i386_emit_bit_and (void)
2687 EMIT_ASM32 (i386_and
,
2688 "and (%esp),%eax\n\t"
2689 "and 0x4(%esp),%ebx\n\t"
2690 "lea 0x8(%esp),%esp");
2694 i386_emit_bit_or (void)
2696 EMIT_ASM32 (i386_or
,
2697 "or (%esp),%eax\n\t"
2698 "or 0x4(%esp),%ebx\n\t"
2699 "lea 0x8(%esp),%esp");
2703 i386_emit_bit_xor (void)
2705 EMIT_ASM32 (i386_xor
,
2706 "xor (%esp),%eax\n\t"
2707 "xor 0x4(%esp),%ebx\n\t"
2708 "lea 0x8(%esp),%esp");
2712 i386_emit_bit_not (void)
2714 EMIT_ASM32 (i386_bit_not
,
2715 "xor $0xffffffff,%eax\n\t"
2716 "xor $0xffffffff,%ebx\n\t");
2720 i386_emit_equal (void)
2722 EMIT_ASM32 (i386_equal
,
2723 "cmpl %ebx,4(%esp)\n\t"
2724 "jne .Li386_equal_false\n\t"
2725 "cmpl %eax,(%esp)\n\t"
2726 "je .Li386_equal_true\n\t"
2727 ".Li386_equal_false:\n\t"
2729 "jmp .Li386_equal_end\n\t"
2730 ".Li386_equal_true:\n\t"
2732 ".Li386_equal_end:\n\t"
2734 "lea 0x8(%esp),%esp");
2738 i386_emit_less_signed (void)
2740 EMIT_ASM32 (i386_less_signed
,
2741 "cmpl %ebx,4(%esp)\n\t"
2742 "jl .Li386_less_signed_true\n\t"
2743 "jne .Li386_less_signed_false\n\t"
2744 "cmpl %eax,(%esp)\n\t"
2745 "jl .Li386_less_signed_true\n\t"
2746 ".Li386_less_signed_false:\n\t"
2748 "jmp .Li386_less_signed_end\n\t"
2749 ".Li386_less_signed_true:\n\t"
2751 ".Li386_less_signed_end:\n\t"
2753 "lea 0x8(%esp),%esp");
2757 i386_emit_less_unsigned (void)
2759 EMIT_ASM32 (i386_less_unsigned
,
2760 "cmpl %ebx,4(%esp)\n\t"
2761 "jb .Li386_less_unsigned_true\n\t"
2762 "jne .Li386_less_unsigned_false\n\t"
2763 "cmpl %eax,(%esp)\n\t"
2764 "jb .Li386_less_unsigned_true\n\t"
2765 ".Li386_less_unsigned_false:\n\t"
2767 "jmp .Li386_less_unsigned_end\n\t"
2768 ".Li386_less_unsigned_true:\n\t"
2770 ".Li386_less_unsigned_end:\n\t"
2772 "lea 0x8(%esp),%esp");
2776 i386_emit_ref (int size
)
2781 EMIT_ASM32 (i386_ref1
,
2785 EMIT_ASM32 (i386_ref2
,
2789 EMIT_ASM32 (i386_ref4
,
2790 "movl (%eax),%eax");
2793 EMIT_ASM32 (i386_ref8
,
2794 "movl 4(%eax),%ebx\n\t"
2795 "movl (%eax),%eax");
2801 i386_emit_if_goto (int *offset_p
, int *size_p
)
2803 EMIT_ASM32 (i386_if_goto
,
2809 /* Don't trust the assembler to choose the right jump */
2810 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2813 *offset_p
= 11; /* be sure that this matches the sequence above */
2819 i386_emit_goto (int *offset_p
, int *size_p
)
2821 EMIT_ASM32 (i386_goto
,
2822 /* Don't trust the assembler to choose the right jump */
2823 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2831 i386_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2833 int diff
= (to
- (from
+ size
));
2834 unsigned char buf
[sizeof (int)];
2836 /* We're only doing 4-byte sizes at the moment. */
2843 memcpy (buf
, &diff
, sizeof (int));
2844 write_inferior_memory (from
, buf
, sizeof (int));
2848 i386_emit_const (LONGEST num
)
2850 unsigned char buf
[16];
2852 CORE_ADDR buildaddr
= current_insn_ptr
;
2855 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2856 lo
= num
& 0xffffffff;
2857 memcpy (&buf
[i
], &lo
, sizeof (lo
));
2859 hi
= ((num
>> 32) & 0xffffffff);
2862 buf
[i
++] = 0xbb; /* mov $<n>,%ebx */
2863 memcpy (&buf
[i
], &hi
, sizeof (hi
));
2868 buf
[i
++] = 0x31; buf
[i
++] = 0xdb; /* xor %ebx,%ebx */
2870 append_insns (&buildaddr
, i
, buf
);
2871 current_insn_ptr
= buildaddr
;
2875 i386_emit_call (CORE_ADDR fn
)
2877 unsigned char buf
[16];
2879 CORE_ADDR buildaddr
;
2881 buildaddr
= current_insn_ptr
;
2883 buf
[i
++] = 0xe8; /* call <reladdr> */
2884 offset
= ((int) fn
) - (buildaddr
+ 5);
2885 memcpy (buf
+ 1, &offset
, 4);
2886 append_insns (&buildaddr
, 5, buf
);
2887 current_insn_ptr
= buildaddr
;
2891 i386_emit_reg (int reg
)
2893 unsigned char buf
[16];
2895 CORE_ADDR buildaddr
;
2897 EMIT_ASM32 (i386_reg_a
,
2899 buildaddr
= current_insn_ptr
;
2901 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2902 memcpy (&buf
[i
], &reg
, sizeof (reg
));
2904 append_insns (&buildaddr
, i
, buf
);
2905 current_insn_ptr
= buildaddr
;
2906 EMIT_ASM32 (i386_reg_b
,
2907 "mov %eax,4(%esp)\n\t"
2908 "mov 8(%ebp),%eax\n\t"
2910 i386_emit_call (get_raw_reg_func_addr ());
2911 EMIT_ASM32 (i386_reg_c
,
2913 "lea 0x8(%esp),%esp");
2917 i386_emit_pop (void)
2919 EMIT_ASM32 (i386_pop
,
2925 i386_emit_stack_flush (void)
2927 EMIT_ASM32 (i386_stack_flush
,
2933 i386_emit_zero_ext (int arg
)
2938 EMIT_ASM32 (i386_zero_ext_8
,
2939 "and $0xff,%eax\n\t"
2943 EMIT_ASM32 (i386_zero_ext_16
,
2944 "and $0xffff,%eax\n\t"
2948 EMIT_ASM32 (i386_zero_ext_32
,
2957 i386_emit_swap (void)
2959 EMIT_ASM32 (i386_swap
,
2969 i386_emit_stack_adjust (int n
)
2971 unsigned char buf
[16];
2973 CORE_ADDR buildaddr
= current_insn_ptr
;
2976 buf
[i
++] = 0x8d; /* lea $<n>(%esp),%esp */
2980 append_insns (&buildaddr
, i
, buf
);
2981 current_insn_ptr
= buildaddr
;
2984 /* FN's prototype is `LONGEST(*fn)(int)'. */
2987 i386_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2989 unsigned char buf
[16];
2991 CORE_ADDR buildaddr
;
2993 EMIT_ASM32 (i386_int_call_1_a
,
2994 /* Reserve a bit of stack space. */
2996 /* Put the one argument on the stack. */
2997 buildaddr
= current_insn_ptr
;
2999 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3002 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3004 append_insns (&buildaddr
, i
, buf
);
3005 current_insn_ptr
= buildaddr
;
3006 i386_emit_call (fn
);
3007 EMIT_ASM32 (i386_int_call_1_c
,
3009 "lea 0x8(%esp),%esp");
3012 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
3015 i386_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
3017 unsigned char buf
[16];
3019 CORE_ADDR buildaddr
;
3021 EMIT_ASM32 (i386_void_call_2_a
,
3022 /* Preserve %eax only; we don't have to worry about %ebx. */
3024 /* Reserve a bit of stack space for arguments. */
3025 "sub $0x10,%esp\n\t"
3026 /* Copy "top" to the second argument position. (Note that
3027 we can't assume function won't scribble on its
3028 arguments, so don't try to restore from this.) */
3029 "mov %eax,4(%esp)\n\t"
3030 "mov %ebx,8(%esp)");
3031 /* Put the first argument on the stack. */
3032 buildaddr
= current_insn_ptr
;
3034 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3037 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3039 append_insns (&buildaddr
, i
, buf
);
3040 current_insn_ptr
= buildaddr
;
3041 i386_emit_call (fn
);
3042 EMIT_ASM32 (i386_void_call_2_b
,
3043 "lea 0x10(%esp),%esp\n\t"
3044 /* Restore original stack top. */
3050 i386_emit_eq_goto (int *offset_p
, int *size_p
)
3053 /* Check low half first, more likely to be decider */
3054 "cmpl %eax,(%esp)\n\t"
3055 "jne .Leq_fallthru\n\t"
3056 "cmpl %ebx,4(%esp)\n\t"
3057 "jne .Leq_fallthru\n\t"
3058 "lea 0x8(%esp),%esp\n\t"
3061 /* jmp, but don't trust the assembler to choose the right jump */
3062 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3063 ".Leq_fallthru:\n\t"
3064 "lea 0x8(%esp),%esp\n\t"
3075 i386_emit_ne_goto (int *offset_p
, int *size_p
)
3078 /* Check low half first, more likely to be decider */
3079 "cmpl %eax,(%esp)\n\t"
3081 "cmpl %ebx,4(%esp)\n\t"
3082 "je .Lne_fallthru\n\t"
3084 "lea 0x8(%esp),%esp\n\t"
3087 /* jmp, but don't trust the assembler to choose the right jump */
3088 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3089 ".Lne_fallthru:\n\t"
3090 "lea 0x8(%esp),%esp\n\t"
3101 i386_emit_lt_goto (int *offset_p
, int *size_p
)
3104 "cmpl %ebx,4(%esp)\n\t"
3106 "jne .Llt_fallthru\n\t"
3107 "cmpl %eax,(%esp)\n\t"
3108 "jnl .Llt_fallthru\n\t"
3110 "lea 0x8(%esp),%esp\n\t"
3113 /* jmp, but don't trust the assembler to choose the right jump */
3114 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3115 ".Llt_fallthru:\n\t"
3116 "lea 0x8(%esp),%esp\n\t"
3127 i386_emit_le_goto (int *offset_p
, int *size_p
)
3130 "cmpl %ebx,4(%esp)\n\t"
3132 "jne .Lle_fallthru\n\t"
3133 "cmpl %eax,(%esp)\n\t"
3134 "jnle .Lle_fallthru\n\t"
3136 "lea 0x8(%esp),%esp\n\t"
3139 /* jmp, but don't trust the assembler to choose the right jump */
3140 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3141 ".Lle_fallthru:\n\t"
3142 "lea 0x8(%esp),%esp\n\t"
3153 i386_emit_gt_goto (int *offset_p
, int *size_p
)
3156 "cmpl %ebx,4(%esp)\n\t"
3158 "jne .Lgt_fallthru\n\t"
3159 "cmpl %eax,(%esp)\n\t"
3160 "jng .Lgt_fallthru\n\t"
3162 "lea 0x8(%esp),%esp\n\t"
3165 /* jmp, but don't trust the assembler to choose the right jump */
3166 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3167 ".Lgt_fallthru:\n\t"
3168 "lea 0x8(%esp),%esp\n\t"
3179 i386_emit_ge_goto (int *offset_p
, int *size_p
)
3182 "cmpl %ebx,4(%esp)\n\t"
3184 "jne .Lge_fallthru\n\t"
3185 "cmpl %eax,(%esp)\n\t"
3186 "jnge .Lge_fallthru\n\t"
3188 "lea 0x8(%esp),%esp\n\t"
3191 /* jmp, but don't trust the assembler to choose the right jump */
3192 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3193 ".Lge_fallthru:\n\t"
3194 "lea 0x8(%esp),%esp\n\t"
/* vtable wiring the i386_emit_* helpers into the generic bytecode
   compiler.  NOTE(review): several member initializers are elided in
   this extraction.  */
3204 struct emit_ops i386_emit_ops
=
3212 i386_emit_rsh_signed
,
3213 i386_emit_rsh_unsigned
,
3221 i386_emit_less_signed
,
3222 i386_emit_less_unsigned
,
3226 i386_write_goto_address
,
3231 i386_emit_stack_flush
,
3234 i386_emit_stack_adjust
,
3235 i386_emit_int_call_1
,
3236 i386_emit_void_call_2
,
/* Select the bytecode-emitter table matching the inferior's word size:
   amd64_emit_ops when the current target description is 64-bit
   (is_64bit_tdesc ()), i386_emit_ops otherwise.
   NOTE(review): the function name line and braces are elided in this
   extract; only the return statements survive.  */
3246 static struct emit_ops
*
3250 if (is_64bit_tdesc ())
3251 return &amd64_emit_ops
;
3254 return &i386_emit_ops
;
/* Predicate wired into the_low_target below, reporting whether this
   backend supports hardware range stepping.
   NOTE(review): the body of this function is entirely elided in this
   extract — only the signature line remains; confirm the return value
   against the complete source.  */
3258 x86_supports_range_stepping (void)
3263 /* This is initialized assuming an amd64 target.
3264 x86_arch_setup will correct it for i386 or amd64 targets. */
/* The linux-low.c callback vector for the x86 backend: register access,
   breakpoint/watchpoint support, per-process/thread state hooks, and
   fast-tracepoint plumbing.  Slot order follows struct linux_target_ops.
   NOTE(review): several initializer entries (arch_setup, breakpoint
   descriptor fields, insert/remove point, siginfo fixup, etc.) are
   elided from this extract — only the fields listed below survive.  */
3266 struct linux_target_ops the_low_target
=
3269 x86_linux_regs_info
,
3270 x86_cannot_fetch_register
,
3271 x86_cannot_store_register
,
3272 NULL
, /* fetch_register */
3280 x86_supports_z_point_type
,
3283 x86_stopped_by_watchpoint
,
3284 x86_stopped_data_address
,
3285 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3286 native i386 case (no registers smaller than an xfer unit), and are not
3287 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3290 /* need to fix up i386 siginfo if host is amd64 */
3292 x86_linux_new_process
,
3293 x86_linux_new_thread
,
3295 x86_linux_prepare_to_resume
,
3296 x86_linux_process_qsupported
,
3297 x86_supports_tracepoints
,
3298 x86_get_thread_area
,
3299 x86_install_fast_tracepoint_jump_pad
,
3301 x86_get_min_fast_tracepoint_insn_len
,
3302 x86_supports_range_stepping
,
/* One-time arch initialization: register every amd64/x32/i386 target
   description variant (plain, AVX, AVX512, MPX), then build the
   "no-xml" fallback descriptions used when the client cannot fetch
   XML target descriptions — each is a heap copy of the plain tdesc
   (xmalloc presumably aborts on OOM rather than returning NULL —
   TODO confirm) with its xmltarget field redirected to the canned
   no-xml string.  Finally initializes the x86 regsets.
   NOTE(review): this extract ends mid-function — the closing brace
   (and anything after initialize_regsets_info) lies outside the
   visible window.  */
3306 initialize_low_arch (void)
3308 /* Initialize the Linux target descriptions. */
3310 init_registers_amd64_linux ();
3311 init_registers_amd64_avx_linux ();
3312 init_registers_amd64_avx512_linux ();
3313 init_registers_amd64_mpx_linux ();
3315 init_registers_x32_linux ();
3316 init_registers_x32_avx_linux ();
3317 init_registers_x32_avx512_linux ();
/* Fallback amd64 description for clients without XML support.  */
3319 tdesc_amd64_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3320 copy_target_description (tdesc_amd64_linux_no_xml
, tdesc_amd64_linux
);
3321 tdesc_amd64_linux_no_xml
->xmltarget
= xmltarget_amd64_linux_no_xml
;
3323 init_registers_i386_linux ();
3324 init_registers_i386_mmx_linux ();
3325 init_registers_i386_avx_linux ();
3326 init_registers_i386_avx512_linux ();
3327 init_registers_i386_mpx_linux ();
/* Fallback i386 description, built the same way as the amd64 one.  */
3329 tdesc_i386_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3330 copy_target_description (tdesc_i386_linux_no_xml
, tdesc_i386_linux
);
3331 tdesc_i386_linux_no_xml
->xmltarget
= xmltarget_i386_linux_no_xml
;
3333 initialize_regsets_info (&x86_regsets_info
);