1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
3 Copyright (C) 2002-2014 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "linux-low.h"
27 #include "i386-xstate.h"
29 #include "gdb_proc_service.h"
30 /* Don't include elf/common.h if linux/elf.h got included by
31 gdb_proc_service.h. */
33 #include "elf/common.h"
38 #include "tracepoint.h"
42 /* Defined in auto-generated file amd64-linux.c. */
43 void init_registers_amd64_linux (void);
44 extern const struct target_desc
*tdesc_amd64_linux
;
46 /* Defined in auto-generated file amd64-avx-linux.c. */
47 void init_registers_amd64_avx_linux (void);
48 extern const struct target_desc
*tdesc_amd64_avx_linux
;
50 /* Defined in auto-generated file amd64-avx512-linux.c. */
51 void init_registers_amd64_avx512_linux (void);
52 extern const struct target_desc
*tdesc_amd64_avx512_linux
;
54 /* Defined in auto-generated file amd64-mpx-linux.c. */
55 void init_registers_amd64_mpx_linux (void);
56 extern const struct target_desc
*tdesc_amd64_mpx_linux
;
58 /* Defined in auto-generated file x32-linux.c. */
59 void init_registers_x32_linux (void);
60 extern const struct target_desc
*tdesc_x32_linux
;
62 /* Defined in auto-generated file x32-avx-linux.c. */
63 void init_registers_x32_avx_linux (void);
64 extern const struct target_desc
*tdesc_x32_avx_linux
;
66 /* Defined in auto-generated file x32-avx512-linux.c. */
67 void init_registers_x32_avx512_linux (void);
68 extern const struct target_desc
*tdesc_x32_avx512_linux
;
72 /* Defined in auto-generated file i386-linux.c. */
73 void init_registers_i386_linux (void);
74 extern const struct target_desc
*tdesc_i386_linux
;
76 /* Defined in auto-generated file i386-mmx-linux.c. */
77 void init_registers_i386_mmx_linux (void);
78 extern const struct target_desc
*tdesc_i386_mmx_linux
;
80 /* Defined in auto-generated file i386-avx-linux.c. */
81 void init_registers_i386_avx_linux (void);
82 extern const struct target_desc
*tdesc_i386_avx_linux
;
84 /* Defined in auto-generated file i386-avx512-linux.c. */
85 void init_registers_i386_avx512_linux (void);
86 extern const struct target_desc
*tdesc_i386_avx512_linux
;
88 /* Defined in auto-generated file i386-mpx-linux.c. */
89 void init_registers_i386_mpx_linux (void);
90 extern const struct target_desc
*tdesc_i386_mpx_linux
;
93 static struct target_desc
*tdesc_amd64_linux_no_xml
;
95 static struct target_desc
*tdesc_i386_linux_no_xml
;
98 static unsigned char jump_insn
[] = { 0xe9, 0, 0, 0, 0 };
99 static unsigned char small_jump_insn
[] = { 0x66, 0xe9, 0, 0 };
101 /* Backward compatibility for gdb without XML support. */
103 static const char *xmltarget_i386_linux_no_xml
= "@<target>\
104 <architecture>i386</architecture>\
105 <osabi>GNU/Linux</osabi>\
109 static const char *xmltarget_amd64_linux_no_xml
= "@<target>\
110 <architecture>i386:x86-64</architecture>\
111 <osabi>GNU/Linux</osabi>\
116 #include <sys/procfs.h>
117 #include <sys/ptrace.h>
120 #ifndef PTRACE_GETREGSET
121 #define PTRACE_GETREGSET 0x4204
124 #ifndef PTRACE_SETREGSET
125 #define PTRACE_SETREGSET 0x4205
129 #ifndef PTRACE_GET_THREAD_AREA
130 #define PTRACE_GET_THREAD_AREA 25
133 /* This definition comes from prctl.h, but some kernels may not have it. */
134 #ifndef PTRACE_ARCH_PRCTL
135 #define PTRACE_ARCH_PRCTL 30
138 /* The following definitions come from prctl.h, but may be absent
139 for certain configurations. */
141 #define ARCH_SET_GS 0x1001
142 #define ARCH_SET_FS 0x1002
143 #define ARCH_GET_FS 0x1003
144 #define ARCH_GET_GS 0x1004
147 /* Per-process arch-specific data we want to keep. */
149 struct arch_process_info
151 struct i386_debug_reg_state debug_reg_state
;
154 /* Per-thread arch-specific data we want to keep. */
158 /* Non-zero if our copy differs from what's recorded in the thread. */
159 int debug_registers_changed
;
164 /* Mapping between the general-purpose registers in `struct user'
165 format and GDB's register array layout.
166 Note that the transfer layout uses 64-bit regs. */
167 static /*const*/ int i386_regmap
[] =
169 RAX
* 8, RCX
* 8, RDX
* 8, RBX
* 8,
170 RSP
* 8, RBP
* 8, RSI
* 8, RDI
* 8,
171 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
172 DS
* 8, ES
* 8, FS
* 8, GS
* 8
175 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
177 /* So code below doesn't have to care, i386 or amd64. */
178 #define ORIG_EAX ORIG_RAX
180 static const int x86_64_regmap
[] =
182 RAX
* 8, RBX
* 8, RCX
* 8, RDX
* 8,
183 RSI
* 8, RDI
* 8, RBP
* 8, RSP
* 8,
184 R8
* 8, R9
* 8, R10
* 8, R11
* 8,
185 R12
* 8, R13
* 8, R14
* 8, R15
* 8,
186 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
187 DS
* 8, ES
* 8, FS
* 8, GS
* 8,
188 -1, -1, -1, -1, -1, -1, -1, -1,
189 -1, -1, -1, -1, -1, -1, -1, -1,
190 -1, -1, -1, -1, -1, -1, -1, -1,
192 -1, -1, -1, -1, -1, -1, -1, -1,
194 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
195 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
196 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
197 -1, -1, -1, -1, -1, -1, -1, -1,
198 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
199 -1, -1, -1, -1, -1, -1, -1, -1,
200 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
201 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
202 -1, -1, -1, -1, -1, -1, -1, -1,
203 -1, -1, -1, -1, -1, -1, -1, -1,
204 -1, -1, -1, -1, -1, -1, -1, -1
207 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
208 #define X86_64_USER_REGS (GS + 1)
210 #else /* ! __x86_64__ */
212 /* Mapping between the general-purpose registers in `struct user'
213 format and GDB's register array layout. */
214 static /*const*/ int i386_regmap
[] =
216 EAX
* 4, ECX
* 4, EDX
* 4, EBX
* 4,
217 UESP
* 4, EBP
* 4, ESI
* 4, EDI
* 4,
218 EIP
* 4, EFL
* 4, CS
* 4, SS
* 4,
219 DS
* 4, ES
* 4, FS
* 4, GS
* 4
222 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
228 /* Returns true if the current inferior belongs to a x86-64 process,
232 is_64bit_tdesc (void)
234 struct regcache
*regcache
= get_thread_regcache (current_inferior
, 0);
236 return register_size (regcache
->tdesc
, 0) == 8;
242 /* Called by libthread_db. */
245 ps_get_thread_area (const struct ps_prochandle
*ph
,
246 lwpid_t lwpid
, int idx
, void **base
)
249 int use_64bit
= is_64bit_tdesc ();
256 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_FS
) == 0)
260 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_GS
) == 0)
271 unsigned int desc
[4];
273 if (ptrace (PTRACE_GET_THREAD_AREA
, lwpid
,
274 (void *) (intptr_t) idx
, (unsigned long) &desc
) < 0)
277 /* Ensure we properly extend the value to 64-bits for x86_64. */
278 *base
= (void *) (uintptr_t) desc
[1];
283 /* Get the thread area address. This is used to recognize which
284 thread is which when tracing with the in-process agent library. We
285 don't read anything from the address, and treat it as opaque; it's
286 the address itself that we assume is unique per-thread. */
289 x86_get_thread_area (int lwpid
, CORE_ADDR
*addr
)
292 int use_64bit
= is_64bit_tdesc ();
297 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_FS
) == 0)
299 *addr
= (CORE_ADDR
) (uintptr_t) base
;
308 struct lwp_info
*lwp
= find_lwp_pid (pid_to_ptid (lwpid
));
309 struct thread_info
*thr
= get_lwp_thread (lwp
);
310 struct regcache
*regcache
= get_thread_regcache (thr
, 1);
311 unsigned int desc
[4];
313 const int reg_thread_area
= 3; /* bits to scale down register value. */
316 collect_register_by_name (regcache
, "gs", &gs
);
318 idx
= gs
>> reg_thread_area
;
320 if (ptrace (PTRACE_GET_THREAD_AREA
,
322 (void *) (long) idx
, (unsigned long) &desc
) < 0)
333 x86_cannot_store_register (int regno
)
336 if (is_64bit_tdesc ())
340 return regno
>= I386_NUM_REGS
;
344 x86_cannot_fetch_register (int regno
)
347 if (is_64bit_tdesc ())
351 return regno
>= I386_NUM_REGS
;
355 x86_fill_gregset (struct regcache
*regcache
, void *buf
)
360 if (register_size (regcache
->tdesc
, 0) == 8)
362 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
363 if (x86_64_regmap
[i
] != -1)
364 collect_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
368 /* 32-bit inferior registers need to be zero-extended.
369 Callers would read uninitialized memory otherwise. */
370 memset (buf
, 0x00, X86_64_USER_REGS
* 8);
373 for (i
= 0; i
< I386_NUM_REGS
; i
++)
374 collect_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
376 collect_register_by_name (regcache
, "orig_eax",
377 ((char *) buf
) + ORIG_EAX
* 4);
381 x86_store_gregset (struct regcache
*regcache
, const void *buf
)
386 if (register_size (regcache
->tdesc
, 0) == 8)
388 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
389 if (x86_64_regmap
[i
] != -1)
390 supply_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
395 for (i
= 0; i
< I386_NUM_REGS
; i
++)
396 supply_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
398 supply_register_by_name (regcache
, "orig_eax",
399 ((char *) buf
) + ORIG_EAX
* 4);
/* Write REGCACHE's floating-point registers into BUF in the layout
   expected by PTRACE_SETFPREGS (fxsave on 64-bit, fsave on 32-bit).  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}
/* Read floating-point registers from the PTRACE_GETFPREGS buffer BUF
   into REGCACHE (fxsave layout on 64-bit, fsave on 32-bit).  */

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}
425 x86_fill_fpxregset (struct regcache
*regcache
, void *buf
)
427 i387_cache_to_fxsave (regcache
, buf
);
431 x86_store_fpxregset (struct regcache
*regcache
, const void *buf
)
433 i387_fxsave_to_cache (regcache
, buf
);
/* Write REGCACHE's registers into BUF in XSAVE extended-state layout,
   for PTRACE_SETREGSET with NT_X86_XSTATE.  */

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}
/* Read registers from the XSAVE extended-state buffer BUF (from
   PTRACE_GETREGSET / NT_X86_XSTATE) into REGCACHE.  */

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
450 /* ??? The non-biarch i386 case stores all the i387 regs twice.
451 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
452 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
453 doesn't work. IWBN to avoid the duplication in the case where it
454 does work. Maybe the arch_setup routine could check whether it works
455 and update the supported regsets accordingly. */
457 static struct regset_info x86_regsets
[] =
459 #ifdef HAVE_PTRACE_GETREGS
460 { PTRACE_GETREGS
, PTRACE_SETREGS
, 0, sizeof (elf_gregset_t
),
462 x86_fill_gregset
, x86_store_gregset
},
463 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_X86_XSTATE
, 0,
464 EXTENDED_REGS
, x86_fill_xstateregset
, x86_store_xstateregset
},
466 # ifdef HAVE_PTRACE_GETFPXREGS
467 { PTRACE_GETFPXREGS
, PTRACE_SETFPXREGS
, 0, sizeof (elf_fpxregset_t
),
469 x86_fill_fpxregset
, x86_store_fpxregset
},
472 { PTRACE_GETFPREGS
, PTRACE_SETFPREGS
, 0, sizeof (elf_fpregset_t
),
474 x86_fill_fpregset
, x86_store_fpregset
},
475 #endif /* HAVE_PTRACE_GETREGS */
476 { 0, 0, 0, -1, -1, NULL
, NULL
}
480 x86_get_pc (struct regcache
*regcache
)
482 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
487 collect_register_by_name (regcache
, "rip", &pc
);
488 return (CORE_ADDR
) pc
;
493 collect_register_by_name (regcache
, "eip", &pc
);
494 return (CORE_ADDR
) pc
;
499 x86_set_pc (struct regcache
*regcache
, CORE_ADDR pc
)
501 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
505 unsigned long newpc
= pc
;
506 supply_register_by_name (regcache
, "rip", &newpc
);
510 unsigned int newpc
= pc
;
511 supply_register_by_name (regcache
, "eip", &newpc
);
515 static const unsigned char x86_breakpoint
[] = { 0xCC };
516 #define x86_breakpoint_len 1
519 x86_breakpoint_at (CORE_ADDR pc
)
523 (*the_target
->read_memory
) (pc
, &c
, 1);
530 /* Support for debug registers. */
533 x86_linux_dr_get (ptid_t ptid
, int regnum
)
538 tid
= ptid_get_lwp (ptid
);
541 value
= ptrace (PTRACE_PEEKUSER
, tid
,
542 offsetof (struct user
, u_debugreg
[regnum
]), 0);
544 error ("Couldn't read debug register");
550 x86_linux_dr_set (ptid_t ptid
, int regnum
, unsigned long value
)
554 tid
= ptid_get_lwp (ptid
);
557 ptrace (PTRACE_POKEUSER
, tid
,
558 offsetof (struct user
, u_debugreg
[regnum
]), value
);
560 error ("Couldn't write debug register");
564 update_debug_registers_callback (struct inferior_list_entry
*entry
,
567 struct thread_info
*thr
= (struct thread_info
*) entry
;
568 struct lwp_info
*lwp
= get_thread_lwp (thr
);
569 int pid
= *(int *) pid_p
;
571 /* Only update the threads of this process. */
572 if (pid_of (thr
) == pid
)
574 /* The actual update is done later just before resuming the lwp,
575 we just mark that the registers need updating. */
576 lwp
->arch_private
->debug_registers_changed
= 1;
578 /* If the lwp isn't stopped, force it to momentarily pause, so
579 we can update its debug registers. */
581 linux_stop_lwp (lwp
);
587 /* Update the inferior's debug register REGNUM from STATE. */
590 i386_dr_low_set_addr (int regnum
, CORE_ADDR addr
)
592 /* Only update the threads of this process. */
593 int pid
= pid_of (current_inferior
);
595 if (! (regnum
>= 0 && regnum
<= DR_LASTADDR
- DR_FIRSTADDR
))
596 fatal ("Invalid debug register %d", regnum
);
598 find_inferior (&all_threads
, update_debug_registers_callback
, &pid
);
601 /* Return the inferior's debug register REGNUM. */
604 i386_dr_low_get_addr (int regnum
)
606 ptid_t ptid
= ptid_of (current_inferior
);
608 /* DR6 and DR7 are retrieved with some other way. */
609 gdb_assert (DR_FIRSTADDR
<= regnum
&& regnum
<= DR_LASTADDR
);
611 return x86_linux_dr_get (ptid
, regnum
);
614 /* Update the inferior's DR7 debug control register from STATE. */
617 i386_dr_low_set_control (unsigned long control
)
619 /* Only update the threads of this process. */
620 int pid
= pid_of (current_inferior
);
622 find_inferior (&all_threads
, update_debug_registers_callback
, &pid
);
625 /* Return the inferior's DR7 debug control register. */
628 i386_dr_low_get_control (void)
630 ptid_t ptid
= ptid_of (current_inferior
);
632 return x86_linux_dr_get (ptid
, DR_CONTROL
);
635 /* Get the value of the DR6 debug status register from the inferior
636 and record it in STATE. */
639 i386_dr_low_get_status (void)
641 ptid_t ptid
= ptid_of (current_inferior
);
643 return x86_linux_dr_get (ptid
, DR_STATUS
);
646 /* Low-level function vector. */
647 struct i386_dr_low_type i386_dr_low
=
649 i386_dr_low_set_control
,
650 i386_dr_low_set_addr
,
651 i386_dr_low_get_addr
,
652 i386_dr_low_get_status
,
653 i386_dr_low_get_control
,
657 /* Breakpoint/Watchpoint support. */
660 x86_supports_z_point_type (char z_type
)
666 case Z_PACKET_WRITE_WP
:
667 case Z_PACKET_ACCESS_WP
:
675 x86_insert_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
676 int size
, struct raw_breakpoint
*bp
)
678 struct process_info
*proc
= current_process ();
682 case raw_bkpt_type_sw
:
683 return insert_memory_breakpoint (bp
);
685 case raw_bkpt_type_hw
:
686 case raw_bkpt_type_write_wp
:
687 case raw_bkpt_type_access_wp
:
689 enum target_hw_bp_type hw_type
690 = raw_bkpt_type_to_target_hw_bp_type (type
);
691 struct i386_debug_reg_state
*state
692 = &proc
->private->arch_private
->debug_reg_state
;
694 return i386_dr_insert_watchpoint (state
, hw_type
, addr
, size
);
704 x86_remove_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
705 int size
, struct raw_breakpoint
*bp
)
707 struct process_info
*proc
= current_process ();
711 case raw_bkpt_type_sw
:
712 return remove_memory_breakpoint (bp
);
714 case raw_bkpt_type_hw
:
715 case raw_bkpt_type_write_wp
:
716 case raw_bkpt_type_access_wp
:
718 enum target_hw_bp_type hw_type
719 = raw_bkpt_type_to_target_hw_bp_type (type
);
720 struct i386_debug_reg_state
*state
721 = &proc
->private->arch_private
->debug_reg_state
;
723 return i386_dr_remove_watchpoint (state
, hw_type
, addr
, size
);
732 x86_stopped_by_watchpoint (void)
734 struct process_info
*proc
= current_process ();
735 return i386_dr_stopped_by_watchpoint (&proc
->private->arch_private
->debug_reg_state
);
739 x86_stopped_data_address (void)
741 struct process_info
*proc
= current_process ();
743 if (i386_dr_stopped_data_address (&proc
->private->arch_private
->debug_reg_state
,
749 /* Called when a new process is created. */
751 static struct arch_process_info
*
752 x86_linux_new_process (void)
754 struct arch_process_info
*info
= xcalloc (1, sizeof (*info
));
756 i386_low_init_dregs (&info
->debug_reg_state
);
761 /* Called when a new thread is detected. */
763 static struct arch_lwp_info
*
764 x86_linux_new_thread (void)
766 struct arch_lwp_info
*info
= xcalloc (1, sizeof (*info
));
768 info
->debug_registers_changed
= 1;
773 /* Called when resuming a thread.
774 If the debug regs have changed, update the thread's copies. */
777 x86_linux_prepare_to_resume (struct lwp_info
*lwp
)
779 ptid_t ptid
= ptid_of (get_lwp_thread (lwp
));
780 int clear_status
= 0;
782 if (lwp
->arch_private
->debug_registers_changed
)
785 int pid
= ptid_get_pid (ptid
);
786 struct process_info
*proc
= find_process_pid (pid
);
787 struct i386_debug_reg_state
*state
788 = &proc
->private->arch_private
->debug_reg_state
;
790 x86_linux_dr_set (ptid
, DR_CONTROL
, 0);
792 for (i
= DR_FIRSTADDR
; i
<= DR_LASTADDR
; i
++)
793 if (state
->dr_ref_count
[i
] > 0)
795 x86_linux_dr_set (ptid
, i
, state
->dr_mirror
[i
]);
797 /* If we're setting a watchpoint, any change the inferior
798 had done itself to the debug registers needs to be
799 discarded, otherwise, i386_dr_stopped_data_address can
804 if (state
->dr_control_mirror
!= 0)
805 x86_linux_dr_set (ptid
, DR_CONTROL
, state
->dr_control_mirror
);
807 lwp
->arch_private
->debug_registers_changed
= 0;
810 if (clear_status
|| lwp
->stopped_by_watchpoint
)
811 x86_linux_dr_set (ptid
, DR_STATUS
, 0);
814 /* When GDBSERVER is built as a 64-bit application on linux, the
815 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
816 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
817 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
818 conversion in-place ourselves. */
820 /* These types below (compat_*) define a siginfo type that is layout
821 compatible with the siginfo type exported by the 32-bit userspace
826 typedef int compat_int_t
;
827 typedef unsigned int compat_uptr_t
;
829 typedef int compat_time_t
;
830 typedef int compat_timer_t
;
831 typedef int compat_clock_t
;
833 struct compat_timeval
835 compat_time_t tv_sec
;
839 typedef union compat_sigval
841 compat_int_t sival_int
;
842 compat_uptr_t sival_ptr
;
845 typedef struct compat_siginfo
853 int _pad
[((128 / sizeof (int)) - 3)];
862 /* POSIX.1b timers */
867 compat_sigval_t _sigval
;
870 /* POSIX.1b signals */
875 compat_sigval_t _sigval
;
884 compat_clock_t _utime
;
885 compat_clock_t _stime
;
888 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
903 /* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes. */
904 typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t
;
906 typedef struct compat_x32_siginfo
914 int _pad
[((128 / sizeof (int)) - 3)];
923 /* POSIX.1b timers */
928 compat_sigval_t _sigval
;
931 /* POSIX.1b signals */
936 compat_sigval_t _sigval
;
945 compat_x32_clock_t _utime
;
946 compat_x32_clock_t _stime
;
949 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
962 } compat_x32_siginfo_t
__attribute__ ((__aligned__ (8)));
964 #define cpt_si_pid _sifields._kill._pid
965 #define cpt_si_uid _sifields._kill._uid
966 #define cpt_si_timerid _sifields._timer._tid
967 #define cpt_si_overrun _sifields._timer._overrun
968 #define cpt_si_status _sifields._sigchld._status
969 #define cpt_si_utime _sifields._sigchld._utime
970 #define cpt_si_stime _sifields._sigchld._stime
971 #define cpt_si_ptr _sifields._rt._sigval.sival_ptr
972 #define cpt_si_addr _sifields._sigfault._addr
973 #define cpt_si_band _sifields._sigpoll._band
974 #define cpt_si_fd _sifields._sigpoll._fd
976 /* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
977 In their place is si_timer1,si_timer2. */
979 #define si_timerid si_timer1
982 #define si_overrun si_timer2
986 compat_siginfo_from_siginfo (compat_siginfo_t
*to
, siginfo_t
*from
)
988 memset (to
, 0, sizeof (*to
));
990 to
->si_signo
= from
->si_signo
;
991 to
->si_errno
= from
->si_errno
;
992 to
->si_code
= from
->si_code
;
994 if (to
->si_code
== SI_TIMER
)
996 to
->cpt_si_timerid
= from
->si_timerid
;
997 to
->cpt_si_overrun
= from
->si_overrun
;
998 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1000 else if (to
->si_code
== SI_USER
)
1002 to
->cpt_si_pid
= from
->si_pid
;
1003 to
->cpt_si_uid
= from
->si_uid
;
1005 else if (to
->si_code
< 0)
1007 to
->cpt_si_pid
= from
->si_pid
;
1008 to
->cpt_si_uid
= from
->si_uid
;
1009 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1013 switch (to
->si_signo
)
1016 to
->cpt_si_pid
= from
->si_pid
;
1017 to
->cpt_si_uid
= from
->si_uid
;
1018 to
->cpt_si_status
= from
->si_status
;
1019 to
->cpt_si_utime
= from
->si_utime
;
1020 to
->cpt_si_stime
= from
->si_stime
;
1026 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
1029 to
->cpt_si_band
= from
->si_band
;
1030 to
->cpt_si_fd
= from
->si_fd
;
1033 to
->cpt_si_pid
= from
->si_pid
;
1034 to
->cpt_si_uid
= from
->si_uid
;
1035 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1042 siginfo_from_compat_siginfo (siginfo_t
*to
, compat_siginfo_t
*from
)
1044 memset (to
, 0, sizeof (*to
));
1046 to
->si_signo
= from
->si_signo
;
1047 to
->si_errno
= from
->si_errno
;
1048 to
->si_code
= from
->si_code
;
1050 if (to
->si_code
== SI_TIMER
)
1052 to
->si_timerid
= from
->cpt_si_timerid
;
1053 to
->si_overrun
= from
->cpt_si_overrun
;
1054 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1056 else if (to
->si_code
== SI_USER
)
1058 to
->si_pid
= from
->cpt_si_pid
;
1059 to
->si_uid
= from
->cpt_si_uid
;
1061 else if (to
->si_code
< 0)
1063 to
->si_pid
= from
->cpt_si_pid
;
1064 to
->si_uid
= from
->cpt_si_uid
;
1065 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1069 switch (to
->si_signo
)
1072 to
->si_pid
= from
->cpt_si_pid
;
1073 to
->si_uid
= from
->cpt_si_uid
;
1074 to
->si_status
= from
->cpt_si_status
;
1075 to
->si_utime
= from
->cpt_si_utime
;
1076 to
->si_stime
= from
->cpt_si_stime
;
1082 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1085 to
->si_band
= from
->cpt_si_band
;
1086 to
->si_fd
= from
->cpt_si_fd
;
1089 to
->si_pid
= from
->cpt_si_pid
;
1090 to
->si_uid
= from
->cpt_si_uid
;
1091 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1098 compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t
*to
,
1101 memset (to
, 0, sizeof (*to
));
1103 to
->si_signo
= from
->si_signo
;
1104 to
->si_errno
= from
->si_errno
;
1105 to
->si_code
= from
->si_code
;
1107 if (to
->si_code
== SI_TIMER
)
1109 to
->cpt_si_timerid
= from
->si_timerid
;
1110 to
->cpt_si_overrun
= from
->si_overrun
;
1111 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1113 else if (to
->si_code
== SI_USER
)
1115 to
->cpt_si_pid
= from
->si_pid
;
1116 to
->cpt_si_uid
= from
->si_uid
;
1118 else if (to
->si_code
< 0)
1120 to
->cpt_si_pid
= from
->si_pid
;
1121 to
->cpt_si_uid
= from
->si_uid
;
1122 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1126 switch (to
->si_signo
)
1129 to
->cpt_si_pid
= from
->si_pid
;
1130 to
->cpt_si_uid
= from
->si_uid
;
1131 to
->cpt_si_status
= from
->si_status
;
1132 to
->cpt_si_utime
= from
->si_utime
;
1133 to
->cpt_si_stime
= from
->si_stime
;
1139 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
1142 to
->cpt_si_band
= from
->si_band
;
1143 to
->cpt_si_fd
= from
->si_fd
;
1146 to
->cpt_si_pid
= from
->si_pid
;
1147 to
->cpt_si_uid
= from
->si_uid
;
1148 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1155 siginfo_from_compat_x32_siginfo (siginfo_t
*to
,
1156 compat_x32_siginfo_t
*from
)
1158 memset (to
, 0, sizeof (*to
));
1160 to
->si_signo
= from
->si_signo
;
1161 to
->si_errno
= from
->si_errno
;
1162 to
->si_code
= from
->si_code
;
1164 if (to
->si_code
== SI_TIMER
)
1166 to
->si_timerid
= from
->cpt_si_timerid
;
1167 to
->si_overrun
= from
->cpt_si_overrun
;
1168 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1170 else if (to
->si_code
== SI_USER
)
1172 to
->si_pid
= from
->cpt_si_pid
;
1173 to
->si_uid
= from
->cpt_si_uid
;
1175 else if (to
->si_code
< 0)
1177 to
->si_pid
= from
->cpt_si_pid
;
1178 to
->si_uid
= from
->cpt_si_uid
;
1179 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1183 switch (to
->si_signo
)
1186 to
->si_pid
= from
->cpt_si_pid
;
1187 to
->si_uid
= from
->cpt_si_uid
;
1188 to
->si_status
= from
->cpt_si_status
;
1189 to
->si_utime
= from
->cpt_si_utime
;
1190 to
->si_stime
= from
->cpt_si_stime
;
1196 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1199 to
->si_band
= from
->cpt_si_band
;
1200 to
->si_fd
= from
->cpt_si_fd
;
1203 to
->si_pid
= from
->cpt_si_pid
;
1204 to
->si_uid
= from
->cpt_si_uid
;
1205 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1211 #endif /* __x86_64__ */
1213 /* Convert a native/host siginfo object, into/from the siginfo in the
1214 layout of the inferiors' architecture. Returns true if any
1215 conversion was done; false otherwise. If DIRECTION is 1, then copy
1216 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
1220 x86_siginfo_fixup (siginfo_t
*native
, void *inf
, int direction
)
1223 unsigned int machine
;
1224 int tid
= lwpid_of (current_inferior
);
1225 int is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1227 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
1228 if (!is_64bit_tdesc ())
1230 if (sizeof (siginfo_t
) != sizeof (compat_siginfo_t
))
1231 fatal ("unexpected difference in siginfo");
1234 compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
, native
);
1236 siginfo_from_compat_siginfo (native
, (struct compat_siginfo
*) inf
);
1240 /* No fixup for native x32 GDB. */
1241 else if (!is_elf64
&& sizeof (void *) == 8)
1243 if (sizeof (siginfo_t
) != sizeof (compat_x32_siginfo_t
))
1244 fatal ("unexpected difference in siginfo");
1247 compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo
*) inf
,
1250 siginfo_from_compat_x32_siginfo (native
,
1251 (struct compat_x32_siginfo
*) inf
);
1262 /* Format of XSAVE extended state is:
1265 fxsave_bytes[0..463]
1266 sw_usable_bytes[464..511]
1267 xstate_hdr_bytes[512..575]
1272 Same memory layout will be used for the coredump NT_X86_XSTATE
1273 representing the XSAVE extended state registers.
1275 The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
1276 extended state mask, which is the same as the extended control register
1277 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
1278 together with the mask saved in the xstate_hdr_bytes to determine what
1279 states the processor/OS supports and what state, used or initialized,
1280 the process/thread is in. */
1281 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
1283 /* Does the current host support the GETFPXREGS request? The header
1284 file may or may not define it, and even if it is defined, the
1285 kernel will return EIO if it's running on a pre-SSE processor. */
1286 int have_ptrace_getfpxregs
=
1287 #ifdef HAVE_PTRACE_GETFPXREGS
1294 /* Does the current host support PTRACE_GETREGSET? */
1295 static int have_ptrace_getregset
= -1;
1297 /* Get Linux/x86 target description from running target. */
1299 static const struct target_desc
*
1300 x86_linux_read_description (void)
1302 unsigned int machine
;
1306 static uint64_t xcr0
;
1307 struct regset_info
*regset
;
1309 tid
= lwpid_of (current_inferior
);
1311 is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1313 if (sizeof (void *) == 4)
1316 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
1318 else if (machine
== EM_X86_64
)
1319 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
1323 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
1324 if (machine
== EM_386
&& have_ptrace_getfpxregs
== -1)
1326 elf_fpxregset_t fpxregs
;
1328 if (ptrace (PTRACE_GETFPXREGS
, tid
, 0, (long) &fpxregs
) < 0)
1330 have_ptrace_getfpxregs
= 0;
1331 have_ptrace_getregset
= 0;
1332 return tdesc_i386_mmx_linux
;
1335 have_ptrace_getfpxregs
= 1;
1341 x86_xcr0
= I386_XSTATE_SSE_MASK
;
1343 /* Don't use XML. */
1345 if (machine
== EM_X86_64
)
1346 return tdesc_amd64_linux_no_xml
;
1349 return tdesc_i386_linux_no_xml
;
1352 if (have_ptrace_getregset
== -1)
1354 uint64_t xstateregs
[(I386_XSTATE_SSE_SIZE
/ sizeof (uint64_t))];
1357 iov
.iov_base
= xstateregs
;
1358 iov
.iov_len
= sizeof (xstateregs
);
1360 /* Check if PTRACE_GETREGSET works. */
1361 if (ptrace (PTRACE_GETREGSET
, tid
,
1362 (unsigned int) NT_X86_XSTATE
, (long) &iov
) < 0)
1363 have_ptrace_getregset
= 0;
1366 have_ptrace_getregset
= 1;
1368 /* Get XCR0 from XSAVE extended state. */
1369 xcr0
= xstateregs
[(I386_LINUX_XSAVE_XCR0_OFFSET
1370 / sizeof (uint64_t))];
1372 /* Use PTRACE_GETREGSET if it is available. */
1373 for (regset
= x86_regsets
;
1374 regset
->fill_function
!= NULL
; regset
++)
1375 if (regset
->get_request
== PTRACE_GETREGSET
)
1376 regset
->size
= I386_XSTATE_SIZE (xcr0
);
1377 else if (regset
->type
!= GENERAL_REGS
)
1382 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
1383 xcr0_features
= (have_ptrace_getregset
1384 && (xcr0
& I386_XSTATE_ALL_MASK
));
1389 if (machine
== EM_X86_64
)
1396 switch (xcr0
& I386_XSTATE_ALL_MASK
)
1398 case I386_XSTATE_AVX512_MASK
:
1399 return tdesc_amd64_avx512_linux
;
1401 case I386_XSTATE_MPX_MASK
:
1402 return tdesc_amd64_mpx_linux
;
1404 case I386_XSTATE_AVX_MASK
:
1405 return tdesc_amd64_avx_linux
;
1408 return tdesc_amd64_linux
;
1412 return tdesc_amd64_linux
;
1418 switch (xcr0
& I386_XSTATE_ALL_MASK
)
1420 case I386_XSTATE_AVX512_MASK
:
1421 return tdesc_x32_avx512_linux
;
1423 case I386_XSTATE_MPX_MASK
: /* No MPX on x32. */
1424 case I386_XSTATE_AVX_MASK
:
1425 return tdesc_x32_avx_linux
;
1428 return tdesc_x32_linux
;
1432 return tdesc_x32_linux
;
1440 switch (xcr0
& I386_XSTATE_ALL_MASK
)
1442 case (I386_XSTATE_AVX512_MASK
):
1443 return tdesc_i386_avx512_linux
;
1445 case (I386_XSTATE_MPX_MASK
):
1446 return tdesc_i386_mpx_linux
;
1448 case (I386_XSTATE_AVX_MASK
):
1449 return tdesc_i386_avx_linux
;
1452 return tdesc_i386_linux
;
1456 return tdesc_i386_linux
;
1459 gdb_assert_not_reached ("failed to return tdesc");
1462 /* Callback for find_inferior. Stops iteration when a thread with a
1463 given PID is found. */
1466 same_process_callback (struct inferior_list_entry
*entry
, void *data
)
1468 int pid
= *(int *) data
;
1470 return (ptid_get_pid (entry
->id
) == pid
);
1473 /* Callback for for_each_inferior. Calls the arch_setup routine for
1477 x86_arch_setup_process_callback (struct inferior_list_entry
*entry
)
1479 int pid
= ptid_get_pid (entry
->id
);
1481 /* Look up any thread of this processes. */
1483 = (struct thread_info
*) find_inferior (&all_threads
,
1484 same_process_callback
, &pid
);
1486 the_low_target
.arch_setup ();
1489 /* Update all the target description of all processes; a new GDB
1490 connected, and it may or not support xml target descriptions. */
1493 x86_linux_update_xmltarget (void)
1495 struct thread_info
*save_inferior
= current_inferior
;
1497 /* Before changing the register cache's internal layout, flush the
1498 contents of the current valid caches back to the threads, and
1499 release the current regcache objects. */
1500 regcache_release ();
1502 for_each_inferior (&all_processes
, x86_arch_setup_process_callback
);
1504 current_inferior
= save_inferior
;
1507 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1508 PTRACE_GETREGSET. */
1511 x86_linux_process_qsupported (const char *query
)
1513 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1514 with "i386" in qSupported query, it supports x86 XML target
1517 if (query
!= NULL
&& strncmp (query
, "xmlRegisters=", 13) == 0)
1519 char *copy
= xstrdup (query
+ 13);
1522 for (p
= strtok (copy
, ","); p
!= NULL
; p
= strtok (NULL
, ","))
1524 if (strcmp (p
, "i386") == 0)
1534 x86_linux_update_xmltarget ();
1537 /* Common for x86/x86-64. */
1539 static struct regsets_info x86_regsets_info
=
1541 x86_regsets
, /* regsets */
1542 0, /* num_regsets */
1543 NULL
, /* disabled_regsets */
1547 static struct regs_info amd64_linux_regs_info
=
1549 NULL
, /* regset_bitmap */
1550 NULL
, /* usrregs_info */
1554 static struct usrregs_info i386_linux_usrregs_info
=
1560 static struct regs_info i386_linux_regs_info
=
1562 NULL
, /* regset_bitmap */
1563 &i386_linux_usrregs_info
,
1567 const struct regs_info
*
1568 x86_linux_regs_info (void)
1571 if (is_64bit_tdesc ())
1572 return &amd64_linux_regs_info
;
1575 return &i386_linux_regs_info
;
1578 /* Initialize the target description for the architecture of the
1582 x86_arch_setup (void)
1584 current_process ()->tdesc
= x86_linux_read_description ();
/* Fast tracepoints are supported on this target.  */
static int
x86_supports_tracepoints (void)
{
  return 1;
}
1594 append_insns (CORE_ADDR
*to
, size_t len
, const unsigned char *buf
)
1596 write_inferior_memory (*to
, buf
, len
);
/* Decode the whitespace-separated hex byte pairs in OP and append
   them to BUF.  Returns the number of bytes written.  */
static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *base = buf;

  for (;;)
    {
      char *endptr;
      unsigned long byte = strtoul (op, &endptr, 16);

      /* No digits consumed means we have reached the end of OP.  */
      if (endptr == op)
	break;

      *buf++ = byte;
      op = endptr;
    }

  return buf - base;
}
1622 /* Build a jump pad that saves registers and calls a collection
1623 function. Writes a jump instruction to the jump pad to
1624 JJUMPAD_INSN. The caller is responsible to write it in at the
1625 tracepoint address. */
1628 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1629 CORE_ADDR collector
,
1632 CORE_ADDR
*jump_entry
,
1633 CORE_ADDR
*trampoline
,
1634 ULONGEST
*trampoline_size
,
1635 unsigned char *jjump_pad_insn
,
1636 ULONGEST
*jjump_pad_insn_size
,
1637 CORE_ADDR
*adjusted_insn_addr
,
1638 CORE_ADDR
*adjusted_insn_addr_end
,
1641 unsigned char buf
[40];
1645 CORE_ADDR buildaddr
= *jump_entry
;
1647 /* Build the jump pad. */
1649 /* First, do tracepoint data collection. Save registers. */
1651 /* Need to ensure stack pointer saved first. */
1652 buf
[i
++] = 0x54; /* push %rsp */
1653 buf
[i
++] = 0x55; /* push %rbp */
1654 buf
[i
++] = 0x57; /* push %rdi */
1655 buf
[i
++] = 0x56; /* push %rsi */
1656 buf
[i
++] = 0x52; /* push %rdx */
1657 buf
[i
++] = 0x51; /* push %rcx */
1658 buf
[i
++] = 0x53; /* push %rbx */
1659 buf
[i
++] = 0x50; /* push %rax */
1660 buf
[i
++] = 0x41; buf
[i
++] = 0x57; /* push %r15 */
1661 buf
[i
++] = 0x41; buf
[i
++] = 0x56; /* push %r14 */
1662 buf
[i
++] = 0x41; buf
[i
++] = 0x55; /* push %r13 */
1663 buf
[i
++] = 0x41; buf
[i
++] = 0x54; /* push %r12 */
1664 buf
[i
++] = 0x41; buf
[i
++] = 0x53; /* push %r11 */
1665 buf
[i
++] = 0x41; buf
[i
++] = 0x52; /* push %r10 */
1666 buf
[i
++] = 0x41; buf
[i
++] = 0x51; /* push %r9 */
1667 buf
[i
++] = 0x41; buf
[i
++] = 0x50; /* push %r8 */
1668 buf
[i
++] = 0x9c; /* pushfq */
1669 buf
[i
++] = 0x48; /* movl <addr>,%rdi */
1671 *((unsigned long *)(buf
+ i
)) = (unsigned long) tpaddr
;
1672 i
+= sizeof (unsigned long);
1673 buf
[i
++] = 0x57; /* push %rdi */
1674 append_insns (&buildaddr
, i
, buf
);
1676 /* Stack space for the collecting_t object. */
1678 i
+= push_opcode (&buf
[i
], "48 83 ec 18"); /* sub $0x18,%rsp */
1679 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov <tpoint>,%rax */
1680 memcpy (buf
+ i
, &tpoint
, 8);
1682 i
+= push_opcode (&buf
[i
], "48 89 04 24"); /* mov %rax,(%rsp) */
1683 i
+= push_opcode (&buf
[i
],
1684 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1685 i
+= push_opcode (&buf
[i
], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1686 append_insns (&buildaddr
, i
, buf
);
1690 i
+= push_opcode (&buf
[i
], "48 be"); /* movl <lockaddr>,%rsi */
1691 memcpy (&buf
[i
], (void *) &lockaddr
, 8);
1693 i
+= push_opcode (&buf
[i
], "48 89 e1"); /* mov %rsp,%rcx */
1694 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1695 i
+= push_opcode (&buf
[i
], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1696 i
+= push_opcode (&buf
[i
], "48 85 c0"); /* test %rax,%rax */
1697 i
+= push_opcode (&buf
[i
], "75 f4"); /* jne <again> */
1698 append_insns (&buildaddr
, i
, buf
);
1700 /* Set up the gdb_collect call. */
1701 /* At this point, (stack pointer + 0x18) is the base of our saved
1705 i
+= push_opcode (&buf
[i
], "48 89 e6"); /* mov %rsp,%rsi */
1706 i
+= push_opcode (&buf
[i
], "48 83 c6 18"); /* add $0x18,%rsi */
1708 /* tpoint address may be 64-bit wide. */
1709 i
+= push_opcode (&buf
[i
], "48 bf"); /* movl <addr>,%rdi */
1710 memcpy (buf
+ i
, &tpoint
, 8);
1712 append_insns (&buildaddr
, i
, buf
);
1714 /* The collector function being in the shared library, may be
1715 >31-bits away off the jump pad. */
1717 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov $collector,%rax */
1718 memcpy (buf
+ i
, &collector
, 8);
1720 i
+= push_opcode (&buf
[i
], "ff d0"); /* callq *%rax */
1721 append_insns (&buildaddr
, i
, buf
);
1723 /* Clear the spin-lock. */
1725 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1726 i
+= push_opcode (&buf
[i
], "48 a3"); /* mov %rax, lockaddr */
1727 memcpy (buf
+ i
, &lockaddr
, 8);
1729 append_insns (&buildaddr
, i
, buf
);
1731 /* Remove stack that had been used for the collect_t object. */
1733 i
+= push_opcode (&buf
[i
], "48 83 c4 18"); /* add $0x18,%rsp */
1734 append_insns (&buildaddr
, i
, buf
);
1736 /* Restore register state. */
1738 buf
[i
++] = 0x48; /* add $0x8,%rsp */
1742 buf
[i
++] = 0x9d; /* popfq */
1743 buf
[i
++] = 0x41; buf
[i
++] = 0x58; /* pop %r8 */
1744 buf
[i
++] = 0x41; buf
[i
++] = 0x59; /* pop %r9 */
1745 buf
[i
++] = 0x41; buf
[i
++] = 0x5a; /* pop %r10 */
1746 buf
[i
++] = 0x41; buf
[i
++] = 0x5b; /* pop %r11 */
1747 buf
[i
++] = 0x41; buf
[i
++] = 0x5c; /* pop %r12 */
1748 buf
[i
++] = 0x41; buf
[i
++] = 0x5d; /* pop %r13 */
1749 buf
[i
++] = 0x41; buf
[i
++] = 0x5e; /* pop %r14 */
1750 buf
[i
++] = 0x41; buf
[i
++] = 0x5f; /* pop %r15 */
1751 buf
[i
++] = 0x58; /* pop %rax */
1752 buf
[i
++] = 0x5b; /* pop %rbx */
1753 buf
[i
++] = 0x59; /* pop %rcx */
1754 buf
[i
++] = 0x5a; /* pop %rdx */
1755 buf
[i
++] = 0x5e; /* pop %rsi */
1756 buf
[i
++] = 0x5f; /* pop %rdi */
1757 buf
[i
++] = 0x5d; /* pop %rbp */
1758 buf
[i
++] = 0x5c; /* pop %rsp */
1759 append_insns (&buildaddr
, i
, buf
);
1761 /* Now, adjust the original instruction to execute in the jump
1763 *adjusted_insn_addr
= buildaddr
;
1764 relocate_instruction (&buildaddr
, tpaddr
);
1765 *adjusted_insn_addr_end
= buildaddr
;
1767 /* Finally, write a jump back to the program. */
1769 loffset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1770 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1773 "E.Jump back from jump pad too far from tracepoint "
1774 "(offset 0x%" PRIx64
" > int32).", loffset
);
1778 offset
= (int) loffset
;
1779 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1780 memcpy (buf
+ 1, &offset
, 4);
1781 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1783 /* The jump pad is now built. Wire in a jump to our jump pad. This
1784 is always done last (by our caller actually), so that we can
1785 install fast tracepoints with threads running. This relies on
1786 the agent's atomic write support. */
1787 loffset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1788 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1791 "E.Jump pad too far from tracepoint "
1792 "(offset 0x%" PRIx64
" > int32).", loffset
);
1796 offset
= (int) loffset
;
1798 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1799 memcpy (buf
+ 1, &offset
, 4);
1800 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1801 *jjump_pad_insn_size
= sizeof (jump_insn
);
1803 /* Return the end address of our pad. */
1804 *jump_entry
= buildaddr
;
1809 #endif /* __x86_64__ */
1811 /* Build a jump pad that saves registers and calls a collection
1812 function. Writes a jump instruction to the jump pad to
1813 JJUMPAD_INSN. The caller is responsible to write it in at the
1814 tracepoint address. */
1817 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1818 CORE_ADDR collector
,
1821 CORE_ADDR
*jump_entry
,
1822 CORE_ADDR
*trampoline
,
1823 ULONGEST
*trampoline_size
,
1824 unsigned char *jjump_pad_insn
,
1825 ULONGEST
*jjump_pad_insn_size
,
1826 CORE_ADDR
*adjusted_insn_addr
,
1827 CORE_ADDR
*adjusted_insn_addr_end
,
1830 unsigned char buf
[0x100];
1832 CORE_ADDR buildaddr
= *jump_entry
;
1834 /* Build the jump pad. */
1836 /* First, do tracepoint data collection. Save registers. */
1838 buf
[i
++] = 0x60; /* pushad */
1839 buf
[i
++] = 0x68; /* push tpaddr aka $pc */
1840 *((int *)(buf
+ i
)) = (int) tpaddr
;
1842 buf
[i
++] = 0x9c; /* pushf */
1843 buf
[i
++] = 0x1e; /* push %ds */
1844 buf
[i
++] = 0x06; /* push %es */
1845 buf
[i
++] = 0x0f; /* push %fs */
1847 buf
[i
++] = 0x0f; /* push %gs */
1849 buf
[i
++] = 0x16; /* push %ss */
1850 buf
[i
++] = 0x0e; /* push %cs */
1851 append_insns (&buildaddr
, i
, buf
);
1853 /* Stack space for the collecting_t object. */
1855 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1857 /* Build the object. */
1858 i
+= push_opcode (&buf
[i
], "b8"); /* mov <tpoint>,%eax */
1859 memcpy (buf
+ i
, &tpoint
, 4);
1861 i
+= push_opcode (&buf
[i
], "89 04 24"); /* mov %eax,(%esp) */
1863 i
+= push_opcode (&buf
[i
], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1864 i
+= push_opcode (&buf
[i
], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1865 append_insns (&buildaddr
, i
, buf
);
1867 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1868 If we cared for it, this could be using xchg alternatively. */
1871 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1872 i
+= push_opcode (&buf
[i
], "f0 0f b1 25"); /* lock cmpxchg
1874 memcpy (&buf
[i
], (void *) &lockaddr
, 4);
1876 i
+= push_opcode (&buf
[i
], "85 c0"); /* test %eax,%eax */
1877 i
+= push_opcode (&buf
[i
], "75 f2"); /* jne <again> */
1878 append_insns (&buildaddr
, i
, buf
);
1881 /* Set up arguments to the gdb_collect call. */
1883 i
+= push_opcode (&buf
[i
], "89 e0"); /* mov %esp,%eax */
1884 i
+= push_opcode (&buf
[i
], "83 c0 08"); /* add $0x08,%eax */
1885 i
+= push_opcode (&buf
[i
], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1886 append_insns (&buildaddr
, i
, buf
);
1889 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1890 append_insns (&buildaddr
, i
, buf
);
1893 i
+= push_opcode (&buf
[i
], "c7 04 24"); /* movl <addr>,(%esp) */
1894 memcpy (&buf
[i
], (void *) &tpoint
, 4);
1896 append_insns (&buildaddr
, i
, buf
);
1898 buf
[0] = 0xe8; /* call <reladdr> */
1899 offset
= collector
- (buildaddr
+ sizeof (jump_insn
));
1900 memcpy (buf
+ 1, &offset
, 4);
1901 append_insns (&buildaddr
, 5, buf
);
1902 /* Clean up after the call. */
1903 buf
[0] = 0x83; /* add $0x8,%esp */
1906 append_insns (&buildaddr
, 3, buf
);
1909 /* Clear the spin-lock. This would need the LOCK prefix on older
1912 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1913 i
+= push_opcode (&buf
[i
], "a3"); /* mov %eax, lockaddr */
1914 memcpy (buf
+ i
, &lockaddr
, 4);
1916 append_insns (&buildaddr
, i
, buf
);
1919 /* Remove stack that had been used for the collect_t object. */
1921 i
+= push_opcode (&buf
[i
], "83 c4 08"); /* add $0x08,%esp */
1922 append_insns (&buildaddr
, i
, buf
);
1925 buf
[i
++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1928 buf
[i
++] = 0x17; /* pop %ss */
1929 buf
[i
++] = 0x0f; /* pop %gs */
1931 buf
[i
++] = 0x0f; /* pop %fs */
1933 buf
[i
++] = 0x07; /* pop %es */
1934 buf
[i
++] = 0x1f; /* pop %ds */
1935 buf
[i
++] = 0x9d; /* popf */
1936 buf
[i
++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1939 buf
[i
++] = 0x61; /* popad */
1940 append_insns (&buildaddr
, i
, buf
);
1942 /* Now, adjust the original instruction to execute in the jump
1944 *adjusted_insn_addr
= buildaddr
;
1945 relocate_instruction (&buildaddr
, tpaddr
);
1946 *adjusted_insn_addr_end
= buildaddr
;
1948 /* Write the jump back to the program. */
1949 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1950 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1951 memcpy (buf
+ 1, &offset
, 4);
1952 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1954 /* The jump pad is now built. Wire in a jump to our jump pad. This
1955 is always done last (by our caller actually), so that we can
1956 install fast tracepoints with threads running. This relies on
1957 the agent's atomic write support. */
1960 /* Create a trampoline. */
1961 *trampoline_size
= sizeof (jump_insn
);
1962 if (!claim_trampoline_space (*trampoline_size
, trampoline
))
1964 /* No trampoline space available. */
1966 "E.Cannot allocate trampoline space needed for fast "
1967 "tracepoints on 4-byte instructions.");
1971 offset
= *jump_entry
- (*trampoline
+ sizeof (jump_insn
));
1972 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1973 memcpy (buf
+ 1, &offset
, 4);
1974 write_inferior_memory (*trampoline
, buf
, sizeof (jump_insn
));
1976 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1977 offset
= (*trampoline
- (tpaddr
+ sizeof (small_jump_insn
))) & 0xffff;
1978 memcpy (buf
, small_jump_insn
, sizeof (small_jump_insn
));
1979 memcpy (buf
+ 2, &offset
, 2);
1980 memcpy (jjump_pad_insn
, buf
, sizeof (small_jump_insn
));
1981 *jjump_pad_insn_size
= sizeof (small_jump_insn
);
1985 /* Else use a 32-bit relative jump instruction. */
1986 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1987 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1988 memcpy (buf
+ 1, &offset
, 4);
1989 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1990 *jjump_pad_insn_size
= sizeof (jump_insn
);
1993 /* Return the end address of our pad. */
1994 *jump_entry
= buildaddr
;
2000 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
2001 CORE_ADDR collector
,
2004 CORE_ADDR
*jump_entry
,
2005 CORE_ADDR
*trampoline
,
2006 ULONGEST
*trampoline_size
,
2007 unsigned char *jjump_pad_insn
,
2008 ULONGEST
*jjump_pad_insn_size
,
2009 CORE_ADDR
*adjusted_insn_addr
,
2010 CORE_ADDR
*adjusted_insn_addr_end
,
2014 if (is_64bit_tdesc ())
2015 return amd64_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
2016 collector
, lockaddr
,
2017 orig_size
, jump_entry
,
2018 trampoline
, trampoline_size
,
2020 jjump_pad_insn_size
,
2022 adjusted_insn_addr_end
,
2026 return i386_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
2027 collector
, lockaddr
,
2028 orig_size
, jump_entry
,
2029 trampoline
, trampoline_size
,
2031 jjump_pad_insn_size
,
2033 adjusted_insn_addr_end
,
2037 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
2041 x86_get_min_fast_tracepoint_insn_len (void)
2043 static int warned_about_fast_tracepoints
= 0;
2046 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
2047 used for fast tracepoints. */
2048 if (is_64bit_tdesc ())
2052 if (agent_loaded_p ())
2054 char errbuf
[IPA_BUFSIZ
];
2058 /* On x86, if trampolines are available, then 4-byte jump instructions
2059 with a 2-byte offset may be used, otherwise 5-byte jump instructions
2060 with a 4-byte offset are used instead. */
2061 if (have_fast_tracepoint_trampoline_buffer (errbuf
))
2065 /* GDB has no channel to explain to user why a shorter fast
2066 tracepoint is not possible, but at least make GDBserver
2067 mention that something has gone awry. */
2068 if (!warned_about_fast_tracepoints
)
2070 warning ("4-byte fast tracepoints not available; %s\n", errbuf
);
2071 warned_about_fast_tracepoints
= 1;
2078 /* Indicate that the minimum length is currently unknown since the IPA
2079 has not loaded yet. */
2085 add_insns (unsigned char *start
, int len
)
2087 CORE_ADDR buildaddr
= current_insn_ptr
;
2090 debug_printf ("Adding %d bytes of insn at %s\n",
2091 len
, paddress (buildaddr
));
2093 append_insns (&buildaddr
, len
, start
);
2094 current_insn_ptr
= buildaddr
;
2097 /* Our general strategy for emitting code is to avoid specifying raw
2098 bytes whenever possible, and instead copy a block of inline asm
2099 that is embedded in the function. This is a little messy, because
2100 we need to keep the compiler from discarding what looks like dead
2101 code, plus suppress various warnings. */
2103 #define EMIT_ASM(NAME, INSNS) \
2106 extern unsigned char start_ ## NAME, end_ ## NAME; \
2107 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2108 __asm__ ("jmp end_" #NAME "\n" \
2109 "\t" "start_" #NAME ":" \
2111 "\t" "end_" #NAME ":"); \
2116 #define EMIT_ASM32(NAME,INSNS) \
2119 extern unsigned char start_ ## NAME, end_ ## NAME; \
2120 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2121 __asm__ (".code32\n" \
2122 "\t" "jmp end_" #NAME "\n" \
2123 "\t" "start_" #NAME ":\n" \
2125 "\t" "end_" #NAME ":\n" \
2131 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
2138 amd64_emit_prologue (void)
2140 EMIT_ASM (amd64_prologue
,
2142 "movq %rsp,%rbp\n\t"
2143 "sub $0x20,%rsp\n\t"
2144 "movq %rdi,-8(%rbp)\n\t"
2145 "movq %rsi,-16(%rbp)");
2150 amd64_emit_epilogue (void)
2152 EMIT_ASM (amd64_epilogue
,
2153 "movq -16(%rbp),%rdi\n\t"
2154 "movq %rax,(%rdi)\n\t"
/* Bytecode "add": add the 64-bit value at the top of the agent
   expression stack into the accumulator (%rax), then drop the
   consumed stack slot.  */
2161 amd64_emit_add (void)
2163 EMIT_ASM (amd64_add
,
2164 "add (%rsp),%rax\n\t"
2165 "lea 0x8(%rsp),%rsp");
2169 amd64_emit_sub (void)
2171 EMIT_ASM (amd64_sub
,
2172 "sub %rax,(%rsp)\n\t"
2177 amd64_emit_mul (void)
2183 amd64_emit_lsh (void)
2189 amd64_emit_rsh_signed (void)
2195 amd64_emit_rsh_unsigned (void)
2201 amd64_emit_ext (int arg
)
2206 EMIT_ASM (amd64_ext_8
,
2212 EMIT_ASM (amd64_ext_16
,
2217 EMIT_ASM (amd64_ext_32
,
2226 amd64_emit_log_not (void)
2228 EMIT_ASM (amd64_log_not
,
2229 "test %rax,%rax\n\t"
/* Bytecode "bit_and": bitwise-AND the 64-bit value at the top of the
   agent expression stack into the accumulator (%rax), then drop the
   consumed stack slot.  */
2235 amd64_emit_bit_and (void)
2237 EMIT_ASM (amd64_and
,
2238 "and (%rsp),%rax\n\t"
2239 "lea 0x8(%rsp),%rsp");
2243 amd64_emit_bit_or (void)
2246 "or (%rsp),%rax\n\t"
2247 "lea 0x8(%rsp),%rsp");
2251 amd64_emit_bit_xor (void)
2253 EMIT_ASM (amd64_xor
,
2254 "xor (%rsp),%rax\n\t"
2255 "lea 0x8(%rsp),%rsp");
2259 amd64_emit_bit_not (void)
2261 EMIT_ASM (amd64_bit_not
,
2262 "xorq $0xffffffffffffffff,%rax");
2266 amd64_emit_equal (void)
2268 EMIT_ASM (amd64_equal
,
2269 "cmp %rax,(%rsp)\n\t"
2270 "je .Lamd64_equal_true\n\t"
2272 "jmp .Lamd64_equal_end\n\t"
2273 ".Lamd64_equal_true:\n\t"
2275 ".Lamd64_equal_end:\n\t"
2276 "lea 0x8(%rsp),%rsp");
2280 amd64_emit_less_signed (void)
2282 EMIT_ASM (amd64_less_signed
,
2283 "cmp %rax,(%rsp)\n\t"
2284 "jl .Lamd64_less_signed_true\n\t"
2286 "jmp .Lamd64_less_signed_end\n\t"
2287 ".Lamd64_less_signed_true:\n\t"
2289 ".Lamd64_less_signed_end:\n\t"
2290 "lea 0x8(%rsp),%rsp");
2294 amd64_emit_less_unsigned (void)
2296 EMIT_ASM (amd64_less_unsigned
,
2297 "cmp %rax,(%rsp)\n\t"
2298 "jb .Lamd64_less_unsigned_true\n\t"
2300 "jmp .Lamd64_less_unsigned_end\n\t"
2301 ".Lamd64_less_unsigned_true:\n\t"
2303 ".Lamd64_less_unsigned_end:\n\t"
2304 "lea 0x8(%rsp),%rsp");
2308 amd64_emit_ref (int size
)
2313 EMIT_ASM (amd64_ref1
,
2317 EMIT_ASM (amd64_ref2
,
2321 EMIT_ASM (amd64_ref4
,
2322 "movl (%rax),%eax");
2325 EMIT_ASM (amd64_ref8
,
2326 "movq (%rax),%rax");
2332 amd64_emit_if_goto (int *offset_p
, int *size_p
)
2334 EMIT_ASM (amd64_if_goto
,
2338 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2346 amd64_emit_goto (int *offset_p
, int *size_p
)
2348 EMIT_ASM (amd64_goto
,
2349 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2357 amd64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2359 int diff
= (to
- (from
+ size
));
2360 unsigned char buf
[sizeof (int)];
2368 memcpy (buf
, &diff
, sizeof (int));
2369 write_inferior_memory (from
, buf
, sizeof (int));
2373 amd64_emit_const (LONGEST num
)
2375 unsigned char buf
[16];
2377 CORE_ADDR buildaddr
= current_insn_ptr
;
2380 buf
[i
++] = 0x48; buf
[i
++] = 0xb8; /* mov $<n>,%rax */
2381 memcpy (&buf
[i
], &num
, sizeof (num
));
2383 append_insns (&buildaddr
, i
, buf
);
2384 current_insn_ptr
= buildaddr
;
2388 amd64_emit_call (CORE_ADDR fn
)
2390 unsigned char buf
[16];
2392 CORE_ADDR buildaddr
;
2395 /* The destination function being in the shared library, may be
2396 >31-bits away off the compiled code pad. */
2398 buildaddr
= current_insn_ptr
;
2400 offset64
= fn
- (buildaddr
+ 1 /* call op */ + 4 /* 32-bit offset */);
2404 if (offset64
> INT_MAX
|| offset64
< INT_MIN
)
2406 /* Offset is too large for a call. Use callq, but that requires
2407 a register, so avoid it if possible. Use r10, since it is
2408 call-clobbered, we don't have to push/pop it. */
2409 buf
[i
++] = 0x48; /* mov $fn,%r10 */
2411 memcpy (buf
+ i
, &fn
, 8);
2413 buf
[i
++] = 0xff; /* callq *%r10 */
2418 int offset32
= offset64
; /* we know we can't overflow here. */
2419 memcpy (buf
+ i
, &offset32
, 4);
2423 append_insns (&buildaddr
, i
, buf
);
2424 current_insn_ptr
= buildaddr
;
2428 amd64_emit_reg (int reg
)
2430 unsigned char buf
[16];
2432 CORE_ADDR buildaddr
;
2434 /* Assume raw_regs is still in %rdi. */
2435 buildaddr
= current_insn_ptr
;
2437 buf
[i
++] = 0xbe; /* mov $<n>,%esi */
2438 memcpy (&buf
[i
], ®
, sizeof (reg
));
2440 append_insns (&buildaddr
, i
, buf
);
2441 current_insn_ptr
= buildaddr
;
2442 amd64_emit_call (get_raw_reg_func_addr ());
2446 amd64_emit_pop (void)
2448 EMIT_ASM (amd64_pop
,
2453 amd64_emit_stack_flush (void)
2455 EMIT_ASM (amd64_stack_flush
,
2460 amd64_emit_zero_ext (int arg
)
2465 EMIT_ASM (amd64_zero_ext_8
,
2469 EMIT_ASM (amd64_zero_ext_16
,
2470 "and $0xffff,%rax");
2473 EMIT_ASM (amd64_zero_ext_32
,
2474 "mov $0xffffffff,%rcx\n\t"
2483 amd64_emit_swap (void)
2485 EMIT_ASM (amd64_swap
,
2492 amd64_emit_stack_adjust (int n
)
2494 unsigned char buf
[16];
2496 CORE_ADDR buildaddr
= current_insn_ptr
;
2499 buf
[i
++] = 0x48; /* lea $<n>(%rsp),%rsp */
2503 /* This only handles adjustments up to 16, but we don't expect any more. */
2505 append_insns (&buildaddr
, i
, buf
);
2506 current_insn_ptr
= buildaddr
;
2509 /* FN's prototype is `LONGEST(*fn)(int)'. */
2512 amd64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2514 unsigned char buf
[16];
2516 CORE_ADDR buildaddr
;
2518 buildaddr
= current_insn_ptr
;
2520 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2521 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2523 append_insns (&buildaddr
, i
, buf
);
2524 current_insn_ptr
= buildaddr
;
2525 amd64_emit_call (fn
);
2528 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2531 amd64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2533 unsigned char buf
[16];
2535 CORE_ADDR buildaddr
;
2537 buildaddr
= current_insn_ptr
;
2539 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2540 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2542 append_insns (&buildaddr
, i
, buf
);
2543 current_insn_ptr
= buildaddr
;
2544 EMIT_ASM (amd64_void_call_2_a
,
2545 /* Save away a copy of the stack top. */
2547 /* Also pass top as the second argument. */
2549 amd64_emit_call (fn
);
2550 EMIT_ASM (amd64_void_call_2_b
,
2551 /* Restore the stack top, %rax may have been trashed. */
2556 amd64_emit_eq_goto (int *offset_p
, int *size_p
)
2559 "cmp %rax,(%rsp)\n\t"
2560 "jne .Lamd64_eq_fallthru\n\t"
2561 "lea 0x8(%rsp),%rsp\n\t"
2563 /* jmp, but don't trust the assembler to choose the right jump */
2564 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2565 ".Lamd64_eq_fallthru:\n\t"
2566 "lea 0x8(%rsp),%rsp\n\t"
2576 amd64_emit_ne_goto (int *offset_p
, int *size_p
)
2579 "cmp %rax,(%rsp)\n\t"
2580 "je .Lamd64_ne_fallthru\n\t"
2581 "lea 0x8(%rsp),%rsp\n\t"
2583 /* jmp, but don't trust the assembler to choose the right jump */
2584 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2585 ".Lamd64_ne_fallthru:\n\t"
2586 "lea 0x8(%rsp),%rsp\n\t"
2596 amd64_emit_lt_goto (int *offset_p
, int *size_p
)
2599 "cmp %rax,(%rsp)\n\t"
2600 "jnl .Lamd64_lt_fallthru\n\t"
2601 "lea 0x8(%rsp),%rsp\n\t"
2603 /* jmp, but don't trust the assembler to choose the right jump */
2604 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2605 ".Lamd64_lt_fallthru:\n\t"
2606 "lea 0x8(%rsp),%rsp\n\t"
2616 amd64_emit_le_goto (int *offset_p
, int *size_p
)
2619 "cmp %rax,(%rsp)\n\t"
2620 "jnle .Lamd64_le_fallthru\n\t"
2621 "lea 0x8(%rsp),%rsp\n\t"
2623 /* jmp, but don't trust the assembler to choose the right jump */
2624 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2625 ".Lamd64_le_fallthru:\n\t"
2626 "lea 0x8(%rsp),%rsp\n\t"
2636 amd64_emit_gt_goto (int *offset_p
, int *size_p
)
2639 "cmp %rax,(%rsp)\n\t"
2640 "jng .Lamd64_gt_fallthru\n\t"
2641 "lea 0x8(%rsp),%rsp\n\t"
2643 /* jmp, but don't trust the assembler to choose the right jump */
2644 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2645 ".Lamd64_gt_fallthru:\n\t"
2646 "lea 0x8(%rsp),%rsp\n\t"
2656 amd64_emit_ge_goto (int *offset_p
, int *size_p
)
2659 "cmp %rax,(%rsp)\n\t"
2660 "jnge .Lamd64_ge_fallthru\n\t"
2661 ".Lamd64_ge_jump:\n\t"
2662 "lea 0x8(%rsp),%rsp\n\t"
2664 /* jmp, but don't trust the assembler to choose the right jump */
2665 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2666 ".Lamd64_ge_fallthru:\n\t"
2667 "lea 0x8(%rsp),%rsp\n\t"
2676 struct emit_ops amd64_emit_ops
=
2678 amd64_emit_prologue
,
2679 amd64_emit_epilogue
,
2684 amd64_emit_rsh_signed
,
2685 amd64_emit_rsh_unsigned
,
2693 amd64_emit_less_signed
,
2694 amd64_emit_less_unsigned
,
2698 amd64_write_goto_address
,
2703 amd64_emit_stack_flush
,
2704 amd64_emit_zero_ext
,
2706 amd64_emit_stack_adjust
,
2707 amd64_emit_int_call_1
,
2708 amd64_emit_void_call_2
,
2717 #endif /* __x86_64__ */
2720 i386_emit_prologue (void)
2722 EMIT_ASM32 (i386_prologue
,
2726 /* At this point, the raw regs base address is at 8(%ebp), and the
2727 value pointer is at 12(%ebp). */
2731 i386_emit_epilogue (void)
2733 EMIT_ASM32 (i386_epilogue
,
2734 "mov 12(%ebp),%ecx\n\t"
2735 "mov %eax,(%ecx)\n\t"
2736 "mov %ebx,0x4(%ecx)\n\t"
2744 i386_emit_add (void)
2746 EMIT_ASM32 (i386_add
,
2747 "add (%esp),%eax\n\t"
2748 "adc 0x4(%esp),%ebx\n\t"
2749 "lea 0x8(%esp),%esp");
2753 i386_emit_sub (void)
2755 EMIT_ASM32 (i386_sub
,
2756 "subl %eax,(%esp)\n\t"
2757 "sbbl %ebx,4(%esp)\n\t"
2763 i386_emit_mul (void)
2769 i386_emit_lsh (void)
2775 i386_emit_rsh_signed (void)
2781 i386_emit_rsh_unsigned (void)
2787 i386_emit_ext (int arg
)
2792 EMIT_ASM32 (i386_ext_8
,
2795 "movl %eax,%ebx\n\t"
2799 EMIT_ASM32 (i386_ext_16
,
2801 "movl %eax,%ebx\n\t"
2805 EMIT_ASM32 (i386_ext_32
,
2806 "movl %eax,%ebx\n\t"
2815 i386_emit_log_not (void)
2817 EMIT_ASM32 (i386_log_not
,
2819 "test %eax,%eax\n\t"
2826 i386_emit_bit_and (void)
2828 EMIT_ASM32 (i386_and
,
2829 "and (%esp),%eax\n\t"
2830 "and 0x4(%esp),%ebx\n\t"
2831 "lea 0x8(%esp),%esp");
2835 i386_emit_bit_or (void)
2837 EMIT_ASM32 (i386_or
,
2838 "or (%esp),%eax\n\t"
2839 "or 0x4(%esp),%ebx\n\t"
2840 "lea 0x8(%esp),%esp");
2844 i386_emit_bit_xor (void)
2846 EMIT_ASM32 (i386_xor
,
2847 "xor (%esp),%eax\n\t"
2848 "xor 0x4(%esp),%ebx\n\t"
2849 "lea 0x8(%esp),%esp");
2853 i386_emit_bit_not (void)
2855 EMIT_ASM32 (i386_bit_not
,
2856 "xor $0xffffffff,%eax\n\t"
2857 "xor $0xffffffff,%ebx\n\t");
2861 i386_emit_equal (void)
2863 EMIT_ASM32 (i386_equal
,
2864 "cmpl %ebx,4(%esp)\n\t"
2865 "jne .Li386_equal_false\n\t"
2866 "cmpl %eax,(%esp)\n\t"
2867 "je .Li386_equal_true\n\t"
2868 ".Li386_equal_false:\n\t"
2870 "jmp .Li386_equal_end\n\t"
2871 ".Li386_equal_true:\n\t"
2873 ".Li386_equal_end:\n\t"
2875 "lea 0x8(%esp),%esp");
2879 i386_emit_less_signed (void)
2881 EMIT_ASM32 (i386_less_signed
,
2882 "cmpl %ebx,4(%esp)\n\t"
2883 "jl .Li386_less_signed_true\n\t"
2884 "jne .Li386_less_signed_false\n\t"
2885 "cmpl %eax,(%esp)\n\t"
2886 "jl .Li386_less_signed_true\n\t"
2887 ".Li386_less_signed_false:\n\t"
2889 "jmp .Li386_less_signed_end\n\t"
2890 ".Li386_less_signed_true:\n\t"
2892 ".Li386_less_signed_end:\n\t"
2894 "lea 0x8(%esp),%esp");
2898 i386_emit_less_unsigned (void)
2900 EMIT_ASM32 (i386_less_unsigned
,
2901 "cmpl %ebx,4(%esp)\n\t"
2902 "jb .Li386_less_unsigned_true\n\t"
2903 "jne .Li386_less_unsigned_false\n\t"
2904 "cmpl %eax,(%esp)\n\t"
2905 "jb .Li386_less_unsigned_true\n\t"
2906 ".Li386_less_unsigned_false:\n\t"
2908 "jmp .Li386_less_unsigned_end\n\t"
2909 ".Li386_less_unsigned_true:\n\t"
2911 ".Li386_less_unsigned_end:\n\t"
2913 "lea 0x8(%esp),%esp");
2917 i386_emit_ref (int size
)
2922 EMIT_ASM32 (i386_ref1
,
2926 EMIT_ASM32 (i386_ref2
,
2930 EMIT_ASM32 (i386_ref4
,
2931 "movl (%eax),%eax");
2934 EMIT_ASM32 (i386_ref8
,
2935 "movl 4(%eax),%ebx\n\t"
2936 "movl (%eax),%eax");
2942 i386_emit_if_goto (int *offset_p
, int *size_p
)
2944 EMIT_ASM32 (i386_if_goto
,
2950 /* Don't trust the assembler to choose the right jump */
2951 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2954 *offset_p
= 11; /* be sure that this matches the sequence above */
2960 i386_emit_goto (int *offset_p
, int *size_p
)
2962 EMIT_ASM32 (i386_goto
,
2963 /* Don't trust the assembler to choose the right jump */
2964 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2972 i386_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2974 int diff
= (to
- (from
+ size
));
2975 unsigned char buf
[sizeof (int)];
2977 /* We're only doing 4-byte sizes at the moment. */
2984 memcpy (buf
, &diff
, sizeof (int));
2985 write_inferior_memory (from
, buf
, sizeof (int));
2989 i386_emit_const (LONGEST num
)
2991 unsigned char buf
[16];
2993 CORE_ADDR buildaddr
= current_insn_ptr
;
2996 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2997 lo
= num
& 0xffffffff;
2998 memcpy (&buf
[i
], &lo
, sizeof (lo
));
3000 hi
= ((num
>> 32) & 0xffffffff);
3003 buf
[i
++] = 0xbb; /* mov $<n>,%ebx */
3004 memcpy (&buf
[i
], &hi
, sizeof (hi
));
3009 buf
[i
++] = 0x31; buf
[i
++] = 0xdb; /* xor %ebx,%ebx */
3011 append_insns (&buildaddr
, i
, buf
);
3012 current_insn_ptr
= buildaddr
;
3016 i386_emit_call (CORE_ADDR fn
)
3018 unsigned char buf
[16];
3020 CORE_ADDR buildaddr
;
3022 buildaddr
= current_insn_ptr
;
3024 buf
[i
++] = 0xe8; /* call <reladdr> */
3025 offset
= ((int) fn
) - (buildaddr
+ 5);
3026 memcpy (buf
+ 1, &offset
, 4);
3027 append_insns (&buildaddr
, 5, buf
);
3028 current_insn_ptr
= buildaddr
;
3032 i386_emit_reg (int reg
)
3034 unsigned char buf
[16];
3036 CORE_ADDR buildaddr
;
3038 EMIT_ASM32 (i386_reg_a
,
3040 buildaddr
= current_insn_ptr
;
3042 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
3043 memcpy (&buf
[i
], ®
, sizeof (reg
));
3045 append_insns (&buildaddr
, i
, buf
);
3046 current_insn_ptr
= buildaddr
;
3047 EMIT_ASM32 (i386_reg_b
,
3048 "mov %eax,4(%esp)\n\t"
3049 "mov 8(%ebp),%eax\n\t"
3051 i386_emit_call (get_raw_reg_func_addr ());
3052 EMIT_ASM32 (i386_reg_c
,
3054 "lea 0x8(%esp),%esp");
3058 i386_emit_pop (void)
3060 EMIT_ASM32 (i386_pop
,
3066 i386_emit_stack_flush (void)
3068 EMIT_ASM32 (i386_stack_flush
,
3074 i386_emit_zero_ext (int arg
)
3079 EMIT_ASM32 (i386_zero_ext_8
,
3080 "and $0xff,%eax\n\t"
3084 EMIT_ASM32 (i386_zero_ext_16
,
3085 "and $0xffff,%eax\n\t"
3089 EMIT_ASM32 (i386_zero_ext_32
,
3098 i386_emit_swap (void)
3100 EMIT_ASM32 (i386_swap
,
3110 i386_emit_stack_adjust (int n
)
3112 unsigned char buf
[16];
3114 CORE_ADDR buildaddr
= current_insn_ptr
;
3117 buf
[i
++] = 0x8d; /* lea $<n>(%esp),%esp */
3121 append_insns (&buildaddr
, i
, buf
);
3122 current_insn_ptr
= buildaddr
;
3125 /* FN's prototype is `LONGEST(*fn)(int)'. */
3128 i386_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
3130 unsigned char buf
[16];
3132 CORE_ADDR buildaddr
;
3134 EMIT_ASM32 (i386_int_call_1_a
,
3135 /* Reserve a bit of stack space. */
3137 /* Put the one argument on the stack. */
3138 buildaddr
= current_insn_ptr
;
3140 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3143 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3145 append_insns (&buildaddr
, i
, buf
);
3146 current_insn_ptr
= buildaddr
;
3147 i386_emit_call (fn
);
3148 EMIT_ASM32 (i386_int_call_1_c
,
3150 "lea 0x8(%esp),%esp");
3153 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
3156 i386_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
3158 unsigned char buf
[16];
3160 CORE_ADDR buildaddr
;
3162 EMIT_ASM32 (i386_void_call_2_a
,
3163 /* Preserve %eax only; we don't have to worry about %ebx. */
3165 /* Reserve a bit of stack space for arguments. */
3166 "sub $0x10,%esp\n\t"
3167 /* Copy "top" to the second argument position. (Note that
3168 we can't assume function won't scribble on its
3169 arguments, so don't try to restore from this.) */
3170 "mov %eax,4(%esp)\n\t"
3171 "mov %ebx,8(%esp)");
3172 /* Put the first argument on the stack. */
3173 buildaddr
= current_insn_ptr
;
3175 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3178 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3180 append_insns (&buildaddr
, i
, buf
);
3181 current_insn_ptr
= buildaddr
;
3182 i386_emit_call (fn
);
3183 EMIT_ASM32 (i386_void_call_2_b
,
3184 "lea 0x10(%esp),%esp\n\t"
3185 /* Restore original stack top. */
3191 i386_emit_eq_goto (int *offset_p
, int *size_p
)
3194 /* Check low half first, more likely to be decider */
3195 "cmpl %eax,(%esp)\n\t"
3196 "jne .Leq_fallthru\n\t"
3197 "cmpl %ebx,4(%esp)\n\t"
3198 "jne .Leq_fallthru\n\t"
3199 "lea 0x8(%esp),%esp\n\t"
3202 /* jmp, but don't trust the assembler to choose the right jump */
3203 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3204 ".Leq_fallthru:\n\t"
3205 "lea 0x8(%esp),%esp\n\t"
3216 i386_emit_ne_goto (int *offset_p
, int *size_p
)
3219 /* Check low half first, more likely to be decider */
3220 "cmpl %eax,(%esp)\n\t"
3222 "cmpl %ebx,4(%esp)\n\t"
3223 "je .Lne_fallthru\n\t"
3225 "lea 0x8(%esp),%esp\n\t"
3228 /* jmp, but don't trust the assembler to choose the right jump */
3229 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3230 ".Lne_fallthru:\n\t"
3231 "lea 0x8(%esp),%esp\n\t"
3242 i386_emit_lt_goto (int *offset_p
, int *size_p
)
3245 "cmpl %ebx,4(%esp)\n\t"
3247 "jne .Llt_fallthru\n\t"
3248 "cmpl %eax,(%esp)\n\t"
3249 "jnl .Llt_fallthru\n\t"
3251 "lea 0x8(%esp),%esp\n\t"
3254 /* jmp, but don't trust the assembler to choose the right jump */
3255 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3256 ".Llt_fallthru:\n\t"
3257 "lea 0x8(%esp),%esp\n\t"
3268 i386_emit_le_goto (int *offset_p
, int *size_p
)
3271 "cmpl %ebx,4(%esp)\n\t"
3273 "jne .Lle_fallthru\n\t"
3274 "cmpl %eax,(%esp)\n\t"
3275 "jnle .Lle_fallthru\n\t"
3277 "lea 0x8(%esp),%esp\n\t"
3280 /* jmp, but don't trust the assembler to choose the right jump */
3281 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3282 ".Lle_fallthru:\n\t"
3283 "lea 0x8(%esp),%esp\n\t"
3294 i386_emit_gt_goto (int *offset_p
, int *size_p
)
3297 "cmpl %ebx,4(%esp)\n\t"
3299 "jne .Lgt_fallthru\n\t"
3300 "cmpl %eax,(%esp)\n\t"
3301 "jng .Lgt_fallthru\n\t"
3303 "lea 0x8(%esp),%esp\n\t"
3306 /* jmp, but don't trust the assembler to choose the right jump */
3307 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3308 ".Lgt_fallthru:\n\t"
3309 "lea 0x8(%esp),%esp\n\t"
3320 i386_emit_ge_goto (int *offset_p
, int *size_p
)
3323 "cmpl %ebx,4(%esp)\n\t"
3325 "jne .Lge_fallthru\n\t"
3326 "cmpl %eax,(%esp)\n\t"
3327 "jnge .Lge_fallthru\n\t"
3329 "lea 0x8(%esp),%esp\n\t"
3332 /* jmp, but don't trust the assembler to choose the right jump */
3333 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3334 ".Lge_fallthru:\n\t"
3335 "lea 0x8(%esp),%esp\n\t"
3345 struct emit_ops i386_emit_ops
=
3353 i386_emit_rsh_signed
,
3354 i386_emit_rsh_unsigned
,
3362 i386_emit_less_signed
,
3363 i386_emit_less_unsigned
,
3367 i386_write_goto_address
,
3372 i386_emit_stack_flush
,
3375 i386_emit_stack_adjust
,
3376 i386_emit_int_call_1
,
3377 i386_emit_void_call_2
,
3387 static struct emit_ops
*
3391 if (is_64bit_tdesc ())
3392 return &amd64_emit_ops
;
3395 return &i386_emit_ops
;
/* Implement linux_target_ops.supports_range_stepping; always
   available on x86.  */

static int
x86_supports_range_stepping (void)
{
  return 1;
}
3404 /* This is initialized assuming an amd64 target.
3405 x86_arch_setup will correct it for i386 or amd64 targets. */
3407 struct linux_target_ops the_low_target
=
3410 x86_linux_regs_info
,
3411 x86_cannot_fetch_register
,
3412 x86_cannot_store_register
,
3413 NULL
, /* fetch_register */
3421 x86_supports_z_point_type
,
3424 x86_stopped_by_watchpoint
,
3425 x86_stopped_data_address
,
3426 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3427 native i386 case (no registers smaller than an xfer unit), and are not
3428 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3431 /* need to fix up i386 siginfo if host is amd64 */
3433 x86_linux_new_process
,
3434 x86_linux_new_thread
,
3435 x86_linux_prepare_to_resume
,
3436 x86_linux_process_qsupported
,
3437 x86_supports_tracepoints
,
3438 x86_get_thread_area
,
3439 x86_install_fast_tracepoint_jump_pad
,
3441 x86_get_min_fast_tracepoint_insn_len
,
3442 x86_supports_range_stepping
,
3446 initialize_low_arch (void)
3448 /* Initialize the Linux target descriptions. */
3450 init_registers_amd64_linux ();
3451 init_registers_amd64_avx_linux ();
3452 init_registers_amd64_avx512_linux ();
3453 init_registers_amd64_mpx_linux ();
3455 init_registers_x32_linux ();
3456 init_registers_x32_avx_linux ();
3457 init_registers_x32_avx512_linux ();
3459 tdesc_amd64_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3460 copy_target_description (tdesc_amd64_linux_no_xml
, tdesc_amd64_linux
);
3461 tdesc_amd64_linux_no_xml
->xmltarget
= xmltarget_amd64_linux_no_xml
;
3463 init_registers_i386_linux ();
3464 init_registers_i386_mmx_linux ();
3465 init_registers_i386_avx_linux ();
3466 init_registers_i386_avx512_linux ();
3467 init_registers_i386_mpx_linux ();
3469 tdesc_i386_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3470 copy_target_description (tdesc_i386_linux_no_xml
, tdesc_i386_linux
);
3471 tdesc_i386_linux_no_xml
->xmltarget
= xmltarget_i386_linux_no_xml
;
3473 initialize_regsets_info (&x86_regsets_info
);