1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
3 Copyright (C) 2002-2014 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
25 #include "linux-low.h"
28 #include "i386-xstate.h"
30 #include "gdb_proc_service.h"
31 /* Don't include elf/common.h if linux/elf.h got included by
32 gdb_proc_service.h. */
34 #include "elf/common.h"
39 #include "tracepoint.h"
43 /* Defined in auto-generated file amd64-linux.c. */
44 void init_registers_amd64_linux (void);
45 extern const struct target_desc
*tdesc_amd64_linux
;
47 /* Defined in auto-generated file amd64-avx-linux.c. */
48 void init_registers_amd64_avx_linux (void);
49 extern const struct target_desc
*tdesc_amd64_avx_linux
;
51 /* Defined in auto-generated file amd64-avx512-linux.c. */
52 void init_registers_amd64_avx512_linux (void);
53 extern const struct target_desc
*tdesc_amd64_avx512_linux
;
55 /* Defined in auto-generated file amd64-mpx-linux.c. */
56 void init_registers_amd64_mpx_linux (void);
57 extern const struct target_desc
*tdesc_amd64_mpx_linux
;
59 /* Defined in auto-generated file x32-linux.c. */
60 void init_registers_x32_linux (void);
61 extern const struct target_desc
*tdesc_x32_linux
;
63 /* Defined in auto-generated file x32-avx-linux.c. */
64 void init_registers_x32_avx_linux (void);
65 extern const struct target_desc
*tdesc_x32_avx_linux
;
67 /* Defined in auto-generated file x32-avx512-linux.c. */
68 void init_registers_x32_avx512_linux (void);
69 extern const struct target_desc
*tdesc_x32_avx512_linux
;
73 /* Defined in auto-generated file i386-linux.c. */
74 void init_registers_i386_linux (void);
75 extern const struct target_desc
*tdesc_i386_linux
;
77 /* Defined in auto-generated file i386-mmx-linux.c. */
78 void init_registers_i386_mmx_linux (void);
79 extern const struct target_desc
*tdesc_i386_mmx_linux
;
81 /* Defined in auto-generated file i386-avx-linux.c. */
82 void init_registers_i386_avx_linux (void);
83 extern const struct target_desc
*tdesc_i386_avx_linux
;
85 /* Defined in auto-generated file i386-avx512-linux.c. */
86 void init_registers_i386_avx512_linux (void);
87 extern const struct target_desc
*tdesc_i386_avx512_linux
;
89 /* Defined in auto-generated file i386-mpx-linux.c. */
90 void init_registers_i386_mpx_linux (void);
91 extern const struct target_desc
*tdesc_i386_mpx_linux
;
94 static struct target_desc
*tdesc_amd64_linux_no_xml
;
96 static struct target_desc
*tdesc_i386_linux_no_xml
;
99 static unsigned char jump_insn
[] = { 0xe9, 0, 0, 0, 0 };
100 static unsigned char small_jump_insn
[] = { 0x66, 0xe9, 0, 0 };
102 /* Backward compatibility for gdb without XML support. */
104 static const char *xmltarget_i386_linux_no_xml
= "@<target>\
105 <architecture>i386</architecture>\
106 <osabi>GNU/Linux</osabi>\
110 static const char *xmltarget_amd64_linux_no_xml
= "@<target>\
111 <architecture>i386:x86-64</architecture>\
112 <osabi>GNU/Linux</osabi>\
117 #include <sys/procfs.h>
118 #include <sys/ptrace.h>
121 #ifndef PTRACE_GETREGSET
122 #define PTRACE_GETREGSET 0x4204
125 #ifndef PTRACE_SETREGSET
126 #define PTRACE_SETREGSET 0x4205
130 #ifndef PTRACE_GET_THREAD_AREA
131 #define PTRACE_GET_THREAD_AREA 25
134 /* This definition comes from prctl.h, but some kernels may not have it. */
135 #ifndef PTRACE_ARCH_PRCTL
136 #define PTRACE_ARCH_PRCTL 30
139 /* The following definitions come from prctl.h, but may be absent
140 for certain configurations. */
142 #define ARCH_SET_GS 0x1001
143 #define ARCH_SET_FS 0x1002
144 #define ARCH_GET_FS 0x1003
145 #define ARCH_GET_GS 0x1004
148 /* Per-process arch-specific data we want to keep. */
150 struct arch_process_info
152 struct i386_debug_reg_state debug_reg_state
;
155 /* Per-thread arch-specific data we want to keep. */
159 /* Non-zero if our copy differs from what's recorded in the thread. */
160 int debug_registers_changed
;
165 /* Mapping between the general-purpose registers in `struct user'
166 format and GDB's register array layout.
167 Note that the transfer layout uses 64-bit regs. */
168 static /*const*/ int i386_regmap
[] =
170 RAX
* 8, RCX
* 8, RDX
* 8, RBX
* 8,
171 RSP
* 8, RBP
* 8, RSI
* 8, RDI
* 8,
172 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
173 DS
* 8, ES
* 8, FS
* 8, GS
* 8
176 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
178 /* So code below doesn't have to care, i386 or amd64. */
179 #define ORIG_EAX ORIG_RAX
181 static const int x86_64_regmap
[] =
183 RAX
* 8, RBX
* 8, RCX
* 8, RDX
* 8,
184 RSI
* 8, RDI
* 8, RBP
* 8, RSP
* 8,
185 R8
* 8, R9
* 8, R10
* 8, R11
* 8,
186 R12
* 8, R13
* 8, R14
* 8, R15
* 8,
187 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
188 DS
* 8, ES
* 8, FS
* 8, GS
* 8,
189 -1, -1, -1, -1, -1, -1, -1, -1,
190 -1, -1, -1, -1, -1, -1, -1, -1,
191 -1, -1, -1, -1, -1, -1, -1, -1,
193 -1, -1, -1, -1, -1, -1, -1, -1,
195 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
196 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
197 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
198 -1, -1, -1, -1, -1, -1, -1, -1,
199 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
200 -1, -1, -1, -1, -1, -1, -1, -1,
201 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
202 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
203 -1, -1, -1, -1, -1, -1, -1, -1,
204 -1, -1, -1, -1, -1, -1, -1, -1,
205 -1, -1, -1, -1, -1, -1, -1, -1
208 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
209 #define X86_64_USER_REGS (GS + 1)
211 #else /* ! __x86_64__ */
213 /* Mapping between the general-purpose registers in `struct user'
214 format and GDB's register array layout. */
215 static /*const*/ int i386_regmap
[] =
217 EAX
* 4, ECX
* 4, EDX
* 4, EBX
* 4,
218 UESP
* 4, EBP
* 4, ESI
* 4, EDI
* 4,
219 EIP
* 4, EFL
* 4, CS
* 4, SS
* 4,
220 DS
* 4, ES
* 4, FS
* 4, GS
* 4
223 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
229 /* Returns true if the current inferior belongs to a x86-64 process,
233 is_64bit_tdesc (void)
235 struct regcache
*regcache
= get_thread_regcache (current_inferior
, 0);
237 return register_size (regcache
->tdesc
, 0) == 8;
243 /* Called by libthread_db. */
246 ps_get_thread_area (const struct ps_prochandle
*ph
,
247 lwpid_t lwpid
, int idx
, void **base
)
250 int use_64bit
= is_64bit_tdesc ();
257 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_FS
) == 0)
261 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_GS
) == 0)
272 unsigned int desc
[4];
274 if (ptrace (PTRACE_GET_THREAD_AREA
, lwpid
,
275 (void *) (intptr_t) idx
, (unsigned long) &desc
) < 0)
278 /* Ensure we properly extend the value to 64-bits for x86_64. */
279 *base
= (void *) (uintptr_t) desc
[1];
284 /* Get the thread area address. This is used to recognize which
285 thread is which when tracing with the in-process agent library. We
286 don't read anything from the address, and treat it as opaque; it's
287 the address itself that we assume is unique per-thread. */
290 x86_get_thread_area (int lwpid
, CORE_ADDR
*addr
)
293 int use_64bit
= is_64bit_tdesc ();
298 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_FS
) == 0)
300 *addr
= (CORE_ADDR
) (uintptr_t) base
;
309 struct lwp_info
*lwp
= find_lwp_pid (pid_to_ptid (lwpid
));
310 struct thread_info
*thr
= get_lwp_thread (lwp
);
311 struct regcache
*regcache
= get_thread_regcache (thr
, 1);
312 unsigned int desc
[4];
314 const int reg_thread_area
= 3; /* bits to scale down register value. */
317 collect_register_by_name (regcache
, "gs", &gs
);
319 idx
= gs
>> reg_thread_area
;
321 if (ptrace (PTRACE_GET_THREAD_AREA
,
323 (void *) (long) idx
, (unsigned long) &desc
) < 0)
334 x86_cannot_store_register (int regno
)
337 if (is_64bit_tdesc ())
341 return regno
>= I386_NUM_REGS
;
345 x86_cannot_fetch_register (int regno
)
348 if (is_64bit_tdesc ())
352 return regno
>= I386_NUM_REGS
;
356 x86_fill_gregset (struct regcache
*regcache
, void *buf
)
361 if (register_size (regcache
->tdesc
, 0) == 8)
363 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
364 if (x86_64_regmap
[i
] != -1)
365 collect_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
369 /* 32-bit inferior registers need to be zero-extended.
370 Callers would read uninitialized memory otherwise. */
371 memset (buf
, 0x00, X86_64_USER_REGS
* 8);
374 for (i
= 0; i
< I386_NUM_REGS
; i
++)
375 collect_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
377 collect_register_by_name (regcache
, "orig_eax",
378 ((char *) buf
) + ORIG_EAX
* 4);
382 x86_store_gregset (struct regcache
*regcache
, const void *buf
)
387 if (register_size (regcache
->tdesc
, 0) == 8)
389 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
390 if (x86_64_regmap
[i
] != -1)
391 supply_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
396 for (i
= 0; i
< I386_NUM_REGS
; i
++)
397 supply_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
399 supply_register_by_name (regcache
, "orig_eax",
400 ((char *) buf
) + ORIG_EAX
* 4);
404 x86_fill_fpregset (struct regcache
*regcache
, void *buf
)
407 i387_cache_to_fxsave (regcache
, buf
);
409 i387_cache_to_fsave (regcache
, buf
);
414 x86_store_fpregset (struct regcache
*regcache
, const void *buf
)
417 i387_fxsave_to_cache (regcache
, buf
);
419 i387_fsave_to_cache (regcache
, buf
);
426 x86_fill_fpxregset (struct regcache
*regcache
, void *buf
)
428 i387_cache_to_fxsave (regcache
, buf
);
432 x86_store_fpxregset (struct regcache
*regcache
, const void *buf
)
434 i387_fxsave_to_cache (regcache
, buf
);
440 x86_fill_xstateregset (struct regcache
*regcache
, void *buf
)
442 i387_cache_to_xsave (regcache
, buf
);
446 x86_store_xstateregset (struct regcache
*regcache
, const void *buf
)
448 i387_xsave_to_cache (regcache
, buf
);
451 /* ??? The non-biarch i386 case stores all the i387 regs twice.
452 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
453 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
454 doesn't work. IWBN to avoid the duplication in the case where it
455 does work. Maybe the arch_setup routine could check whether it works
456 and update the supported regsets accordingly. */
458 static struct regset_info x86_regsets
[] =
460 #ifdef HAVE_PTRACE_GETREGS
461 { PTRACE_GETREGS
, PTRACE_SETREGS
, 0, sizeof (elf_gregset_t
),
463 x86_fill_gregset
, x86_store_gregset
},
464 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_X86_XSTATE
, 0,
465 EXTENDED_REGS
, x86_fill_xstateregset
, x86_store_xstateregset
},
467 # ifdef HAVE_PTRACE_GETFPXREGS
468 { PTRACE_GETFPXREGS
, PTRACE_SETFPXREGS
, 0, sizeof (elf_fpxregset_t
),
470 x86_fill_fpxregset
, x86_store_fpxregset
},
473 { PTRACE_GETFPREGS
, PTRACE_SETFPREGS
, 0, sizeof (elf_fpregset_t
),
475 x86_fill_fpregset
, x86_store_fpregset
},
476 #endif /* HAVE_PTRACE_GETREGS */
477 { 0, 0, 0, -1, -1, NULL
, NULL
}
481 x86_get_pc (struct regcache
*regcache
)
483 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
488 collect_register_by_name (regcache
, "rip", &pc
);
489 return (CORE_ADDR
) pc
;
494 collect_register_by_name (regcache
, "eip", &pc
);
495 return (CORE_ADDR
) pc
;
500 x86_set_pc (struct regcache
*regcache
, CORE_ADDR pc
)
502 int use_64bit
= register_size (regcache
->tdesc
, 0) == 8;
506 unsigned long newpc
= pc
;
507 supply_register_by_name (regcache
, "rip", &newpc
);
511 unsigned int newpc
= pc
;
512 supply_register_by_name (regcache
, "eip", &newpc
);
516 static const unsigned char x86_breakpoint
[] = { 0xCC };
517 #define x86_breakpoint_len 1
520 x86_breakpoint_at (CORE_ADDR pc
)
524 (*the_target
->read_memory
) (pc
, &c
, 1);
531 /* Support for debug registers. */
534 x86_linux_dr_get (ptid_t ptid
, int regnum
)
539 tid
= ptid_get_lwp (ptid
);
542 value
= ptrace (PTRACE_PEEKUSER
, tid
,
543 offsetof (struct user
, u_debugreg
[regnum
]), 0);
545 error ("Couldn't read debug register");
551 x86_linux_dr_set (ptid_t ptid
, int regnum
, unsigned long value
)
555 tid
= ptid_get_lwp (ptid
);
558 ptrace (PTRACE_POKEUSER
, tid
,
559 offsetof (struct user
, u_debugreg
[regnum
]), value
);
561 error ("Couldn't write debug register");
565 update_debug_registers_callback (struct inferior_list_entry
*entry
,
568 struct thread_info
*thr
= (struct thread_info
*) entry
;
569 struct lwp_info
*lwp
= get_thread_lwp (thr
);
570 int pid
= *(int *) pid_p
;
572 /* Only update the threads of this process. */
573 if (pid_of (thr
) == pid
)
575 /* The actual update is done later just before resuming the lwp,
576 we just mark that the registers need updating. */
577 lwp
->arch_private
->debug_registers_changed
= 1;
579 /* If the lwp isn't stopped, force it to momentarily pause, so
580 we can update its debug registers. */
582 linux_stop_lwp (lwp
);
588 /* Update the inferior's debug register REGNUM from STATE. */
591 i386_dr_low_set_addr (int regnum
, CORE_ADDR addr
)
593 /* Only update the threads of this process. */
594 int pid
= pid_of (current_inferior
);
596 if (! (regnum
>= 0 && regnum
<= DR_LASTADDR
- DR_FIRSTADDR
))
597 fatal ("Invalid debug register %d", regnum
);
599 find_inferior (&all_threads
, update_debug_registers_callback
, &pid
);
602 /* Return the inferior's debug register REGNUM. */
605 i386_dr_low_get_addr (int regnum
)
607 ptid_t ptid
= ptid_of (current_inferior
);
609 /* DR6 and DR7 are retrieved with some other way. */
610 gdb_assert (DR_FIRSTADDR
<= regnum
&& regnum
<= DR_LASTADDR
);
612 return x86_linux_dr_get (ptid
, regnum
);
615 /* Update the inferior's DR7 debug control register from STATE. */
618 i386_dr_low_set_control (unsigned long control
)
620 /* Only update the threads of this process. */
621 int pid
= pid_of (current_inferior
);
623 find_inferior (&all_threads
, update_debug_registers_callback
, &pid
);
626 /* Return the inferior's DR7 debug control register. */
629 i386_dr_low_get_control (void)
631 ptid_t ptid
= ptid_of (current_inferior
);
633 return x86_linux_dr_get (ptid
, DR_CONTROL
);
636 /* Get the value of the DR6 debug status register from the inferior
637 and record it in STATE. */
640 i386_dr_low_get_status (void)
642 ptid_t ptid
= ptid_of (current_inferior
);
644 return x86_linux_dr_get (ptid
, DR_STATUS
);
647 /* Low-level function vector. */
648 struct i386_dr_low_type i386_dr_low
=
650 i386_dr_low_set_control
,
651 i386_dr_low_set_addr
,
652 i386_dr_low_get_addr
,
653 i386_dr_low_get_status
,
654 i386_dr_low_get_control
,
658 /* Breakpoint/Watchpoint support. */
661 x86_supports_z_point_type (char z_type
)
667 case Z_PACKET_WRITE_WP
:
668 case Z_PACKET_ACCESS_WP
:
676 x86_insert_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
677 int size
, struct raw_breakpoint
*bp
)
679 struct process_info
*proc
= current_process ();
683 case raw_bkpt_type_sw
:
684 return insert_memory_breakpoint (bp
);
686 case raw_bkpt_type_hw
:
687 case raw_bkpt_type_write_wp
:
688 case raw_bkpt_type_access_wp
:
690 enum target_hw_bp_type hw_type
691 = raw_bkpt_type_to_target_hw_bp_type (type
);
692 struct i386_debug_reg_state
*state
693 = &proc
->private->arch_private
->debug_reg_state
;
695 return i386_dr_insert_watchpoint (state
, hw_type
, addr
, size
);
705 x86_remove_point (enum raw_bkpt_type type
, CORE_ADDR addr
,
706 int size
, struct raw_breakpoint
*bp
)
708 struct process_info
*proc
= current_process ();
712 case raw_bkpt_type_sw
:
713 return remove_memory_breakpoint (bp
);
715 case raw_bkpt_type_hw
:
716 case raw_bkpt_type_write_wp
:
717 case raw_bkpt_type_access_wp
:
719 enum target_hw_bp_type hw_type
720 = raw_bkpt_type_to_target_hw_bp_type (type
);
721 struct i386_debug_reg_state
*state
722 = &proc
->private->arch_private
->debug_reg_state
;
724 return i386_dr_remove_watchpoint (state
, hw_type
, addr
, size
);
733 x86_stopped_by_watchpoint (void)
735 struct process_info
*proc
= current_process ();
736 return i386_dr_stopped_by_watchpoint (&proc
->private->arch_private
->debug_reg_state
);
740 x86_stopped_data_address (void)
742 struct process_info
*proc
= current_process ();
744 if (i386_dr_stopped_data_address (&proc
->private->arch_private
->debug_reg_state
,
750 /* Called when a new process is created. */
752 static struct arch_process_info
*
753 x86_linux_new_process (void)
755 struct arch_process_info
*info
= xcalloc (1, sizeof (*info
));
757 i386_low_init_dregs (&info
->debug_reg_state
);
762 /* Called when a new thread is detected. */
764 static struct arch_lwp_info
*
765 x86_linux_new_thread (void)
767 struct arch_lwp_info
*info
= xcalloc (1, sizeof (*info
));
769 info
->debug_registers_changed
= 1;
774 /* Called when resuming a thread.
775 If the debug regs have changed, update the thread's copies. */
778 x86_linux_prepare_to_resume (struct lwp_info
*lwp
)
780 ptid_t ptid
= ptid_of (get_lwp_thread (lwp
));
781 int clear_status
= 0;
783 if (lwp
->arch_private
->debug_registers_changed
)
786 int pid
= ptid_get_pid (ptid
);
787 struct process_info
*proc
= find_process_pid (pid
);
788 struct i386_debug_reg_state
*state
789 = &proc
->private->arch_private
->debug_reg_state
;
791 for (i
= DR_FIRSTADDR
; i
<= DR_LASTADDR
; i
++)
792 if (state
->dr_ref_count
[i
] > 0)
794 x86_linux_dr_set (ptid
, i
, state
->dr_mirror
[i
]);
796 /* If we're setting a watchpoint, any change the inferior
797 had done itself to the debug registers needs to be
798 discarded, otherwise, i386_dr_stopped_data_address can
803 x86_linux_dr_set (ptid
, DR_CONTROL
, state
->dr_control_mirror
);
805 lwp
->arch_private
->debug_registers_changed
= 0;
808 if (clear_status
|| lwp
->stopped_by_watchpoint
)
809 x86_linux_dr_set (ptid
, DR_STATUS
, 0);
812 /* When GDBSERVER is built as a 64-bit application on linux, the
813 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
814 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
815 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
816 conversion in-place ourselves. */
818 /* These types below (compat_*) define a siginfo type that is layout
819 compatible with the siginfo type exported by the 32-bit userspace
824 typedef int compat_int_t
;
825 typedef unsigned int compat_uptr_t
;
827 typedef int compat_time_t
;
828 typedef int compat_timer_t
;
829 typedef int compat_clock_t
;
831 struct compat_timeval
833 compat_time_t tv_sec
;
837 typedef union compat_sigval
839 compat_int_t sival_int
;
840 compat_uptr_t sival_ptr
;
843 typedef struct compat_siginfo
851 int _pad
[((128 / sizeof (int)) - 3)];
860 /* POSIX.1b timers */
865 compat_sigval_t _sigval
;
868 /* POSIX.1b signals */
873 compat_sigval_t _sigval
;
882 compat_clock_t _utime
;
883 compat_clock_t _stime
;
886 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
901 /* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes. */
902 typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t
;
904 typedef struct compat_x32_siginfo
912 int _pad
[((128 / sizeof (int)) - 3)];
921 /* POSIX.1b timers */
926 compat_sigval_t _sigval
;
929 /* POSIX.1b signals */
934 compat_sigval_t _sigval
;
943 compat_x32_clock_t _utime
;
944 compat_x32_clock_t _stime
;
947 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
960 } compat_x32_siginfo_t
__attribute__ ((__aligned__ (8)));
962 #define cpt_si_pid _sifields._kill._pid
963 #define cpt_si_uid _sifields._kill._uid
964 #define cpt_si_timerid _sifields._timer._tid
965 #define cpt_si_overrun _sifields._timer._overrun
966 #define cpt_si_status _sifields._sigchld._status
967 #define cpt_si_utime _sifields._sigchld._utime
968 #define cpt_si_stime _sifields._sigchld._stime
969 #define cpt_si_ptr _sifields._rt._sigval.sival_ptr
970 #define cpt_si_addr _sifields._sigfault._addr
971 #define cpt_si_band _sifields._sigpoll._band
972 #define cpt_si_fd _sifields._sigpoll._fd
974 /* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
975 In their place is si_timer1,si_timer2. */
977 #define si_timerid si_timer1
980 #define si_overrun si_timer2
984 compat_siginfo_from_siginfo (compat_siginfo_t
*to
, siginfo_t
*from
)
986 memset (to
, 0, sizeof (*to
));
988 to
->si_signo
= from
->si_signo
;
989 to
->si_errno
= from
->si_errno
;
990 to
->si_code
= from
->si_code
;
992 if (to
->si_code
== SI_TIMER
)
994 to
->cpt_si_timerid
= from
->si_timerid
;
995 to
->cpt_si_overrun
= from
->si_overrun
;
996 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
998 else if (to
->si_code
== SI_USER
)
1000 to
->cpt_si_pid
= from
->si_pid
;
1001 to
->cpt_si_uid
= from
->si_uid
;
1003 else if (to
->si_code
< 0)
1005 to
->cpt_si_pid
= from
->si_pid
;
1006 to
->cpt_si_uid
= from
->si_uid
;
1007 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1011 switch (to
->si_signo
)
1014 to
->cpt_si_pid
= from
->si_pid
;
1015 to
->cpt_si_uid
= from
->si_uid
;
1016 to
->cpt_si_status
= from
->si_status
;
1017 to
->cpt_si_utime
= from
->si_utime
;
1018 to
->cpt_si_stime
= from
->si_stime
;
1024 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
1027 to
->cpt_si_band
= from
->si_band
;
1028 to
->cpt_si_fd
= from
->si_fd
;
1031 to
->cpt_si_pid
= from
->si_pid
;
1032 to
->cpt_si_uid
= from
->si_uid
;
1033 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1040 siginfo_from_compat_siginfo (siginfo_t
*to
, compat_siginfo_t
*from
)
1042 memset (to
, 0, sizeof (*to
));
1044 to
->si_signo
= from
->si_signo
;
1045 to
->si_errno
= from
->si_errno
;
1046 to
->si_code
= from
->si_code
;
1048 if (to
->si_code
== SI_TIMER
)
1050 to
->si_timerid
= from
->cpt_si_timerid
;
1051 to
->si_overrun
= from
->cpt_si_overrun
;
1052 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1054 else if (to
->si_code
== SI_USER
)
1056 to
->si_pid
= from
->cpt_si_pid
;
1057 to
->si_uid
= from
->cpt_si_uid
;
1059 else if (to
->si_code
< 0)
1061 to
->si_pid
= from
->cpt_si_pid
;
1062 to
->si_uid
= from
->cpt_si_uid
;
1063 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1067 switch (to
->si_signo
)
1070 to
->si_pid
= from
->cpt_si_pid
;
1071 to
->si_uid
= from
->cpt_si_uid
;
1072 to
->si_status
= from
->cpt_si_status
;
1073 to
->si_utime
= from
->cpt_si_utime
;
1074 to
->si_stime
= from
->cpt_si_stime
;
1080 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1083 to
->si_band
= from
->cpt_si_band
;
1084 to
->si_fd
= from
->cpt_si_fd
;
1087 to
->si_pid
= from
->cpt_si_pid
;
1088 to
->si_uid
= from
->cpt_si_uid
;
1089 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1096 compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t
*to
,
1099 memset (to
, 0, sizeof (*to
));
1101 to
->si_signo
= from
->si_signo
;
1102 to
->si_errno
= from
->si_errno
;
1103 to
->si_code
= from
->si_code
;
1105 if (to
->si_code
== SI_TIMER
)
1107 to
->cpt_si_timerid
= from
->si_timerid
;
1108 to
->cpt_si_overrun
= from
->si_overrun
;
1109 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1111 else if (to
->si_code
== SI_USER
)
1113 to
->cpt_si_pid
= from
->si_pid
;
1114 to
->cpt_si_uid
= from
->si_uid
;
1116 else if (to
->si_code
< 0)
1118 to
->cpt_si_pid
= from
->si_pid
;
1119 to
->cpt_si_uid
= from
->si_uid
;
1120 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1124 switch (to
->si_signo
)
1127 to
->cpt_si_pid
= from
->si_pid
;
1128 to
->cpt_si_uid
= from
->si_uid
;
1129 to
->cpt_si_status
= from
->si_status
;
1130 to
->cpt_si_utime
= from
->si_utime
;
1131 to
->cpt_si_stime
= from
->si_stime
;
1137 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
1140 to
->cpt_si_band
= from
->si_band
;
1141 to
->cpt_si_fd
= from
->si_fd
;
1144 to
->cpt_si_pid
= from
->si_pid
;
1145 to
->cpt_si_uid
= from
->si_uid
;
1146 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
1153 siginfo_from_compat_x32_siginfo (siginfo_t
*to
,
1154 compat_x32_siginfo_t
*from
)
1156 memset (to
, 0, sizeof (*to
));
1158 to
->si_signo
= from
->si_signo
;
1159 to
->si_errno
= from
->si_errno
;
1160 to
->si_code
= from
->si_code
;
1162 if (to
->si_code
== SI_TIMER
)
1164 to
->si_timerid
= from
->cpt_si_timerid
;
1165 to
->si_overrun
= from
->cpt_si_overrun
;
1166 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1168 else if (to
->si_code
== SI_USER
)
1170 to
->si_pid
= from
->cpt_si_pid
;
1171 to
->si_uid
= from
->cpt_si_uid
;
1173 else if (to
->si_code
< 0)
1175 to
->si_pid
= from
->cpt_si_pid
;
1176 to
->si_uid
= from
->cpt_si_uid
;
1177 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
1181 switch (to
->si_signo
)
1184 to
->si_pid
= from
->cpt_si_pid
;
1185 to
->si_uid
= from
->cpt_si_uid
;
1186 to
->si_status
= from
->cpt_si_status
;
1187 to
->si_utime
= from
->cpt_si_utime
;
1188 to
->si_stime
= from
->cpt_si_stime
;
1194 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
1197 to
->si_band
= from
->cpt_si_band
;
1198 to
->si_fd
= from
->cpt_si_fd
;
1201 to
->si_pid
= from
->cpt_si_pid
;
1202 to
->si_uid
= from
->cpt_si_uid
;
1203 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
1209 #endif /* __x86_64__ */
1211 /* Convert a native/host siginfo object, into/from the siginfo in the
1212 layout of the inferiors' architecture. Returns true if any
1213 conversion was done; false otherwise. If DIRECTION is 1, then copy
1214 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
1218 x86_siginfo_fixup (siginfo_t
*native
, void *inf
, int direction
)
1221 unsigned int machine
;
1222 int tid
= lwpid_of (current_inferior
);
1223 int is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1225 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
1226 if (!is_64bit_tdesc ())
1228 if (sizeof (siginfo_t
) != sizeof (compat_siginfo_t
))
1229 fatal ("unexpected difference in siginfo");
1232 compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
, native
);
1234 siginfo_from_compat_siginfo (native
, (struct compat_siginfo
*) inf
);
1238 /* No fixup for native x32 GDB. */
1239 else if (!is_elf64
&& sizeof (void *) == 8)
1241 if (sizeof (siginfo_t
) != sizeof (compat_x32_siginfo_t
))
1242 fatal ("unexpected difference in siginfo");
1245 compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo
*) inf
,
1248 siginfo_from_compat_x32_siginfo (native
,
1249 (struct compat_x32_siginfo
*) inf
);
1260 /* Format of XSAVE extended state is:
1263 fxsave_bytes[0..463]
1264 sw_usable_bytes[464..511]
1265 xstate_hdr_bytes[512..575]
1270 Same memory layout will be used for the coredump NT_X86_XSTATE
1271 representing the XSAVE extended state registers.
1273 The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
1274 extended state mask, which is the same as the extended control register
1275 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
1276 together with the mask saved in the xstate_hdr_bytes to determine what
1277 states the processor/OS supports and what state, used or initialized,
1278 the process/thread is in. */
1279 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
1281 /* Does the current host support the GETFPXREGS request? The header
1282 file may or may not define it, and even if it is defined, the
1283 kernel will return EIO if it's running on a pre-SSE processor. */
1284 int have_ptrace_getfpxregs
=
1285 #ifdef HAVE_PTRACE_GETFPXREGS
1292 /* Does the current host support PTRACE_GETREGSET? */
1293 static int have_ptrace_getregset
= -1;
1295 /* Get Linux/x86 target description from running target. */
1297 static const struct target_desc
*
1298 x86_linux_read_description (void)
1300 unsigned int machine
;
1304 static uint64_t xcr0
;
1305 struct regset_info
*regset
;
1307 tid
= lwpid_of (current_inferior
);
1309 is_elf64
= linux_pid_exe_is_elf_64_file (tid
, &machine
);
1311 if (sizeof (void *) == 4)
1314 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
1316 else if (machine
== EM_X86_64
)
1317 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
1321 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
1322 if (machine
== EM_386
&& have_ptrace_getfpxregs
== -1)
1324 elf_fpxregset_t fpxregs
;
1326 if (ptrace (PTRACE_GETFPXREGS
, tid
, 0, (long) &fpxregs
) < 0)
1328 have_ptrace_getfpxregs
= 0;
1329 have_ptrace_getregset
= 0;
1330 return tdesc_i386_mmx_linux
;
1333 have_ptrace_getfpxregs
= 1;
1339 x86_xcr0
= I386_XSTATE_SSE_MASK
;
1341 /* Don't use XML. */
1343 if (machine
== EM_X86_64
)
1344 return tdesc_amd64_linux_no_xml
;
1347 return tdesc_i386_linux_no_xml
;
1350 if (have_ptrace_getregset
== -1)
1352 uint64_t xstateregs
[(I386_XSTATE_SSE_SIZE
/ sizeof (uint64_t))];
1355 iov
.iov_base
= xstateregs
;
1356 iov
.iov_len
= sizeof (xstateregs
);
1358 /* Check if PTRACE_GETREGSET works. */
1359 if (ptrace (PTRACE_GETREGSET
, tid
,
1360 (unsigned int) NT_X86_XSTATE
, (long) &iov
) < 0)
1361 have_ptrace_getregset
= 0;
1364 have_ptrace_getregset
= 1;
1366 /* Get XCR0 from XSAVE extended state. */
1367 xcr0
= xstateregs
[(I386_LINUX_XSAVE_XCR0_OFFSET
1368 / sizeof (uint64_t))];
1370 /* Use PTRACE_GETREGSET if it is available. */
1371 for (regset
= x86_regsets
;
1372 regset
->fill_function
!= NULL
; regset
++)
1373 if (regset
->get_request
== PTRACE_GETREGSET
)
1374 regset
->size
= I386_XSTATE_SIZE (xcr0
);
1375 else if (regset
->type
!= GENERAL_REGS
)
1380 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
1381 xcr0_features
= (have_ptrace_getregset
1382 && (xcr0
& I386_XSTATE_ALL_MASK
));
1387 if (machine
== EM_X86_64
)
1394 switch (xcr0
& I386_XSTATE_ALL_MASK
)
1396 case I386_XSTATE_AVX512_MASK
:
1397 return tdesc_amd64_avx512_linux
;
1399 case I386_XSTATE_MPX_MASK
:
1400 return tdesc_amd64_mpx_linux
;
1402 case I386_XSTATE_AVX_MASK
:
1403 return tdesc_amd64_avx_linux
;
1406 return tdesc_amd64_linux
;
1410 return tdesc_amd64_linux
;
1416 switch (xcr0
& I386_XSTATE_ALL_MASK
)
1418 case I386_XSTATE_AVX512_MASK
:
1419 return tdesc_x32_avx512_linux
;
1421 case I386_XSTATE_MPX_MASK
: /* No MPX on x32. */
1422 case I386_XSTATE_AVX_MASK
:
1423 return tdesc_x32_avx_linux
;
1426 return tdesc_x32_linux
;
1430 return tdesc_x32_linux
;
1438 switch (xcr0
& I386_XSTATE_ALL_MASK
)
1440 case (I386_XSTATE_AVX512_MASK
):
1441 return tdesc_i386_avx512_linux
;
1443 case (I386_XSTATE_MPX_MASK
):
1444 return tdesc_i386_mpx_linux
;
1446 case (I386_XSTATE_AVX_MASK
):
1447 return tdesc_i386_avx_linux
;
1450 return tdesc_i386_linux
;
1454 return tdesc_i386_linux
;
1457 gdb_assert_not_reached ("failed to return tdesc");
1460 /* Callback for find_inferior. Stops iteration when a thread with a
1461 given PID is found. */
1464 same_process_callback (struct inferior_list_entry
*entry
, void *data
)
1466 int pid
= *(int *) data
;
1468 return (ptid_get_pid (entry
->id
) == pid
);
1471 /* Callback for for_each_inferior. Calls the arch_setup routine for
1475 x86_arch_setup_process_callback (struct inferior_list_entry
*entry
)
1477 int pid
= ptid_get_pid (entry
->id
);
1479 /* Look up any thread of this processes. */
1481 = (struct thread_info
*) find_inferior (&all_threads
,
1482 same_process_callback
, &pid
);
1484 the_low_target
.arch_setup ();
1487 /* Update all the target description of all processes; a new GDB
1488 connected, and it may or not support xml target descriptions. */
1491 x86_linux_update_xmltarget (void)
1493 struct thread_info
*save_inferior
= current_inferior
;
1495 /* Before changing the register cache's internal layout, flush the
1496 contents of the current valid caches back to the threads, and
1497 release the current regcache objects. */
1498 regcache_release ();
1500 for_each_inferior (&all_processes
, x86_arch_setup_process_callback
);
1502 current_inferior
= save_inferior
;
1505 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1506 PTRACE_GETREGSET. */
1509 x86_linux_process_qsupported (const char *query
)
1511 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1512 with "i386" in qSupported query, it supports x86 XML target
1515 if (query
!= NULL
&& strncmp (query
, "xmlRegisters=", 13) == 0)
1517 char *copy
= xstrdup (query
+ 13);
1520 for (p
= strtok (copy
, ","); p
!= NULL
; p
= strtok (NULL
, ","))
1522 if (strcmp (p
, "i386") == 0)
1532 x86_linux_update_xmltarget ();
1535 /* Common for x86/x86-64. */
1537 static struct regsets_info x86_regsets_info
=
1539 x86_regsets
, /* regsets */
1540 0, /* num_regsets */
1541 NULL
, /* disabled_regsets */
1545 static struct regs_info amd64_linux_regs_info
=
1547 NULL
, /* regset_bitmap */
1548 NULL
, /* usrregs_info */
1552 static struct usrregs_info i386_linux_usrregs_info
=
1558 static struct regs_info i386_linux_regs_info
=
1560 NULL
, /* regset_bitmap */
1561 &i386_linux_usrregs_info
,
1565 const struct regs_info
*
1566 x86_linux_regs_info (void)
1569 if (is_64bit_tdesc ())
1570 return &amd64_linux_regs_info
;
1573 return &i386_linux_regs_info
;
1576 /* Initialize the target description for the architecture of the
1580 x86_arch_setup (void)
1582 current_process ()->tdesc
= x86_linux_read_description ();
1586 x86_supports_tracepoints (void)
1592 append_insns (CORE_ADDR
*to
, size_t len
, const unsigned char *buf
)
1594 write_inferior_memory (*to
, buf
, len
);
/* Convert the string of hex byte values OP (e.g. "48 83 ec 18") into
   raw bytes stored at BUF.  Returns the number of bytes written.
   Parsing stops at the first token strtoul cannot consume.  */

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      /* No progress means no more hex bytes.  */
      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
1620 /* Build a jump pad that saves registers and calls a collection
1621 function. Writes a jump instruction to the jump pad to
1622 JJUMPAD_INSN. The caller is responsible to write it in at the
1623 tracepoint address. */
1626 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1627 CORE_ADDR collector
,
1630 CORE_ADDR
*jump_entry
,
1631 CORE_ADDR
*trampoline
,
1632 ULONGEST
*trampoline_size
,
1633 unsigned char *jjump_pad_insn
,
1634 ULONGEST
*jjump_pad_insn_size
,
1635 CORE_ADDR
*adjusted_insn_addr
,
1636 CORE_ADDR
*adjusted_insn_addr_end
,
1639 unsigned char buf
[40];
1643 CORE_ADDR buildaddr
= *jump_entry
;
1645 /* Build the jump pad. */
1647 /* First, do tracepoint data collection. Save registers. */
1649 /* Need to ensure stack pointer saved first. */
1650 buf
[i
++] = 0x54; /* push %rsp */
1651 buf
[i
++] = 0x55; /* push %rbp */
1652 buf
[i
++] = 0x57; /* push %rdi */
1653 buf
[i
++] = 0x56; /* push %rsi */
1654 buf
[i
++] = 0x52; /* push %rdx */
1655 buf
[i
++] = 0x51; /* push %rcx */
1656 buf
[i
++] = 0x53; /* push %rbx */
1657 buf
[i
++] = 0x50; /* push %rax */
1658 buf
[i
++] = 0x41; buf
[i
++] = 0x57; /* push %r15 */
1659 buf
[i
++] = 0x41; buf
[i
++] = 0x56; /* push %r14 */
1660 buf
[i
++] = 0x41; buf
[i
++] = 0x55; /* push %r13 */
1661 buf
[i
++] = 0x41; buf
[i
++] = 0x54; /* push %r12 */
1662 buf
[i
++] = 0x41; buf
[i
++] = 0x53; /* push %r11 */
1663 buf
[i
++] = 0x41; buf
[i
++] = 0x52; /* push %r10 */
1664 buf
[i
++] = 0x41; buf
[i
++] = 0x51; /* push %r9 */
1665 buf
[i
++] = 0x41; buf
[i
++] = 0x50; /* push %r8 */
1666 buf
[i
++] = 0x9c; /* pushfq */
1667 buf
[i
++] = 0x48; /* movl <addr>,%rdi */
1669 *((unsigned long *)(buf
+ i
)) = (unsigned long) tpaddr
;
1670 i
+= sizeof (unsigned long);
1671 buf
[i
++] = 0x57; /* push %rdi */
1672 append_insns (&buildaddr
, i
, buf
);
1674 /* Stack space for the collecting_t object. */
1676 i
+= push_opcode (&buf
[i
], "48 83 ec 18"); /* sub $0x18,%rsp */
1677 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov <tpoint>,%rax */
1678 memcpy (buf
+ i
, &tpoint
, 8);
1680 i
+= push_opcode (&buf
[i
], "48 89 04 24"); /* mov %rax,(%rsp) */
1681 i
+= push_opcode (&buf
[i
],
1682 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1683 i
+= push_opcode (&buf
[i
], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1684 append_insns (&buildaddr
, i
, buf
);
1688 i
+= push_opcode (&buf
[i
], "48 be"); /* movl <lockaddr>,%rsi */
1689 memcpy (&buf
[i
], (void *) &lockaddr
, 8);
1691 i
+= push_opcode (&buf
[i
], "48 89 e1"); /* mov %rsp,%rcx */
1692 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1693 i
+= push_opcode (&buf
[i
], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1694 i
+= push_opcode (&buf
[i
], "48 85 c0"); /* test %rax,%rax */
1695 i
+= push_opcode (&buf
[i
], "75 f4"); /* jne <again> */
1696 append_insns (&buildaddr
, i
, buf
);
1698 /* Set up the gdb_collect call. */
1699 /* At this point, (stack pointer + 0x18) is the base of our saved
1703 i
+= push_opcode (&buf
[i
], "48 89 e6"); /* mov %rsp,%rsi */
1704 i
+= push_opcode (&buf
[i
], "48 83 c6 18"); /* add $0x18,%rsi */
1706 /* tpoint address may be 64-bit wide. */
1707 i
+= push_opcode (&buf
[i
], "48 bf"); /* movl <addr>,%rdi */
1708 memcpy (buf
+ i
, &tpoint
, 8);
1710 append_insns (&buildaddr
, i
, buf
);
1712 /* The collector function being in the shared library, may be
1713 >31-bits away off the jump pad. */
1715 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov $collector,%rax */
1716 memcpy (buf
+ i
, &collector
, 8);
1718 i
+= push_opcode (&buf
[i
], "ff d0"); /* callq *%rax */
1719 append_insns (&buildaddr
, i
, buf
);
1721 /* Clear the spin-lock. */
1723 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1724 i
+= push_opcode (&buf
[i
], "48 a3"); /* mov %rax, lockaddr */
1725 memcpy (buf
+ i
, &lockaddr
, 8);
1727 append_insns (&buildaddr
, i
, buf
);
1729 /* Remove stack that had been used for the collect_t object. */
1731 i
+= push_opcode (&buf
[i
], "48 83 c4 18"); /* add $0x18,%rsp */
1732 append_insns (&buildaddr
, i
, buf
);
1734 /* Restore register state. */
1736 buf
[i
++] = 0x48; /* add $0x8,%rsp */
1740 buf
[i
++] = 0x9d; /* popfq */
1741 buf
[i
++] = 0x41; buf
[i
++] = 0x58; /* pop %r8 */
1742 buf
[i
++] = 0x41; buf
[i
++] = 0x59; /* pop %r9 */
1743 buf
[i
++] = 0x41; buf
[i
++] = 0x5a; /* pop %r10 */
1744 buf
[i
++] = 0x41; buf
[i
++] = 0x5b; /* pop %r11 */
1745 buf
[i
++] = 0x41; buf
[i
++] = 0x5c; /* pop %r12 */
1746 buf
[i
++] = 0x41; buf
[i
++] = 0x5d; /* pop %r13 */
1747 buf
[i
++] = 0x41; buf
[i
++] = 0x5e; /* pop %r14 */
1748 buf
[i
++] = 0x41; buf
[i
++] = 0x5f; /* pop %r15 */
1749 buf
[i
++] = 0x58; /* pop %rax */
1750 buf
[i
++] = 0x5b; /* pop %rbx */
1751 buf
[i
++] = 0x59; /* pop %rcx */
1752 buf
[i
++] = 0x5a; /* pop %rdx */
1753 buf
[i
++] = 0x5e; /* pop %rsi */
1754 buf
[i
++] = 0x5f; /* pop %rdi */
1755 buf
[i
++] = 0x5d; /* pop %rbp */
1756 buf
[i
++] = 0x5c; /* pop %rsp */
1757 append_insns (&buildaddr
, i
, buf
);
1759 /* Now, adjust the original instruction to execute in the jump
1761 *adjusted_insn_addr
= buildaddr
;
1762 relocate_instruction (&buildaddr
, tpaddr
);
1763 *adjusted_insn_addr_end
= buildaddr
;
1765 /* Finally, write a jump back to the program. */
1767 loffset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1768 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1771 "E.Jump back from jump pad too far from tracepoint "
1772 "(offset 0x%" PRIx64
" > int32).", loffset
);
1776 offset
= (int) loffset
;
1777 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1778 memcpy (buf
+ 1, &offset
, 4);
1779 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1781 /* The jump pad is now built. Wire in a jump to our jump pad. This
1782 is always done last (by our caller actually), so that we can
1783 install fast tracepoints with threads running. This relies on
1784 the agent's atomic write support. */
1785 loffset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1786 if (loffset
> INT_MAX
|| loffset
< INT_MIN
)
1789 "E.Jump pad too far from tracepoint "
1790 "(offset 0x%" PRIx64
" > int32).", loffset
);
1794 offset
= (int) loffset
;
1796 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1797 memcpy (buf
+ 1, &offset
, 4);
1798 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1799 *jjump_pad_insn_size
= sizeof (jump_insn
);
1801 /* Return the end address of our pad. */
1802 *jump_entry
= buildaddr
;
1807 #endif /* __x86_64__ */
1809 /* Build a jump pad that saves registers and calls a collection
1810 function. Writes a jump instruction to the jump pad to
1811 JJUMPAD_INSN. The caller is responsible to write it in at the
1812 tracepoint address. */
1815 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1816 CORE_ADDR collector
,
1819 CORE_ADDR
*jump_entry
,
1820 CORE_ADDR
*trampoline
,
1821 ULONGEST
*trampoline_size
,
1822 unsigned char *jjump_pad_insn
,
1823 ULONGEST
*jjump_pad_insn_size
,
1824 CORE_ADDR
*adjusted_insn_addr
,
1825 CORE_ADDR
*adjusted_insn_addr_end
,
1828 unsigned char buf
[0x100];
1830 CORE_ADDR buildaddr
= *jump_entry
;
1832 /* Build the jump pad. */
1834 /* First, do tracepoint data collection. Save registers. */
1836 buf
[i
++] = 0x60; /* pushad */
1837 buf
[i
++] = 0x68; /* push tpaddr aka $pc */
1838 *((int *)(buf
+ i
)) = (int) tpaddr
;
1840 buf
[i
++] = 0x9c; /* pushf */
1841 buf
[i
++] = 0x1e; /* push %ds */
1842 buf
[i
++] = 0x06; /* push %es */
1843 buf
[i
++] = 0x0f; /* push %fs */
1845 buf
[i
++] = 0x0f; /* push %gs */
1847 buf
[i
++] = 0x16; /* push %ss */
1848 buf
[i
++] = 0x0e; /* push %cs */
1849 append_insns (&buildaddr
, i
, buf
);
1851 /* Stack space for the collecting_t object. */
1853 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1855 /* Build the object. */
1856 i
+= push_opcode (&buf
[i
], "b8"); /* mov <tpoint>,%eax */
1857 memcpy (buf
+ i
, &tpoint
, 4);
1859 i
+= push_opcode (&buf
[i
], "89 04 24"); /* mov %eax,(%esp) */
1861 i
+= push_opcode (&buf
[i
], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1862 i
+= push_opcode (&buf
[i
], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1863 append_insns (&buildaddr
, i
, buf
);
1865 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1866 If we cared for it, this could be using xchg alternatively. */
1869 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1870 i
+= push_opcode (&buf
[i
], "f0 0f b1 25"); /* lock cmpxchg
1872 memcpy (&buf
[i
], (void *) &lockaddr
, 4);
1874 i
+= push_opcode (&buf
[i
], "85 c0"); /* test %eax,%eax */
1875 i
+= push_opcode (&buf
[i
], "75 f2"); /* jne <again> */
1876 append_insns (&buildaddr
, i
, buf
);
1879 /* Set up arguments to the gdb_collect call. */
1881 i
+= push_opcode (&buf
[i
], "89 e0"); /* mov %esp,%eax */
1882 i
+= push_opcode (&buf
[i
], "83 c0 08"); /* add $0x08,%eax */
1883 i
+= push_opcode (&buf
[i
], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1884 append_insns (&buildaddr
, i
, buf
);
1887 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1888 append_insns (&buildaddr
, i
, buf
);
1891 i
+= push_opcode (&buf
[i
], "c7 04 24"); /* movl <addr>,(%esp) */
1892 memcpy (&buf
[i
], (void *) &tpoint
, 4);
1894 append_insns (&buildaddr
, i
, buf
);
1896 buf
[0] = 0xe8; /* call <reladdr> */
1897 offset
= collector
- (buildaddr
+ sizeof (jump_insn
));
1898 memcpy (buf
+ 1, &offset
, 4);
1899 append_insns (&buildaddr
, 5, buf
);
1900 /* Clean up after the call. */
1901 buf
[0] = 0x83; /* add $0x8,%esp */
1904 append_insns (&buildaddr
, 3, buf
);
1907 /* Clear the spin-lock. This would need the LOCK prefix on older
1910 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1911 i
+= push_opcode (&buf
[i
], "a3"); /* mov %eax, lockaddr */
1912 memcpy (buf
+ i
, &lockaddr
, 4);
1914 append_insns (&buildaddr
, i
, buf
);
1917 /* Remove stack that had been used for the collect_t object. */
1919 i
+= push_opcode (&buf
[i
], "83 c4 08"); /* add $0x08,%esp */
1920 append_insns (&buildaddr
, i
, buf
);
1923 buf
[i
++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1926 buf
[i
++] = 0x17; /* pop %ss */
1927 buf
[i
++] = 0x0f; /* pop %gs */
1929 buf
[i
++] = 0x0f; /* pop %fs */
1931 buf
[i
++] = 0x07; /* pop %es */
1932 buf
[i
++] = 0x1f; /* pop %ds */
1933 buf
[i
++] = 0x9d; /* popf */
1934 buf
[i
++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1937 buf
[i
++] = 0x61; /* popad */
1938 append_insns (&buildaddr
, i
, buf
);
1940 /* Now, adjust the original instruction to execute in the jump
1942 *adjusted_insn_addr
= buildaddr
;
1943 relocate_instruction (&buildaddr
, tpaddr
);
1944 *adjusted_insn_addr_end
= buildaddr
;
1946 /* Write the jump back to the program. */
1947 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1948 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1949 memcpy (buf
+ 1, &offset
, 4);
1950 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1952 /* The jump pad is now built. Wire in a jump to our jump pad. This
1953 is always done last (by our caller actually), so that we can
1954 install fast tracepoints with threads running. This relies on
1955 the agent's atomic write support. */
1958 /* Create a trampoline. */
1959 *trampoline_size
= sizeof (jump_insn
);
1960 if (!claim_trampoline_space (*trampoline_size
, trampoline
))
1962 /* No trampoline space available. */
1964 "E.Cannot allocate trampoline space needed for fast "
1965 "tracepoints on 4-byte instructions.");
1969 offset
= *jump_entry
- (*trampoline
+ sizeof (jump_insn
));
1970 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1971 memcpy (buf
+ 1, &offset
, 4);
1972 write_inferior_memory (*trampoline
, buf
, sizeof (jump_insn
));
1974 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1975 offset
= (*trampoline
- (tpaddr
+ sizeof (small_jump_insn
))) & 0xffff;
1976 memcpy (buf
, small_jump_insn
, sizeof (small_jump_insn
));
1977 memcpy (buf
+ 2, &offset
, 2);
1978 memcpy (jjump_pad_insn
, buf
, sizeof (small_jump_insn
));
1979 *jjump_pad_insn_size
= sizeof (small_jump_insn
);
1983 /* Else use a 32-bit relative jump instruction. */
1984 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1985 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1986 memcpy (buf
+ 1, &offset
, 4);
1987 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1988 *jjump_pad_insn_size
= sizeof (jump_insn
);
1991 /* Return the end address of our pad. */
1992 *jump_entry
= buildaddr
;
1998 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1999 CORE_ADDR collector
,
2002 CORE_ADDR
*jump_entry
,
2003 CORE_ADDR
*trampoline
,
2004 ULONGEST
*trampoline_size
,
2005 unsigned char *jjump_pad_insn
,
2006 ULONGEST
*jjump_pad_insn_size
,
2007 CORE_ADDR
*adjusted_insn_addr
,
2008 CORE_ADDR
*adjusted_insn_addr_end
,
2012 if (is_64bit_tdesc ())
2013 return amd64_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
2014 collector
, lockaddr
,
2015 orig_size
, jump_entry
,
2016 trampoline
, trampoline_size
,
2018 jjump_pad_insn_size
,
2020 adjusted_insn_addr_end
,
2024 return i386_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
2025 collector
, lockaddr
,
2026 orig_size
, jump_entry
,
2027 trampoline
, trampoline_size
,
2029 jjump_pad_insn_size
,
2031 adjusted_insn_addr_end
,
2035 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
2039 x86_get_min_fast_tracepoint_insn_len (void)
2041 static int warned_about_fast_tracepoints
= 0;
2044 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
2045 used for fast tracepoints. */
2046 if (is_64bit_tdesc ())
2050 if (agent_loaded_p ())
2052 char errbuf
[IPA_BUFSIZ
];
2056 /* On x86, if trampolines are available, then 4-byte jump instructions
2057 with a 2-byte offset may be used, otherwise 5-byte jump instructions
2058 with a 4-byte offset are used instead. */
2059 if (have_fast_tracepoint_trampoline_buffer (errbuf
))
2063 /* GDB has no channel to explain to user why a shorter fast
2064 tracepoint is not possible, but at least make GDBserver
2065 mention that something has gone awry. */
2066 if (!warned_about_fast_tracepoints
)
2068 warning ("4-byte fast tracepoints not available; %s\n", errbuf
);
2069 warned_about_fast_tracepoints
= 1;
2076 /* Indicate that the minimum length is currently unknown since the IPA
2077 has not loaded yet. */
2083 add_insns (unsigned char *start
, int len
)
2085 CORE_ADDR buildaddr
= current_insn_ptr
;
2088 debug_printf ("Adding %d bytes of insn at %s\n",
2089 len
, paddress (buildaddr
));
2091 append_insns (&buildaddr
, len
, start
);
2092 current_insn_ptr
= buildaddr
;
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
2136 amd64_emit_prologue (void)
2138 EMIT_ASM (amd64_prologue
,
2140 "movq %rsp,%rbp\n\t"
2141 "sub $0x20,%rsp\n\t"
2142 "movq %rdi,-8(%rbp)\n\t"
2143 "movq %rsi,-16(%rbp)");
2148 amd64_emit_epilogue (void)
2150 EMIT_ASM (amd64_epilogue
,
2151 "movq -16(%rbp),%rdi\n\t"
2152 "movq %rax,(%rdi)\n\t"
2159 amd64_emit_add (void)
2161 EMIT_ASM (amd64_add
,
2162 "add (%rsp),%rax\n\t"
2163 "lea 0x8(%rsp),%rsp");
2167 amd64_emit_sub (void)
2169 EMIT_ASM (amd64_sub
,
2170 "sub %rax,(%rsp)\n\t"
2175 amd64_emit_mul (void)
2181 amd64_emit_lsh (void)
2187 amd64_emit_rsh_signed (void)
2193 amd64_emit_rsh_unsigned (void)
2199 amd64_emit_ext (int arg
)
2204 EMIT_ASM (amd64_ext_8
,
2210 EMIT_ASM (amd64_ext_16
,
2215 EMIT_ASM (amd64_ext_32
,
2224 amd64_emit_log_not (void)
2226 EMIT_ASM (amd64_log_not
,
2227 "test %rax,%rax\n\t"
2233 amd64_emit_bit_and (void)
2235 EMIT_ASM (amd64_and
,
2236 "and (%rsp),%rax\n\t"
2237 "lea 0x8(%rsp),%rsp");
2241 amd64_emit_bit_or (void)
2244 "or (%rsp),%rax\n\t"
2245 "lea 0x8(%rsp),%rsp");
2249 amd64_emit_bit_xor (void)
2251 EMIT_ASM (amd64_xor
,
2252 "xor (%rsp),%rax\n\t"
2253 "lea 0x8(%rsp),%rsp");
2257 amd64_emit_bit_not (void)
2259 EMIT_ASM (amd64_bit_not
,
2260 "xorq $0xffffffffffffffff,%rax");
2264 amd64_emit_equal (void)
2266 EMIT_ASM (amd64_equal
,
2267 "cmp %rax,(%rsp)\n\t"
2268 "je .Lamd64_equal_true\n\t"
2270 "jmp .Lamd64_equal_end\n\t"
2271 ".Lamd64_equal_true:\n\t"
2273 ".Lamd64_equal_end:\n\t"
2274 "lea 0x8(%rsp),%rsp");
2278 amd64_emit_less_signed (void)
2280 EMIT_ASM (amd64_less_signed
,
2281 "cmp %rax,(%rsp)\n\t"
2282 "jl .Lamd64_less_signed_true\n\t"
2284 "jmp .Lamd64_less_signed_end\n\t"
2285 ".Lamd64_less_signed_true:\n\t"
2287 ".Lamd64_less_signed_end:\n\t"
2288 "lea 0x8(%rsp),%rsp");
2292 amd64_emit_less_unsigned (void)
2294 EMIT_ASM (amd64_less_unsigned
,
2295 "cmp %rax,(%rsp)\n\t"
2296 "jb .Lamd64_less_unsigned_true\n\t"
2298 "jmp .Lamd64_less_unsigned_end\n\t"
2299 ".Lamd64_less_unsigned_true:\n\t"
2301 ".Lamd64_less_unsigned_end:\n\t"
2302 "lea 0x8(%rsp),%rsp");
2306 amd64_emit_ref (int size
)
2311 EMIT_ASM (amd64_ref1
,
2315 EMIT_ASM (amd64_ref2
,
2319 EMIT_ASM (amd64_ref4
,
2320 "movl (%rax),%eax");
2323 EMIT_ASM (amd64_ref8
,
2324 "movq (%rax),%rax");
2330 amd64_emit_if_goto (int *offset_p
, int *size_p
)
2332 EMIT_ASM (amd64_if_goto
,
2336 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2344 amd64_emit_goto (int *offset_p
, int *size_p
)
2346 EMIT_ASM (amd64_goto
,
2347 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2355 amd64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2357 int diff
= (to
- (from
+ size
));
2358 unsigned char buf
[sizeof (int)];
2366 memcpy (buf
, &diff
, sizeof (int));
2367 write_inferior_memory (from
, buf
, sizeof (int));
2371 amd64_emit_const (LONGEST num
)
2373 unsigned char buf
[16];
2375 CORE_ADDR buildaddr
= current_insn_ptr
;
2378 buf
[i
++] = 0x48; buf
[i
++] = 0xb8; /* mov $<n>,%rax */
2379 memcpy (&buf
[i
], &num
, sizeof (num
));
2381 append_insns (&buildaddr
, i
, buf
);
2382 current_insn_ptr
= buildaddr
;
2386 amd64_emit_call (CORE_ADDR fn
)
2388 unsigned char buf
[16];
2390 CORE_ADDR buildaddr
;
2393 /* The destination function being in the shared library, may be
2394 >31-bits away off the compiled code pad. */
2396 buildaddr
= current_insn_ptr
;
2398 offset64
= fn
- (buildaddr
+ 1 /* call op */ + 4 /* 32-bit offset */);
2402 if (offset64
> INT_MAX
|| offset64
< INT_MIN
)
2404 /* Offset is too large for a call. Use callq, but that requires
2405 a register, so avoid it if possible. Use r10, since it is
2406 call-clobbered, we don't have to push/pop it. */
2407 buf
[i
++] = 0x48; /* mov $fn,%r10 */
2409 memcpy (buf
+ i
, &fn
, 8);
2411 buf
[i
++] = 0xff; /* callq *%r10 */
2416 int offset32
= offset64
; /* we know we can't overflow here. */
2417 memcpy (buf
+ i
, &offset32
, 4);
2421 append_insns (&buildaddr
, i
, buf
);
2422 current_insn_ptr
= buildaddr
;
2426 amd64_emit_reg (int reg
)
2428 unsigned char buf
[16];
2430 CORE_ADDR buildaddr
;
2432 /* Assume raw_regs is still in %rdi. */
2433 buildaddr
= current_insn_ptr
;
2435 buf
[i
++] = 0xbe; /* mov $<n>,%esi */
2436 memcpy (&buf
[i
], ®
, sizeof (reg
));
2438 append_insns (&buildaddr
, i
, buf
);
2439 current_insn_ptr
= buildaddr
;
2440 amd64_emit_call (get_raw_reg_func_addr ());
2444 amd64_emit_pop (void)
2446 EMIT_ASM (amd64_pop
,
2451 amd64_emit_stack_flush (void)
2453 EMIT_ASM (amd64_stack_flush
,
2458 amd64_emit_zero_ext (int arg
)
2463 EMIT_ASM (amd64_zero_ext_8
,
2467 EMIT_ASM (amd64_zero_ext_16
,
2468 "and $0xffff,%rax");
2471 EMIT_ASM (amd64_zero_ext_32
,
2472 "mov $0xffffffff,%rcx\n\t"
2481 amd64_emit_swap (void)
2483 EMIT_ASM (amd64_swap
,
2490 amd64_emit_stack_adjust (int n
)
2492 unsigned char buf
[16];
2494 CORE_ADDR buildaddr
= current_insn_ptr
;
2497 buf
[i
++] = 0x48; /* lea $<n>(%rsp),%rsp */
2501 /* This only handles adjustments up to 16, but we don't expect any more. */
2503 append_insns (&buildaddr
, i
, buf
);
2504 current_insn_ptr
= buildaddr
;
2507 /* FN's prototype is `LONGEST(*fn)(int)'. */
2510 amd64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2512 unsigned char buf
[16];
2514 CORE_ADDR buildaddr
;
2516 buildaddr
= current_insn_ptr
;
2518 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2519 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2521 append_insns (&buildaddr
, i
, buf
);
2522 current_insn_ptr
= buildaddr
;
2523 amd64_emit_call (fn
);
2526 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2529 amd64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2531 unsigned char buf
[16];
2533 CORE_ADDR buildaddr
;
2535 buildaddr
= current_insn_ptr
;
2537 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2538 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2540 append_insns (&buildaddr
, i
, buf
);
2541 current_insn_ptr
= buildaddr
;
2542 EMIT_ASM (amd64_void_call_2_a
,
2543 /* Save away a copy of the stack top. */
2545 /* Also pass top as the second argument. */
2547 amd64_emit_call (fn
);
2548 EMIT_ASM (amd64_void_call_2_b
,
2549 /* Restore the stack top, %rax may have been trashed. */
2554 amd64_emit_eq_goto (int *offset_p
, int *size_p
)
2557 "cmp %rax,(%rsp)\n\t"
2558 "jne .Lamd64_eq_fallthru\n\t"
2559 "lea 0x8(%rsp),%rsp\n\t"
2561 /* jmp, but don't trust the assembler to choose the right jump */
2562 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2563 ".Lamd64_eq_fallthru:\n\t"
2564 "lea 0x8(%rsp),%rsp\n\t"
2574 amd64_emit_ne_goto (int *offset_p
, int *size_p
)
2577 "cmp %rax,(%rsp)\n\t"
2578 "je .Lamd64_ne_fallthru\n\t"
2579 "lea 0x8(%rsp),%rsp\n\t"
2581 /* jmp, but don't trust the assembler to choose the right jump */
2582 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2583 ".Lamd64_ne_fallthru:\n\t"
2584 "lea 0x8(%rsp),%rsp\n\t"
2594 amd64_emit_lt_goto (int *offset_p
, int *size_p
)
2597 "cmp %rax,(%rsp)\n\t"
2598 "jnl .Lamd64_lt_fallthru\n\t"
2599 "lea 0x8(%rsp),%rsp\n\t"
2601 /* jmp, but don't trust the assembler to choose the right jump */
2602 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2603 ".Lamd64_lt_fallthru:\n\t"
2604 "lea 0x8(%rsp),%rsp\n\t"
2614 amd64_emit_le_goto (int *offset_p
, int *size_p
)
2617 "cmp %rax,(%rsp)\n\t"
2618 "jnle .Lamd64_le_fallthru\n\t"
2619 "lea 0x8(%rsp),%rsp\n\t"
2621 /* jmp, but don't trust the assembler to choose the right jump */
2622 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2623 ".Lamd64_le_fallthru:\n\t"
2624 "lea 0x8(%rsp),%rsp\n\t"
2634 amd64_emit_gt_goto (int *offset_p
, int *size_p
)
2637 "cmp %rax,(%rsp)\n\t"
2638 "jng .Lamd64_gt_fallthru\n\t"
2639 "lea 0x8(%rsp),%rsp\n\t"
2641 /* jmp, but don't trust the assembler to choose the right jump */
2642 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2643 ".Lamd64_gt_fallthru:\n\t"
2644 "lea 0x8(%rsp),%rsp\n\t"
2654 amd64_emit_ge_goto (int *offset_p
, int *size_p
)
2657 "cmp %rax,(%rsp)\n\t"
2658 "jnge .Lamd64_ge_fallthru\n\t"
2659 ".Lamd64_ge_jump:\n\t"
2660 "lea 0x8(%rsp),%rsp\n\t"
2662 /* jmp, but don't trust the assembler to choose the right jump */
2663 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2664 ".Lamd64_ge_fallthru:\n\t"
2665 "lea 0x8(%rsp),%rsp\n\t"
2674 struct emit_ops amd64_emit_ops
=
2676 amd64_emit_prologue
,
2677 amd64_emit_epilogue
,
2682 amd64_emit_rsh_signed
,
2683 amd64_emit_rsh_unsigned
,
2691 amd64_emit_less_signed
,
2692 amd64_emit_less_unsigned
,
2696 amd64_write_goto_address
,
2701 amd64_emit_stack_flush
,
2702 amd64_emit_zero_ext
,
2704 amd64_emit_stack_adjust
,
2705 amd64_emit_int_call_1
,
2706 amd64_emit_void_call_2
,
2715 #endif /* __x86_64__ */
2718 i386_emit_prologue (void)
2720 EMIT_ASM32 (i386_prologue
,
2724 /* At this point, the raw regs base address is at 8(%ebp), and the
2725 value pointer is at 12(%ebp). */
2729 i386_emit_epilogue (void)
2731 EMIT_ASM32 (i386_epilogue
,
2732 "mov 12(%ebp),%ecx\n\t"
2733 "mov %eax,(%ecx)\n\t"
2734 "mov %ebx,0x4(%ecx)\n\t"
2742 i386_emit_add (void)
2744 EMIT_ASM32 (i386_add
,
2745 "add (%esp),%eax\n\t"
2746 "adc 0x4(%esp),%ebx\n\t"
2747 "lea 0x8(%esp),%esp");
2751 i386_emit_sub (void)
2753 EMIT_ASM32 (i386_sub
,
2754 "subl %eax,(%esp)\n\t"
2755 "sbbl %ebx,4(%esp)\n\t"
2761 i386_emit_mul (void)
2767 i386_emit_lsh (void)
2773 i386_emit_rsh_signed (void)
2779 i386_emit_rsh_unsigned (void)
2785 i386_emit_ext (int arg
)
2790 EMIT_ASM32 (i386_ext_8
,
2793 "movl %eax,%ebx\n\t"
2797 EMIT_ASM32 (i386_ext_16
,
2799 "movl %eax,%ebx\n\t"
2803 EMIT_ASM32 (i386_ext_32
,
2804 "movl %eax,%ebx\n\t"
2813 i386_emit_log_not (void)
2815 EMIT_ASM32 (i386_log_not
,
2817 "test %eax,%eax\n\t"
2824 i386_emit_bit_and (void)
2826 EMIT_ASM32 (i386_and
,
2827 "and (%esp),%eax\n\t"
2828 "and 0x4(%esp),%ebx\n\t"
2829 "lea 0x8(%esp),%esp");
2833 i386_emit_bit_or (void)
2835 EMIT_ASM32 (i386_or
,
2836 "or (%esp),%eax\n\t"
2837 "or 0x4(%esp),%ebx\n\t"
2838 "lea 0x8(%esp),%esp");
2842 i386_emit_bit_xor (void)
2844 EMIT_ASM32 (i386_xor
,
2845 "xor (%esp),%eax\n\t"
2846 "xor 0x4(%esp),%ebx\n\t"
2847 "lea 0x8(%esp),%esp");
2851 i386_emit_bit_not (void)
2853 EMIT_ASM32 (i386_bit_not
,
2854 "xor $0xffffffff,%eax\n\t"
2855 "xor $0xffffffff,%ebx\n\t");
2859 i386_emit_equal (void)
2861 EMIT_ASM32 (i386_equal
,
2862 "cmpl %ebx,4(%esp)\n\t"
2863 "jne .Li386_equal_false\n\t"
2864 "cmpl %eax,(%esp)\n\t"
2865 "je .Li386_equal_true\n\t"
2866 ".Li386_equal_false:\n\t"
2868 "jmp .Li386_equal_end\n\t"
2869 ".Li386_equal_true:\n\t"
2871 ".Li386_equal_end:\n\t"
2873 "lea 0x8(%esp),%esp");
2877 i386_emit_less_signed (void)
2879 EMIT_ASM32 (i386_less_signed
,
2880 "cmpl %ebx,4(%esp)\n\t"
2881 "jl .Li386_less_signed_true\n\t"
2882 "jne .Li386_less_signed_false\n\t"
2883 "cmpl %eax,(%esp)\n\t"
2884 "jl .Li386_less_signed_true\n\t"
2885 ".Li386_less_signed_false:\n\t"
2887 "jmp .Li386_less_signed_end\n\t"
2888 ".Li386_less_signed_true:\n\t"
2890 ".Li386_less_signed_end:\n\t"
2892 "lea 0x8(%esp),%esp");
2896 i386_emit_less_unsigned (void)
2898 EMIT_ASM32 (i386_less_unsigned
,
2899 "cmpl %ebx,4(%esp)\n\t"
2900 "jb .Li386_less_unsigned_true\n\t"
2901 "jne .Li386_less_unsigned_false\n\t"
2902 "cmpl %eax,(%esp)\n\t"
2903 "jb .Li386_less_unsigned_true\n\t"
2904 ".Li386_less_unsigned_false:\n\t"
2906 "jmp .Li386_less_unsigned_end\n\t"
2907 ".Li386_less_unsigned_true:\n\t"
2909 ".Li386_less_unsigned_end:\n\t"
2911 "lea 0x8(%esp),%esp");
2915 i386_emit_ref (int size
)
2920 EMIT_ASM32 (i386_ref1
,
2924 EMIT_ASM32 (i386_ref2
,
2928 EMIT_ASM32 (i386_ref4
,
2929 "movl (%eax),%eax");
2932 EMIT_ASM32 (i386_ref8
,
2933 "movl 4(%eax),%ebx\n\t"
2934 "movl (%eax),%eax");
2940 i386_emit_if_goto (int *offset_p
, int *size_p
)
2942 EMIT_ASM32 (i386_if_goto
,
2948 /* Don't trust the assembler to choose the right jump */
2949 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2952 *offset_p
= 11; /* be sure that this matches the sequence above */
2958 i386_emit_goto (int *offset_p
, int *size_p
)
2960 EMIT_ASM32 (i386_goto
,
2961 /* Don't trust the assembler to choose the right jump */
2962 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2970 i386_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2972 int diff
= (to
- (from
+ size
));
2973 unsigned char buf
[sizeof (int)];
2975 /* We're only doing 4-byte sizes at the moment. */
2982 memcpy (buf
, &diff
, sizeof (int));
2983 write_inferior_memory (from
, buf
, sizeof (int));
2987 i386_emit_const (LONGEST num
)
2989 unsigned char buf
[16];
2991 CORE_ADDR buildaddr
= current_insn_ptr
;
2994 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2995 lo
= num
& 0xffffffff;
2996 memcpy (&buf
[i
], &lo
, sizeof (lo
));
2998 hi
= ((num
>> 32) & 0xffffffff);
3001 buf
[i
++] = 0xbb; /* mov $<n>,%ebx */
3002 memcpy (&buf
[i
], &hi
, sizeof (hi
));
3007 buf
[i
++] = 0x31; buf
[i
++] = 0xdb; /* xor %ebx,%ebx */
3009 append_insns (&buildaddr
, i
, buf
);
3010 current_insn_ptr
= buildaddr
;
3014 i386_emit_call (CORE_ADDR fn
)
3016 unsigned char buf
[16];
3018 CORE_ADDR buildaddr
;
3020 buildaddr
= current_insn_ptr
;
3022 buf
[i
++] = 0xe8; /* call <reladdr> */
3023 offset
= ((int) fn
) - (buildaddr
+ 5);
3024 memcpy (buf
+ 1, &offset
, 4);
3025 append_insns (&buildaddr
, 5, buf
);
3026 current_insn_ptr
= buildaddr
;
3030 i386_emit_reg (int reg
)
3032 unsigned char buf
[16];
3034 CORE_ADDR buildaddr
;
3036 EMIT_ASM32 (i386_reg_a
,
3038 buildaddr
= current_insn_ptr
;
3040 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
3041 memcpy (&buf
[i
], ®
, sizeof (reg
));
3043 append_insns (&buildaddr
, i
, buf
);
3044 current_insn_ptr
= buildaddr
;
3045 EMIT_ASM32 (i386_reg_b
,
3046 "mov %eax,4(%esp)\n\t"
3047 "mov 8(%ebp),%eax\n\t"
3049 i386_emit_call (get_raw_reg_func_addr ());
3050 EMIT_ASM32 (i386_reg_c
,
3052 "lea 0x8(%esp),%esp");
3056 i386_emit_pop (void)
3058 EMIT_ASM32 (i386_pop
,
3064 i386_emit_stack_flush (void)
3066 EMIT_ASM32 (i386_stack_flush
,
3072 i386_emit_zero_ext (int arg
)
3077 EMIT_ASM32 (i386_zero_ext_8
,
3078 "and $0xff,%eax\n\t"
3082 EMIT_ASM32 (i386_zero_ext_16
,
3083 "and $0xffff,%eax\n\t"
3087 EMIT_ASM32 (i386_zero_ext_32
,
3096 i386_emit_swap (void)
3098 EMIT_ASM32 (i386_swap
,
3108 i386_emit_stack_adjust (int n
)
3110 unsigned char buf
[16];
3112 CORE_ADDR buildaddr
= current_insn_ptr
;
3115 buf
[i
++] = 0x8d; /* lea $<n>(%esp),%esp */
3119 append_insns (&buildaddr
, i
, buf
);
3120 current_insn_ptr
= buildaddr
;
3123 /* FN's prototype is `LONGEST(*fn)(int)'. */
3126 i386_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
3128 unsigned char buf
[16];
3130 CORE_ADDR buildaddr
;
3132 EMIT_ASM32 (i386_int_call_1_a
,
3133 /* Reserve a bit of stack space. */
3135 /* Put the one argument on the stack. */
3136 buildaddr
= current_insn_ptr
;
3138 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3141 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3143 append_insns (&buildaddr
, i
, buf
);
3144 current_insn_ptr
= buildaddr
;
3145 i386_emit_call (fn
);
3146 EMIT_ASM32 (i386_int_call_1_c
,
3148 "lea 0x8(%esp),%esp");
3151 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
3154 i386_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
3156 unsigned char buf
[16];
3158 CORE_ADDR buildaddr
;
3160 EMIT_ASM32 (i386_void_call_2_a
,
3161 /* Preserve %eax only; we don't have to worry about %ebx. */
3163 /* Reserve a bit of stack space for arguments. */
3164 "sub $0x10,%esp\n\t"
3165 /* Copy "top" to the second argument position. (Note that
3166 we can't assume function won't scribble on its
3167 arguments, so don't try to restore from this.) */
3168 "mov %eax,4(%esp)\n\t"
3169 "mov %ebx,8(%esp)");
3170 /* Put the first argument on the stack. */
3171 buildaddr
= current_insn_ptr
;
3173 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
3176 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
3178 append_insns (&buildaddr
, i
, buf
);
3179 current_insn_ptr
= buildaddr
;
3180 i386_emit_call (fn
);
3181 EMIT_ASM32 (i386_void_call_2_b
,
3182 "lea 0x10(%esp),%esp\n\t"
3183 /* Restore original stack top. */
3189 i386_emit_eq_goto (int *offset_p
, int *size_p
)
3192 /* Check low half first, more likely to be decider */
3193 "cmpl %eax,(%esp)\n\t"
3194 "jne .Leq_fallthru\n\t"
3195 "cmpl %ebx,4(%esp)\n\t"
3196 "jne .Leq_fallthru\n\t"
3197 "lea 0x8(%esp),%esp\n\t"
3200 /* jmp, but don't trust the assembler to choose the right jump */
3201 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3202 ".Leq_fallthru:\n\t"
3203 "lea 0x8(%esp),%esp\n\t"
3214 i386_emit_ne_goto (int *offset_p
, int *size_p
)
3217 /* Check low half first, more likely to be decider */
3218 "cmpl %eax,(%esp)\n\t"
3220 "cmpl %ebx,4(%esp)\n\t"
3221 "je .Lne_fallthru\n\t"
3223 "lea 0x8(%esp),%esp\n\t"
3226 /* jmp, but don't trust the assembler to choose the right jump */
3227 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3228 ".Lne_fallthru:\n\t"
3229 "lea 0x8(%esp),%esp\n\t"
3240 i386_emit_lt_goto (int *offset_p
, int *size_p
)
3243 "cmpl %ebx,4(%esp)\n\t"
3245 "jne .Llt_fallthru\n\t"
3246 "cmpl %eax,(%esp)\n\t"
3247 "jnl .Llt_fallthru\n\t"
3249 "lea 0x8(%esp),%esp\n\t"
3252 /* jmp, but don't trust the assembler to choose the right jump */
3253 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3254 ".Llt_fallthru:\n\t"
3255 "lea 0x8(%esp),%esp\n\t"
3266 i386_emit_le_goto (int *offset_p
, int *size_p
)
3269 "cmpl %ebx,4(%esp)\n\t"
3271 "jne .Lle_fallthru\n\t"
3272 "cmpl %eax,(%esp)\n\t"
3273 "jnle .Lle_fallthru\n\t"
3275 "lea 0x8(%esp),%esp\n\t"
3278 /* jmp, but don't trust the assembler to choose the right jump */
3279 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3280 ".Lle_fallthru:\n\t"
3281 "lea 0x8(%esp),%esp\n\t"
3292 i386_emit_gt_goto (int *offset_p
, int *size_p
)
3295 "cmpl %ebx,4(%esp)\n\t"
3297 "jne .Lgt_fallthru\n\t"
3298 "cmpl %eax,(%esp)\n\t"
3299 "jng .Lgt_fallthru\n\t"
3301 "lea 0x8(%esp),%esp\n\t"
3304 /* jmp, but don't trust the assembler to choose the right jump */
3305 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3306 ".Lgt_fallthru:\n\t"
3307 "lea 0x8(%esp),%esp\n\t"
3318 i386_emit_ge_goto (int *offset_p
, int *size_p
)
3321 "cmpl %ebx,4(%esp)\n\t"
3323 "jne .Lge_fallthru\n\t"
3324 "cmpl %eax,(%esp)\n\t"
3325 "jnge .Lge_fallthru\n\t"
3327 "lea 0x8(%esp),%esp\n\t"
3330 /* jmp, but don't trust the assembler to choose the right jump */
3331 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3332 ".Lge_fallthru:\n\t"
3333 "lea 0x8(%esp),%esp\n\t"
3343 struct emit_ops i386_emit_ops
=
3351 i386_emit_rsh_signed
,
3352 i386_emit_rsh_unsigned
,
3360 i386_emit_less_signed
,
3361 i386_emit_less_unsigned
,
3365 i386_write_goto_address
,
3370 i386_emit_stack_flush
,
3373 i386_emit_stack_adjust
,
3374 i386_emit_int_call_1
,
3375 i386_emit_void_call_2
,
3385 static struct emit_ops
*
3389 if (is_64bit_tdesc ())
3390 return &amd64_emit_ops
;
3393 return &i386_emit_ops
;
3397 x86_supports_range_stepping (void)
3402 /* This is initialized assuming an amd64 target.
3403 x86_arch_setup will correct it for i386 or amd64 targets. */
3405 struct linux_target_ops the_low_target
=
3408 x86_linux_regs_info
,
3409 x86_cannot_fetch_register
,
3410 x86_cannot_store_register
,
3411 NULL
, /* fetch_register */
3419 x86_supports_z_point_type
,
3422 x86_stopped_by_watchpoint
,
3423 x86_stopped_data_address
,
3424 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3425 native i386 case (no registers smaller than an xfer unit), and are not
3426 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3429 /* need to fix up i386 siginfo if host is amd64 */
3431 x86_linux_new_process
,
3432 x86_linux_new_thread
,
3433 x86_linux_prepare_to_resume
,
3434 x86_linux_process_qsupported
,
3435 x86_supports_tracepoints
,
3436 x86_get_thread_area
,
3437 x86_install_fast_tracepoint_jump_pad
,
3439 x86_get_min_fast_tracepoint_insn_len
,
3440 x86_supports_range_stepping
,
3444 initialize_low_arch (void)
3446 /* Initialize the Linux target descriptions. */
3448 init_registers_amd64_linux ();
3449 init_registers_amd64_avx_linux ();
3450 init_registers_amd64_avx512_linux ();
3451 init_registers_amd64_mpx_linux ();
3453 init_registers_x32_linux ();
3454 init_registers_x32_avx_linux ();
3455 init_registers_x32_avx512_linux ();
3457 tdesc_amd64_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3458 copy_target_description (tdesc_amd64_linux_no_xml
, tdesc_amd64_linux
);
3459 tdesc_amd64_linux_no_xml
->xmltarget
= xmltarget_amd64_linux_no_xml
;
3461 init_registers_i386_linux ();
3462 init_registers_i386_mmx_linux ();
3463 init_registers_i386_avx_linux ();
3464 init_registers_i386_avx512_linux ();
3465 init_registers_i386_mpx_linux ();
3467 tdesc_i386_linux_no_xml
= xmalloc (sizeof (struct target_desc
));
3468 copy_target_description (tdesc_i386_linux_no_xml
, tdesc_i386_linux
);
3469 tdesc_i386_linux_no_xml
->xmltarget
= xmltarget_i386_linux_no_xml
;
3471 initialize_regsets_info (&x86_regsets_info
);