/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002, 2004-2012 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
24 #include "linux-low.h"
27 #include "i386-xstate.h"
28 #include "elf/common.h"
30 #include "gdb_proc_service.h"
32 /* Defined in auto-generated file i386-linux.c. */
33 void init_registers_i386_linux (void);
34 /* Defined in auto-generated file amd64-linux.c. */
35 void init_registers_amd64_linux (void);
36 /* Defined in auto-generated file i386-avx-linux.c. */
37 void init_registers_i386_avx_linux (void);
38 /* Defined in auto-generated file amd64-avx-linux.c. */
39 void init_registers_amd64_avx_linux (void);
40 /* Defined in auto-generated file i386-mmx-linux.c. */
41 void init_registers_i386_mmx_linux (void);
/* Jump instruction templates used when building fast tracepoint jump
   pads: 0xe9 is "jmp rel32"; with the 0x66 operand-size prefix it is
   "jmp rel16".  The zero bytes are placeholders patched with the
   actual displacement when the pad is installed.  */
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
/* Backward compatibility for gdb without XML support.  These are the
   fixed target descriptions sent to such GDBs; the leading '@' marks
   the string as a literal description rather than an annex name.
   NOTE(review): the closing "</target>" lines were truncated in this
   copy and have been restored here.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif
61 #include <sys/procfs.h>
62 #include <sys/ptrace.h>
/* Fallback definitions for ptrace requests that older kernel/libc
   headers may not provide.  Values match the Linux UAPI.
   NOTE(review): the matching #endif lines were lost in this copy and
   have been restored.  */
#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET 0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET 0x4205
#endif

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif
/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  They select which segment base
   register an arch_prctl/PTRACE_ARCH_PRCTL call reads or writes.
   NOTE(review): the #ifndef guard implied by the comment above was
   lost in this copy and has been restored.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif
92 /* Per-process arch-specific data we want to keep. */
94 struct arch_process_info
96 struct i386_debug_reg_state debug_reg_state
;
/* Per-thread arch-specific data we want to keep.  */

struct arch_lwp_info
{
  /* Non-zero if our copy differs from what's recorded in the thread.  */
  int debug_registers_changed;
};
/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs, hence the
   scale factor of 8 (bytes per slot) on each index.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
122 /* So code below doesn't have to care, i386 or amd64. */
123 #define ORIG_EAX ORIG_RAX
125 static const int x86_64_regmap
[] =
127 RAX
* 8, RBX
* 8, RCX
* 8, RDX
* 8,
128 RSI
* 8, RDI
* 8, RBP
* 8, RSP
* 8,
129 R8
* 8, R9
* 8, R10
* 8, R11
* 8,
130 R12
* 8, R13
* 8, R14
* 8, R15
* 8,
131 RIP
* 8, EFLAGS
* 8, CS
* 8, SS
* 8,
132 DS
* 8, ES
* 8, FS
* 8, GS
* 8,
133 -1, -1, -1, -1, -1, -1, -1, -1,
134 -1, -1, -1, -1, -1, -1, -1, -1,
135 -1, -1, -1, -1, -1, -1, -1, -1,
136 -1, -1, -1, -1, -1, -1, -1, -1, -1,
140 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
142 #else /* ! __x86_64__ */
144 /* Mapping between the general-purpose registers in `struct user'
145 format and GDB's register array layout. */
146 static /*const*/ int i386_regmap
[] =
148 EAX
* 4, ECX
* 4, EDX
* 4, EBX
* 4,
149 UESP
* 4, EBP
* 4, ESI
* 4, EDI
* 4,
150 EIP
* 4, EFL
* 4, CS
* 4, SS
* 4,
151 DS
* 4, ES
* 4, FS
* 4, GS
* 4
154 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
158 /* Called by libthread_db. */
161 ps_get_thread_area (const struct ps_prochandle
*ph
,
162 lwpid_t lwpid
, int idx
, void **base
)
165 int use_64bit
= register_size (0) == 8;
172 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_FS
) == 0)
176 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, base
, ARCH_GET_GS
) == 0)
187 unsigned int desc
[4];
189 if (ptrace (PTRACE_GET_THREAD_AREA
, lwpid
,
190 (void *) (intptr_t) idx
, (unsigned long) &desc
) < 0)
193 *(int *)base
= desc
[1];
198 /* Get the thread area address. This is used to recognize which
199 thread is which when tracing with the in-process agent library. We
200 don't read anything from the address, and treat it as opaque; it's
201 the address itself that we assume is unique per-thread. */
204 x86_get_thread_area (int lwpid
, CORE_ADDR
*addr
)
207 int use_64bit
= register_size (0) == 8;
212 if (ptrace (PTRACE_ARCH_PRCTL
, lwpid
, &base
, ARCH_GET_FS
) == 0)
214 *addr
= (CORE_ADDR
) (uintptr_t) base
;
223 struct lwp_info
*lwp
= find_lwp_pid (pid_to_ptid (lwpid
));
224 struct regcache
*regcache
= get_thread_regcache (get_lwp_thread (lwp
), 1);
225 unsigned int desc
[4];
227 const int reg_thread_area
= 3; /* bits to scale down register value. */
230 collect_register_by_name (regcache
, "gs", &gs
);
232 idx
= gs
>> reg_thread_area
;
234 if (ptrace (PTRACE_GET_THREAD_AREA
,
236 (void *) (long) idx
, (unsigned long) &desc
) < 0)
247 i386_cannot_store_register (int regno
)
249 return regno
>= I386_NUM_REGS
;
253 i386_cannot_fetch_register (int regno
)
255 return regno
>= I386_NUM_REGS
;
259 x86_fill_gregset (struct regcache
*regcache
, void *buf
)
264 if (register_size (0) == 8)
266 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
267 if (x86_64_regmap
[i
] != -1)
268 collect_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
273 for (i
= 0; i
< I386_NUM_REGS
; i
++)
274 collect_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
276 collect_register_by_name (regcache
, "orig_eax",
277 ((char *) buf
) + ORIG_EAX
* 4);
281 x86_store_gregset (struct regcache
*regcache
, const void *buf
)
286 if (register_size (0) == 8)
288 for (i
= 0; i
< X86_64_NUM_REGS
; i
++)
289 if (x86_64_regmap
[i
] != -1)
290 supply_register (regcache
, i
, ((char *) buf
) + x86_64_regmap
[i
]);
295 for (i
= 0; i
< I386_NUM_REGS
; i
++)
296 supply_register (regcache
, i
, ((char *) buf
) + i386_regmap
[i
]);
298 supply_register_by_name (regcache
, "orig_eax",
299 ((char *) buf
) + ORIG_EAX
* 4);
303 x86_fill_fpregset (struct regcache
*regcache
, void *buf
)
306 i387_cache_to_fxsave (regcache
, buf
);
308 i387_cache_to_fsave (regcache
, buf
);
313 x86_store_fpregset (struct regcache
*regcache
, const void *buf
)
316 i387_fxsave_to_cache (regcache
, buf
);
318 i387_fsave_to_cache (regcache
, buf
);
325 x86_fill_fpxregset (struct regcache
*regcache
, void *buf
)
327 i387_cache_to_fxsave (regcache
, buf
);
331 x86_store_fpxregset (struct regcache
*regcache
, const void *buf
)
333 i387_fxsave_to_cache (regcache
, buf
);
339 x86_fill_xstateregset (struct regcache
*regcache
, void *buf
)
341 i387_cache_to_xsave (regcache
, buf
);
345 x86_store_xstateregset (struct regcache
*regcache
, const void *buf
)
347 i387_xsave_to_cache (regcache
, buf
);
350 /* ??? The non-biarch i386 case stores all the i387 regs twice.
351 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
352 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
353 doesn't work. IWBN to avoid the duplication in the case where it
354 does work. Maybe the arch_setup routine could check whether it works
355 and update target_regsets accordingly, maybe by moving target_regsets
356 to linux_target_ops and set the right one there, rather than having to
357 modify the target_regsets global. */
359 struct regset_info target_regsets
[] =
361 #ifdef HAVE_PTRACE_GETREGS
362 { PTRACE_GETREGS
, PTRACE_SETREGS
, 0, sizeof (elf_gregset_t
),
364 x86_fill_gregset
, x86_store_gregset
},
365 { PTRACE_GETREGSET
, PTRACE_SETREGSET
, NT_X86_XSTATE
, 0,
366 EXTENDED_REGS
, x86_fill_xstateregset
, x86_store_xstateregset
},
368 # ifdef HAVE_PTRACE_GETFPXREGS
369 { PTRACE_GETFPXREGS
, PTRACE_SETFPXREGS
, 0, sizeof (elf_fpxregset_t
),
371 x86_fill_fpxregset
, x86_store_fpxregset
},
374 { PTRACE_GETFPREGS
, PTRACE_SETFPREGS
, 0, sizeof (elf_fpregset_t
),
376 x86_fill_fpregset
, x86_store_fpregset
},
377 #endif /* HAVE_PTRACE_GETREGS */
378 { 0, 0, 0, -1, -1, NULL
, NULL
}
382 x86_get_pc (struct regcache
*regcache
)
384 int use_64bit
= register_size (0) == 8;
389 collect_register_by_name (regcache
, "rip", &pc
);
390 return (CORE_ADDR
) pc
;
395 collect_register_by_name (regcache
, "eip", &pc
);
396 return (CORE_ADDR
) pc
;
401 x86_set_pc (struct regcache
*regcache
, CORE_ADDR pc
)
403 int use_64bit
= register_size (0) == 8;
407 unsigned long newpc
= pc
;
408 supply_register_by_name (regcache
, "rip", &newpc
);
412 unsigned int newpc
= pc
;
413 supply_register_by_name (regcache
, "eip", &newpc
);
/* The software breakpoint instruction on x86: "int3" (0xCC),
   a single byte on both 32-bit and 64-bit.  */
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1
421 x86_breakpoint_at (CORE_ADDR pc
)
425 (*the_target
->read_memory
) (pc
, &c
, 1);
432 /* Support for debug registers. */
435 x86_linux_dr_get (ptid_t ptid
, int regnum
)
440 tid
= ptid_get_lwp (ptid
);
443 value
= ptrace (PTRACE_PEEKUSER
, tid
,
444 offsetof (struct user
, u_debugreg
[regnum
]), 0);
446 error ("Couldn't read debug register");
452 x86_linux_dr_set (ptid_t ptid
, int regnum
, unsigned long value
)
456 tid
= ptid_get_lwp (ptid
);
459 ptrace (PTRACE_POKEUSER
, tid
,
460 offsetof (struct user
, u_debugreg
[regnum
]), value
);
462 error ("Couldn't write debug register");
466 update_debug_registers_callback (struct inferior_list_entry
*entry
,
469 struct lwp_info
*lwp
= (struct lwp_info
*) entry
;
470 int pid
= *(int *) pid_p
;
472 /* Only update the threads of this process. */
473 if (pid_of (lwp
) == pid
)
475 /* The actual update is done later just before resuming the lwp,
476 we just mark that the registers need updating. */
477 lwp
->arch_private
->debug_registers_changed
= 1;
479 /* If the lwp isn't stopped, force it to momentarily pause, so
480 we can update its debug registers. */
482 linux_stop_lwp (lwp
);
488 /* Update the inferior's debug register REGNUM from STATE. */
491 i386_dr_low_set_addr (const struct i386_debug_reg_state
*state
, int regnum
)
493 /* Only update the threads of this process. */
494 int pid
= pid_of (get_thread_lwp (current_inferior
));
496 if (! (regnum
>= 0 && regnum
<= DR_LASTADDR
- DR_FIRSTADDR
))
497 fatal ("Invalid debug register %d", regnum
);
499 find_inferior (&all_lwps
, update_debug_registers_callback
, &pid
);
502 /* Return the inferior's debug register REGNUM. */
505 i386_dr_low_get_addr (int regnum
)
507 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
508 ptid_t ptid
= ptid_of (lwp
);
510 /* DR6 and DR7 are retrieved with some other way. */
511 gdb_assert (DR_FIRSTADDR
<= regnum
&& regnum
<= DR_LASTADDR
);
513 return x86_linux_dr_get (ptid
, regnum
);
516 /* Update the inferior's DR7 debug control register from STATE. */
519 i386_dr_low_set_control (const struct i386_debug_reg_state
*state
)
521 /* Only update the threads of this process. */
522 int pid
= pid_of (get_thread_lwp (current_inferior
));
524 find_inferior (&all_lwps
, update_debug_registers_callback
, &pid
);
527 /* Return the inferior's DR7 debug control register. */
530 i386_dr_low_get_control (void)
532 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
533 ptid_t ptid
= ptid_of (lwp
);
535 return x86_linux_dr_get (ptid
, DR_CONTROL
);
538 /* Get the value of the DR6 debug status register from the inferior
539 and record it in STATE. */
542 i386_dr_low_get_status (void)
544 struct lwp_info
*lwp
= get_thread_lwp (current_inferior
);
545 ptid_t ptid
= ptid_of (lwp
);
547 return x86_linux_dr_get (ptid
, DR_STATUS
);
550 /* Breakpoint/Watchpoint support. */
553 x86_insert_point (char type
, CORE_ADDR addr
, int len
)
555 struct process_info
*proc
= current_process ();
562 ret
= prepare_to_access_memory ();
565 ret
= set_gdb_breakpoint_at (addr
);
566 done_accessing_memory ();
572 return i386_low_insert_watchpoint (&proc
->private->arch_private
->debug_reg_state
,
581 x86_remove_point (char type
, CORE_ADDR addr
, int len
)
583 struct process_info
*proc
= current_process ();
590 ret
= prepare_to_access_memory ();
593 ret
= delete_gdb_breakpoint_at (addr
);
594 done_accessing_memory ();
600 return i386_low_remove_watchpoint (&proc
->private->arch_private
->debug_reg_state
,
609 x86_stopped_by_watchpoint (void)
611 struct process_info
*proc
= current_process ();
612 return i386_low_stopped_by_watchpoint (&proc
->private->arch_private
->debug_reg_state
);
616 x86_stopped_data_address (void)
618 struct process_info
*proc
= current_process ();
620 if (i386_low_stopped_data_address (&proc
->private->arch_private
->debug_reg_state
,
626 /* Called when a new process is created. */
628 static struct arch_process_info
*
629 x86_linux_new_process (void)
631 struct arch_process_info
*info
= xcalloc (1, sizeof (*info
));
633 i386_low_init_dregs (&info
->debug_reg_state
);
638 /* Called when a new thread is detected. */
640 static struct arch_lwp_info
*
641 x86_linux_new_thread (void)
643 struct arch_lwp_info
*info
= xcalloc (1, sizeof (*info
));
645 info
->debug_registers_changed
= 1;
650 /* Called when resuming a thread.
651 If the debug regs have changed, update the thread's copies. */
654 x86_linux_prepare_to_resume (struct lwp_info
*lwp
)
656 ptid_t ptid
= ptid_of (lwp
);
657 int clear_status
= 0;
659 if (lwp
->arch_private
->debug_registers_changed
)
662 int pid
= ptid_get_pid (ptid
);
663 struct process_info
*proc
= find_process_pid (pid
);
664 struct i386_debug_reg_state
*state
665 = &proc
->private->arch_private
->debug_reg_state
;
667 for (i
= DR_FIRSTADDR
; i
<= DR_LASTADDR
; i
++)
668 if (state
->dr_ref_count
[i
] > 0)
670 x86_linux_dr_set (ptid
, i
, state
->dr_mirror
[i
]);
672 /* If we're setting a watchpoint, any change the inferior
673 had done itself to the debug registers needs to be
674 discarded, otherwise, i386_low_stopped_data_address can
679 x86_linux_dr_set (ptid
, DR_CONTROL
, state
->dr_control_mirror
);
681 lwp
->arch_private
->debug_registers_changed
= 0;
684 if (clear_status
|| lwp
->stopped_by_watchpoint
)
685 x86_linux_dr_set (ptid
, DR_STATUS
, 0);
688 /* When GDBSERVER is built as a 64-bit application on linux, the
689 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
690 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
691 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
692 conversion in-place ourselves. */
/* These types below (compat_*) define a siginfo type that is layout
   compatible with the siginfo type exported by the 32-bit userspace
   support.  All are 32 bits wide so the compat struct matches what a
   32-bit inferior sees.  */

typedef int compat_int_t;
typedef unsigned int compat_uptr_t;

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;
707 struct compat_timeval
709 compat_time_t tv_sec
;
713 typedef union compat_sigval
715 compat_int_t sival_int
;
716 compat_uptr_t sival_ptr
;
719 typedef struct compat_siginfo
727 int _pad
[((128 / sizeof (int)) - 3)];
736 /* POSIX.1b timers */
741 compat_sigval_t _sigval
;
744 /* POSIX.1b signals */
749 compat_sigval_t _sigval
;
758 compat_clock_t _utime
;
759 compat_clock_t _stime
;
762 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
/* Accessor shorthands into the compat_siginfo _sifields union,
   mirroring the si_* accessors of the native siginfo_t.  */
#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

/* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
   In their place is si_timer1,si_timer2.
   NOTE(review): the #ifndef guards implied by this comment were lost
   in this copy and have been restored.  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif
799 compat_siginfo_from_siginfo (compat_siginfo_t
*to
, siginfo_t
*from
)
801 memset (to
, 0, sizeof (*to
));
803 to
->si_signo
= from
->si_signo
;
804 to
->si_errno
= from
->si_errno
;
805 to
->si_code
= from
->si_code
;
807 if (to
->si_code
== SI_TIMER
)
809 to
->cpt_si_timerid
= from
->si_timerid
;
810 to
->cpt_si_overrun
= from
->si_overrun
;
811 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
813 else if (to
->si_code
== SI_USER
)
815 to
->cpt_si_pid
= from
->si_pid
;
816 to
->cpt_si_uid
= from
->si_uid
;
818 else if (to
->si_code
< 0)
820 to
->cpt_si_pid
= from
->si_pid
;
821 to
->cpt_si_uid
= from
->si_uid
;
822 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
826 switch (to
->si_signo
)
829 to
->cpt_si_pid
= from
->si_pid
;
830 to
->cpt_si_uid
= from
->si_uid
;
831 to
->cpt_si_status
= from
->si_status
;
832 to
->cpt_si_utime
= from
->si_utime
;
833 to
->cpt_si_stime
= from
->si_stime
;
839 to
->cpt_si_addr
= (intptr_t) from
->si_addr
;
842 to
->cpt_si_band
= from
->si_band
;
843 to
->cpt_si_fd
= from
->si_fd
;
846 to
->cpt_si_pid
= from
->si_pid
;
847 to
->cpt_si_uid
= from
->si_uid
;
848 to
->cpt_si_ptr
= (intptr_t) from
->si_ptr
;
855 siginfo_from_compat_siginfo (siginfo_t
*to
, compat_siginfo_t
*from
)
857 memset (to
, 0, sizeof (*to
));
859 to
->si_signo
= from
->si_signo
;
860 to
->si_errno
= from
->si_errno
;
861 to
->si_code
= from
->si_code
;
863 if (to
->si_code
== SI_TIMER
)
865 to
->si_timerid
= from
->cpt_si_timerid
;
866 to
->si_overrun
= from
->cpt_si_overrun
;
867 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
869 else if (to
->si_code
== SI_USER
)
871 to
->si_pid
= from
->cpt_si_pid
;
872 to
->si_uid
= from
->cpt_si_uid
;
874 else if (to
->si_code
< 0)
876 to
->si_pid
= from
->cpt_si_pid
;
877 to
->si_uid
= from
->cpt_si_uid
;
878 to
->si_ptr
= (void *) (intptr_t) from
->cpt_si_ptr
;
882 switch (to
->si_signo
)
885 to
->si_pid
= from
->cpt_si_pid
;
886 to
->si_uid
= from
->cpt_si_uid
;
887 to
->si_status
= from
->cpt_si_status
;
888 to
->si_utime
= from
->cpt_si_utime
;
889 to
->si_stime
= from
->cpt_si_stime
;
895 to
->si_addr
= (void *) (intptr_t) from
->cpt_si_addr
;
898 to
->si_band
= from
->cpt_si_band
;
899 to
->si_fd
= from
->cpt_si_fd
;
902 to
->si_pid
= from
->cpt_si_pid
;
903 to
->si_uid
= from
->cpt_si_uid
;
904 to
->si_ptr
= (void* ) (intptr_t) from
->cpt_si_ptr
;
910 #endif /* __x86_64__ */
912 /* Convert a native/host siginfo object, into/from the siginfo in the
913 layout of the inferiors' architecture. Returns true if any
914 conversion was done; false otherwise. If DIRECTION is 1, then copy
915 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
919 x86_siginfo_fixup (struct siginfo
*native
, void *inf
, int direction
)
922 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
923 if (register_size (0) == 4)
925 if (sizeof (struct siginfo
) != sizeof (compat_siginfo_t
))
926 fatal ("unexpected difference in siginfo");
929 compat_siginfo_from_siginfo ((struct compat_siginfo
*) inf
, native
);
931 siginfo_from_compat_siginfo (native
, (struct compat_siginfo
*) inf
);
942 /* Update gdbserver_xmltarget. */
945 x86_linux_update_xmltarget (void)
948 struct regset_info
*regset
;
949 static unsigned long long xcr0
;
950 static int have_ptrace_getregset
= -1;
951 #if !defined(__x86_64__) && defined(HAVE_PTRACE_GETFPXREGS)
952 static int have_ptrace_getfpxregs
= -1;
955 if (!current_inferior
)
958 /* Before changing the register cache internal layout or the target
959 regsets, flush the contents of the current valid caches back to
961 regcache_invalidate ();
963 pid
= pid_of (get_thread_lwp (current_inferior
));
965 if (num_xmm_registers
== 8)
966 init_registers_i386_linux ();
968 init_registers_amd64_linux ();
971 # ifdef HAVE_PTRACE_GETFPXREGS
972 if (have_ptrace_getfpxregs
== -1)
974 elf_fpxregset_t fpxregs
;
976 if (ptrace (PTRACE_GETFPXREGS
, pid
, 0, (int) &fpxregs
) < 0)
978 have_ptrace_getfpxregs
= 0;
979 x86_xcr0
= I386_XSTATE_X87_MASK
;
981 /* Disable PTRACE_GETFPXREGS. */
982 for (regset
= target_regsets
;
983 regset
->fill_function
!= NULL
; regset
++)
984 if (regset
->get_request
== PTRACE_GETFPXREGS
)
991 have_ptrace_getfpxregs
= 1;
994 if (!have_ptrace_getfpxregs
)
996 init_registers_i386_mmx_linux ();
1000 init_registers_i386_linux ();
1006 /* Don't use XML. */
1008 if (num_xmm_registers
== 8)
1009 gdbserver_xmltarget
= xmltarget_i386_linux_no_xml
;
1011 gdbserver_xmltarget
= xmltarget_amd64_linux_no_xml
;
1013 gdbserver_xmltarget
= xmltarget_i386_linux_no_xml
;
1016 x86_xcr0
= I386_XSTATE_SSE_MASK
;
1021 /* Check if XSAVE extended state is supported. */
1022 if (have_ptrace_getregset
== -1)
1024 unsigned long long xstateregs
[I386_XSTATE_SSE_SIZE
/ sizeof (long long)];
1027 iov
.iov_base
= xstateregs
;
1028 iov
.iov_len
= sizeof (xstateregs
);
1030 /* Check if PTRACE_GETREGSET works. */
1031 if (ptrace (PTRACE_GETREGSET
, pid
, (unsigned int) NT_X86_XSTATE
,
1034 have_ptrace_getregset
= 0;
1038 have_ptrace_getregset
= 1;
1040 /* Get XCR0 from XSAVE extended state at byte 464. */
1041 xcr0
= xstateregs
[464 / sizeof (long long)];
1043 /* Use PTRACE_GETREGSET if it is available. */
1044 for (regset
= target_regsets
;
1045 regset
->fill_function
!= NULL
; regset
++)
1046 if (regset
->get_request
== PTRACE_GETREGSET
)
1047 regset
->size
= I386_XSTATE_SIZE (xcr0
);
1048 else if (regset
->type
!= GENERAL_REGS
)
1052 if (have_ptrace_getregset
)
1054 /* AVX is the highest feature we support. */
1055 if ((xcr0
& I386_XSTATE_AVX_MASK
) == I386_XSTATE_AVX_MASK
)
1060 /* I386 has 8 xmm regs. */
1061 if (num_xmm_registers
== 8)
1062 init_registers_i386_avx_linux ();
1064 init_registers_amd64_avx_linux ();
1066 init_registers_i386_avx_linux ();
1072 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1073 PTRACE_GETREGSET. */
1076 x86_linux_process_qsupported (const char *query
)
1078 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
1079 with "i386" in qSupported query, it supports x86 XML target
1082 if (query
!= NULL
&& strncmp (query
, "xmlRegisters=", 13) == 0)
1084 char *copy
= xstrdup (query
+ 13);
1087 for (p
= strtok (copy
, ","); p
!= NULL
; p
= strtok (NULL
, ","))
1089 if (strcmp (p
, "i386") == 0)
1099 x86_linux_update_xmltarget ();
1102 /* Initialize gdbserver for the architecture of the inferior. */
1105 x86_arch_setup (void)
1108 int pid
= pid_of (get_thread_lwp (current_inferior
));
1109 int use_64bit
= linux_pid_exe_is_elf_64_file (pid
);
1113 /* This can only happen if /proc/<pid>/exe is unreadable,
1114 but "that can't happen" if we've gotten this far.
1115 Fall through and assume this is a 32-bit program. */
1119 /* Amd64 doesn't have HAVE_LINUX_USRREGS. */
1120 the_low_target
.num_regs
= -1;
1121 the_low_target
.regmap
= NULL
;
1122 the_low_target
.cannot_fetch_register
= NULL
;
1123 the_low_target
.cannot_store_register
= NULL
;
1125 /* Amd64 has 16 xmm regs. */
1126 num_xmm_registers
= 16;
1128 x86_linux_update_xmltarget ();
1133 /* Ok we have a 32-bit inferior. */
1135 the_low_target
.num_regs
= I386_NUM_REGS
;
1136 the_low_target
.regmap
= i386_regmap
;
1137 the_low_target
.cannot_fetch_register
= i386_cannot_fetch_register
;
1138 the_low_target
.cannot_store_register
= i386_cannot_store_register
;
1140 /* I386 has 8 xmm regs. */
1141 num_xmm_registers
= 8;
1143 x86_linux_update_xmltarget ();
1147 x86_supports_tracepoints (void)
1153 append_insns (CORE_ADDR
*to
, size_t len
, const unsigned char *buf
)
1155 write_inferior_memory (*to
, buf
, len
);
1160 push_opcode (unsigned char *buf
, char *op
)
1162 unsigned char *buf_org
= buf
;
1167 unsigned long ul
= strtoul (op
, &endptr
, 16);
1176 return buf
- buf_org
;
1181 /* Build a jump pad that saves registers and calls a collection
1182 function. Writes a jump instruction to the jump pad to
1183 JJUMPAD_INSN. The caller is responsible to write it in at the
1184 tracepoint address. */
1187 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1188 CORE_ADDR collector
,
1191 CORE_ADDR
*jump_entry
,
1192 CORE_ADDR
*trampoline
,
1193 ULONGEST
*trampoline_size
,
1194 unsigned char *jjump_pad_insn
,
1195 ULONGEST
*jjump_pad_insn_size
,
1196 CORE_ADDR
*adjusted_insn_addr
,
1197 CORE_ADDR
*adjusted_insn_addr_end
,
1200 unsigned char buf
[40];
1202 CORE_ADDR buildaddr
= *jump_entry
;
1204 /* Build the jump pad. */
1206 /* First, do tracepoint data collection. Save registers. */
1208 /* Need to ensure stack pointer saved first. */
1209 buf
[i
++] = 0x54; /* push %rsp */
1210 buf
[i
++] = 0x55; /* push %rbp */
1211 buf
[i
++] = 0x57; /* push %rdi */
1212 buf
[i
++] = 0x56; /* push %rsi */
1213 buf
[i
++] = 0x52; /* push %rdx */
1214 buf
[i
++] = 0x51; /* push %rcx */
1215 buf
[i
++] = 0x53; /* push %rbx */
1216 buf
[i
++] = 0x50; /* push %rax */
1217 buf
[i
++] = 0x41; buf
[i
++] = 0x57; /* push %r15 */
1218 buf
[i
++] = 0x41; buf
[i
++] = 0x56; /* push %r14 */
1219 buf
[i
++] = 0x41; buf
[i
++] = 0x55; /* push %r13 */
1220 buf
[i
++] = 0x41; buf
[i
++] = 0x54; /* push %r12 */
1221 buf
[i
++] = 0x41; buf
[i
++] = 0x53; /* push %r11 */
1222 buf
[i
++] = 0x41; buf
[i
++] = 0x52; /* push %r10 */
1223 buf
[i
++] = 0x41; buf
[i
++] = 0x51; /* push %r9 */
1224 buf
[i
++] = 0x41; buf
[i
++] = 0x50; /* push %r8 */
1225 buf
[i
++] = 0x9c; /* pushfq */
1226 buf
[i
++] = 0x48; /* movl <addr>,%rdi */
1228 *((unsigned long *)(buf
+ i
)) = (unsigned long) tpaddr
;
1229 i
+= sizeof (unsigned long);
1230 buf
[i
++] = 0x57; /* push %rdi */
1231 append_insns (&buildaddr
, i
, buf
);
1233 /* Stack space for the collecting_t object. */
1235 i
+= push_opcode (&buf
[i
], "48 83 ec 18"); /* sub $0x18,%rsp */
1236 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov <tpoint>,%rax */
1237 memcpy (buf
+ i
, &tpoint
, 8);
1239 i
+= push_opcode (&buf
[i
], "48 89 04 24"); /* mov %rax,(%rsp) */
1240 i
+= push_opcode (&buf
[i
],
1241 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1242 i
+= push_opcode (&buf
[i
], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1243 append_insns (&buildaddr
, i
, buf
);
1247 i
+= push_opcode (&buf
[i
], "48 be"); /* movl <lockaddr>,%rsi */
1248 memcpy (&buf
[i
], (void *) &lockaddr
, 8);
1250 i
+= push_opcode (&buf
[i
], "48 89 e1"); /* mov %rsp,%rcx */
1251 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1252 i
+= push_opcode (&buf
[i
], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1253 i
+= push_opcode (&buf
[i
], "48 85 c0"); /* test %rax,%rax */
1254 i
+= push_opcode (&buf
[i
], "75 f4"); /* jne <again> */
1255 append_insns (&buildaddr
, i
, buf
);
1257 /* Set up the gdb_collect call. */
1258 /* At this point, (stack pointer + 0x18) is the base of our saved
1262 i
+= push_opcode (&buf
[i
], "48 89 e6"); /* mov %rsp,%rsi */
1263 i
+= push_opcode (&buf
[i
], "48 83 c6 18"); /* add $0x18,%rsi */
1265 /* tpoint address may be 64-bit wide. */
1266 i
+= push_opcode (&buf
[i
], "48 bf"); /* movl <addr>,%rdi */
1267 memcpy (buf
+ i
, &tpoint
, 8);
1269 append_insns (&buildaddr
, i
, buf
);
1271 /* The collector function being in the shared library, may be
1272 >31-bits away off the jump pad. */
1274 i
+= push_opcode (&buf
[i
], "48 b8"); /* mov $collector,%rax */
1275 memcpy (buf
+ i
, &collector
, 8);
1277 i
+= push_opcode (&buf
[i
], "ff d0"); /* callq *%rax */
1278 append_insns (&buildaddr
, i
, buf
);
1280 /* Clear the spin-lock. */
1282 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1283 i
+= push_opcode (&buf
[i
], "48 a3"); /* mov %rax, lockaddr */
1284 memcpy (buf
+ i
, &lockaddr
, 8);
1286 append_insns (&buildaddr
, i
, buf
);
1288 /* Remove stack that had been used for the collect_t object. */
1290 i
+= push_opcode (&buf
[i
], "48 83 c4 18"); /* add $0x18,%rsp */
1291 append_insns (&buildaddr
, i
, buf
);
1293 /* Restore register state. */
1295 buf
[i
++] = 0x48; /* add $0x8,%rsp */
1299 buf
[i
++] = 0x9d; /* popfq */
1300 buf
[i
++] = 0x41; buf
[i
++] = 0x58; /* pop %r8 */
1301 buf
[i
++] = 0x41; buf
[i
++] = 0x59; /* pop %r9 */
1302 buf
[i
++] = 0x41; buf
[i
++] = 0x5a; /* pop %r10 */
1303 buf
[i
++] = 0x41; buf
[i
++] = 0x5b; /* pop %r11 */
1304 buf
[i
++] = 0x41; buf
[i
++] = 0x5c; /* pop %r12 */
1305 buf
[i
++] = 0x41; buf
[i
++] = 0x5d; /* pop %r13 */
1306 buf
[i
++] = 0x41; buf
[i
++] = 0x5e; /* pop %r14 */
1307 buf
[i
++] = 0x41; buf
[i
++] = 0x5f; /* pop %r15 */
1308 buf
[i
++] = 0x58; /* pop %rax */
1309 buf
[i
++] = 0x5b; /* pop %rbx */
1310 buf
[i
++] = 0x59; /* pop %rcx */
1311 buf
[i
++] = 0x5a; /* pop %rdx */
1312 buf
[i
++] = 0x5e; /* pop %rsi */
1313 buf
[i
++] = 0x5f; /* pop %rdi */
1314 buf
[i
++] = 0x5d; /* pop %rbp */
1315 buf
[i
++] = 0x5c; /* pop %rsp */
1316 append_insns (&buildaddr
, i
, buf
);
1318 /* Now, adjust the original instruction to execute in the jump
1320 *adjusted_insn_addr
= buildaddr
;
1321 relocate_instruction (&buildaddr
, tpaddr
);
1322 *adjusted_insn_addr_end
= buildaddr
;
1324 /* Finally, write a jump back to the program. */
1325 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1326 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1327 memcpy (buf
+ 1, &offset
, 4);
1328 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1330 /* The jump pad is now built. Wire in a jump to our jump pad. This
1331 is always done last (by our caller actually), so that we can
1332 install fast tracepoints with threads running. This relies on
1333 the agent's atomic write support. */
1334 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1335 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1336 memcpy (buf
+ 1, &offset
, 4);
1337 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1338 *jjump_pad_insn_size
= sizeof (jump_insn
);
1340 /* Return the end address of our pad. */
1341 *jump_entry
= buildaddr
;
1346 #endif /* __x86_64__ */
1348 /* Build a jump pad that saves registers and calls a collection
1349 function. Writes a jump instruction to the jump pad to
1350 JJUMPAD_INSN. The caller is responsible to write it in at the
1351 tracepoint address. */
1354 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1355 CORE_ADDR collector
,
1358 CORE_ADDR
*jump_entry
,
1359 CORE_ADDR
*trampoline
,
1360 ULONGEST
*trampoline_size
,
1361 unsigned char *jjump_pad_insn
,
1362 ULONGEST
*jjump_pad_insn_size
,
1363 CORE_ADDR
*adjusted_insn_addr
,
1364 CORE_ADDR
*adjusted_insn_addr_end
,
1367 unsigned char buf
[0x100];
1369 CORE_ADDR buildaddr
= *jump_entry
;
1371 /* Build the jump pad. */
1373 /* First, do tracepoint data collection. Save registers. */
1375 buf
[i
++] = 0x60; /* pushad */
1376 buf
[i
++] = 0x68; /* push tpaddr aka $pc */
1377 *((int *)(buf
+ i
)) = (int) tpaddr
;
1379 buf
[i
++] = 0x9c; /* pushf */
1380 buf
[i
++] = 0x1e; /* push %ds */
1381 buf
[i
++] = 0x06; /* push %es */
1382 buf
[i
++] = 0x0f; /* push %fs */
1384 buf
[i
++] = 0x0f; /* push %gs */
1386 buf
[i
++] = 0x16; /* push %ss */
1387 buf
[i
++] = 0x0e; /* push %cs */
1388 append_insns (&buildaddr
, i
, buf
);
1390 /* Stack space for the collecting_t object. */
1392 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1394 /* Build the object. */
1395 i
+= push_opcode (&buf
[i
], "b8"); /* mov <tpoint>,%eax */
1396 memcpy (buf
+ i
, &tpoint
, 4);
1398 i
+= push_opcode (&buf
[i
], "89 04 24"); /* mov %eax,(%esp) */
1400 i
+= push_opcode (&buf
[i
], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1401 i
+= push_opcode (&buf
[i
], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1402 append_insns (&buildaddr
, i
, buf
);
1404 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1405 If we cared for it, this could be using xchg alternatively. */
1408 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1409 i
+= push_opcode (&buf
[i
], "f0 0f b1 25"); /* lock cmpxchg
1411 memcpy (&buf
[i
], (void *) &lockaddr
, 4);
1413 i
+= push_opcode (&buf
[i
], "85 c0"); /* test %eax,%eax */
1414 i
+= push_opcode (&buf
[i
], "75 f2"); /* jne <again> */
1415 append_insns (&buildaddr
, i
, buf
);
1418 /* Set up arguments to the gdb_collect call. */
1420 i
+= push_opcode (&buf
[i
], "89 e0"); /* mov %esp,%eax */
1421 i
+= push_opcode (&buf
[i
], "83 c0 08"); /* add $0x08,%eax */
1422 i
+= push_opcode (&buf
[i
], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1423 append_insns (&buildaddr
, i
, buf
);
1426 i
+= push_opcode (&buf
[i
], "83 ec 08"); /* sub $0x8,%esp */
1427 append_insns (&buildaddr
, i
, buf
);
1430 i
+= push_opcode (&buf
[i
], "c7 04 24"); /* movl <addr>,(%esp) */
1431 memcpy (&buf
[i
], (void *) &tpoint
, 4);
1433 append_insns (&buildaddr
, i
, buf
);
1435 buf
[0] = 0xe8; /* call <reladdr> */
1436 offset
= collector
- (buildaddr
+ sizeof (jump_insn
));
1437 memcpy (buf
+ 1, &offset
, 4);
1438 append_insns (&buildaddr
, 5, buf
);
1439 /* Clean up after the call. */
1440 buf
[0] = 0x83; /* add $0x8,%esp */
1443 append_insns (&buildaddr
, 3, buf
);
1446 /* Clear the spin-lock. This would need the LOCK prefix on older
1449 i
+= push_opcode (&buf
[i
], "31 c0"); /* xor %eax,%eax */
1450 i
+= push_opcode (&buf
[i
], "a3"); /* mov %eax, lockaddr */
1451 memcpy (buf
+ i
, &lockaddr
, 4);
1453 append_insns (&buildaddr
, i
, buf
);
1456 /* Remove stack that had been used for the collect_t object. */
1458 i
+= push_opcode (&buf
[i
], "83 c4 08"); /* add $0x08,%esp */
1459 append_insns (&buildaddr
, i
, buf
);
1462 buf
[i
++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1465 buf
[i
++] = 0x17; /* pop %ss */
1466 buf
[i
++] = 0x0f; /* pop %gs */
1468 buf
[i
++] = 0x0f; /* pop %fs */
1470 buf
[i
++] = 0x07; /* pop %es */
1471 buf
[i
++] = 0x1f; /* pop %ds */
1472 buf
[i
++] = 0x9d; /* popf */
1473 buf
[i
++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1476 buf
[i
++] = 0x61; /* popad */
1477 append_insns (&buildaddr
, i
, buf
);
1479 /* Now, adjust the original instruction to execute in the jump
1481 *adjusted_insn_addr
= buildaddr
;
1482 relocate_instruction (&buildaddr
, tpaddr
);
1483 *adjusted_insn_addr_end
= buildaddr
;
1485 /* Write the jump back to the program. */
1486 offset
= (tpaddr
+ orig_size
) - (buildaddr
+ sizeof (jump_insn
));
1487 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1488 memcpy (buf
+ 1, &offset
, 4);
1489 append_insns (&buildaddr
, sizeof (jump_insn
), buf
);
1491 /* The jump pad is now built. Wire in a jump to our jump pad. This
1492 is always done last (by our caller actually), so that we can
1493 install fast tracepoints with threads running. This relies on
1494 the agent's atomic write support. */
1497 /* Create a trampoline. */
1498 *trampoline_size
= sizeof (jump_insn
);
1499 if (!claim_trampoline_space (*trampoline_size
, trampoline
))
1501 /* No trampoline space available. */
1503 "E.Cannot allocate trampoline space needed for fast "
1504 "tracepoints on 4-byte instructions.");
1508 offset
= *jump_entry
- (*trampoline
+ sizeof (jump_insn
));
1509 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1510 memcpy (buf
+ 1, &offset
, 4);
1511 write_inferior_memory (*trampoline
, buf
, sizeof (jump_insn
));
1513 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1514 offset
= (*trampoline
- (tpaddr
+ sizeof (small_jump_insn
))) & 0xffff;
1515 memcpy (buf
, small_jump_insn
, sizeof (small_jump_insn
));
1516 memcpy (buf
+ 2, &offset
, 2);
1517 memcpy (jjump_pad_insn
, buf
, sizeof (small_jump_insn
));
1518 *jjump_pad_insn_size
= sizeof (small_jump_insn
);
1522 /* Else use a 32-bit relative jump instruction. */
1523 offset
= *jump_entry
- (tpaddr
+ sizeof (jump_insn
));
1524 memcpy (buf
, jump_insn
, sizeof (jump_insn
));
1525 memcpy (buf
+ 1, &offset
, 4);
1526 memcpy (jjump_pad_insn
, buf
, sizeof (jump_insn
));
1527 *jjump_pad_insn_size
= sizeof (jump_insn
);
1530 /* Return the end address of our pad. */
1531 *jump_entry
= buildaddr
;
1537 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint
, CORE_ADDR tpaddr
,
1538 CORE_ADDR collector
,
1541 CORE_ADDR
*jump_entry
,
1542 CORE_ADDR
*trampoline
,
1543 ULONGEST
*trampoline_size
,
1544 unsigned char *jjump_pad_insn
,
1545 ULONGEST
*jjump_pad_insn_size
,
1546 CORE_ADDR
*adjusted_insn_addr
,
1547 CORE_ADDR
*adjusted_insn_addr_end
,
1551 if (register_size (0) == 8)
1552 return amd64_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1553 collector
, lockaddr
,
1554 orig_size
, jump_entry
,
1555 trampoline
, trampoline_size
,
1557 jjump_pad_insn_size
,
1559 adjusted_insn_addr_end
,
1563 return i386_install_fast_tracepoint_jump_pad (tpoint
, tpaddr
,
1564 collector
, lockaddr
,
1565 orig_size
, jump_entry
,
1566 trampoline
, trampoline_size
,
1568 jjump_pad_insn_size
,
1570 adjusted_insn_addr_end
,
1574 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1578 x86_get_min_fast_tracepoint_insn_len (void)
1580 static int warned_about_fast_tracepoints
= 0;
1583 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1584 used for fast tracepoints. */
1585 if (register_size (0) == 8)
1589 if (in_process_agent_loaded ())
1591 char errbuf
[IPA_BUFSIZ
];
1595 /* On x86, if trampolines are available, then 4-byte jump instructions
1596 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1597 with a 4-byte offset are used instead. */
1598 if (have_fast_tracepoint_trampoline_buffer (errbuf
))
1602 /* GDB has no channel to explain to user why a shorter fast
1603 tracepoint is not possible, but at least make GDBserver
1604 mention that something has gone awry. */
1605 if (!warned_about_fast_tracepoints
)
1607 warning ("4-byte fast tracepoints not available; %s\n", errbuf
);
1608 warned_about_fast_tracepoints
= 1;
1615 /* Indicate that the minimum length is currently unknown since the IPA
1616 has not loaded yet. */
1622 add_insns (unsigned char *start
, int len
)
1624 CORE_ADDR buildaddr
= current_insn_ptr
;
1627 fprintf (stderr
, "Adding %d bytes of insn at %s\n",
1628 len
, paddress (buildaddr
));
1630 append_insns (&buildaddr
, len
, start
);
1631 current_insn_ptr
= buildaddr
;
1634 /* Our general strategy for emitting code is to avoid specifying raw
1635 bytes whenever possible, and instead copy a block of inline asm
1636 that is embedded in the function. This is a little messy, because
1637 we need to keep the compiler from discarding what looks like dead
1638 code, plus suppress various warnings. */
1640 #define EMIT_ASM(NAME, INSNS) \
1643 extern unsigned char start_ ## NAME, end_ ## NAME; \
1644 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1645 __asm__ ("jmp end_" #NAME "\n" \
1646 "\t" "start_" #NAME ":" \
1648 "\t" "end_" #NAME ":"); \
1653 #define EMIT_ASM32(NAME,INSNS) \
1656 extern unsigned char start_ ## NAME, end_ ## NAME; \
1657 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1658 __asm__ (".code32\n" \
1659 "\t" "jmp end_" #NAME "\n" \
1660 "\t" "start_" #NAME ":\n" \
1662 "\t" "end_" #NAME ":\n" \
1668 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
1675 amd64_emit_prologue (void)
1677 EMIT_ASM (amd64_prologue
,
1679 "movq %rsp,%rbp\n\t"
1680 "sub $0x20,%rsp\n\t"
1681 "movq %rdi,-8(%rbp)\n\t"
1682 "movq %rsi,-16(%rbp)");
1687 amd64_emit_epilogue (void)
1689 EMIT_ASM (amd64_epilogue
,
1690 "movq -16(%rbp),%rdi\n\t"
1691 "movq %rax,(%rdi)\n\t"
1698 amd64_emit_add (void)
1700 EMIT_ASM (amd64_add
,
1701 "add (%rsp),%rax\n\t"
1702 "lea 0x8(%rsp),%rsp");
1706 amd64_emit_sub (void)
1708 EMIT_ASM (amd64_sub
,
1709 "sub %rax,(%rsp)\n\t"
1714 amd64_emit_mul (void)
1720 amd64_emit_lsh (void)
1726 amd64_emit_rsh_signed (void)
1732 amd64_emit_rsh_unsigned (void)
1738 amd64_emit_ext (int arg
)
1743 EMIT_ASM (amd64_ext_8
,
1749 EMIT_ASM (amd64_ext_16
,
1754 EMIT_ASM (amd64_ext_32
,
1763 amd64_emit_log_not (void)
1765 EMIT_ASM (amd64_log_not
,
1766 "test %rax,%rax\n\t"
1772 amd64_emit_bit_and (void)
1774 EMIT_ASM (amd64_and
,
1775 "and (%rsp),%rax\n\t"
1776 "lea 0x8(%rsp),%rsp");
1780 amd64_emit_bit_or (void)
1783 "or (%rsp),%rax\n\t"
1784 "lea 0x8(%rsp),%rsp");
1788 amd64_emit_bit_xor (void)
1790 EMIT_ASM (amd64_xor
,
1791 "xor (%rsp),%rax\n\t"
1792 "lea 0x8(%rsp),%rsp");
1796 amd64_emit_bit_not (void)
1798 EMIT_ASM (amd64_bit_not
,
1799 "xorq $0xffffffffffffffff,%rax");
1803 amd64_emit_equal (void)
1805 EMIT_ASM (amd64_equal
,
1806 "cmp %rax,(%rsp)\n\t"
1807 "je .Lamd64_equal_true\n\t"
1809 "jmp .Lamd64_equal_end\n\t"
1810 ".Lamd64_equal_true:\n\t"
1812 ".Lamd64_equal_end:\n\t"
1813 "lea 0x8(%rsp),%rsp");
1817 amd64_emit_less_signed (void)
1819 EMIT_ASM (amd64_less_signed
,
1820 "cmp %rax,(%rsp)\n\t"
1821 "jl .Lamd64_less_signed_true\n\t"
1823 "jmp .Lamd64_less_signed_end\n\t"
1824 ".Lamd64_less_signed_true:\n\t"
1826 ".Lamd64_less_signed_end:\n\t"
1827 "lea 0x8(%rsp),%rsp");
1831 amd64_emit_less_unsigned (void)
1833 EMIT_ASM (amd64_less_unsigned
,
1834 "cmp %rax,(%rsp)\n\t"
1835 "jb .Lamd64_less_unsigned_true\n\t"
1837 "jmp .Lamd64_less_unsigned_end\n\t"
1838 ".Lamd64_less_unsigned_true:\n\t"
1840 ".Lamd64_less_unsigned_end:\n\t"
1841 "lea 0x8(%rsp),%rsp");
1845 amd64_emit_ref (int size
)
1850 EMIT_ASM (amd64_ref1
,
1854 EMIT_ASM (amd64_ref2
,
1858 EMIT_ASM (amd64_ref4
,
1859 "movl (%rax),%eax");
1862 EMIT_ASM (amd64_ref8
,
1863 "movq (%rax),%rax");
1869 amd64_emit_if_goto (int *offset_p
, int *size_p
)
1871 EMIT_ASM (amd64_if_goto
,
1875 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1883 amd64_emit_goto (int *offset_p
, int *size_p
)
1885 EMIT_ASM (amd64_goto
,
1886 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1894 amd64_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
1896 int diff
= (to
- (from
+ size
));
1897 unsigned char buf
[sizeof (int)];
1905 memcpy (buf
, &diff
, sizeof (int));
1906 write_inferior_memory (from
, buf
, sizeof (int));
1910 amd64_emit_const (LONGEST num
)
1912 unsigned char buf
[16];
1914 CORE_ADDR buildaddr
= current_insn_ptr
;
1917 buf
[i
++] = 0x48; buf
[i
++] = 0xb8; /* mov $<n>,%rax */
1918 memcpy (&buf
[i
], &num
, sizeof (num
));
1920 append_insns (&buildaddr
, i
, buf
);
1921 current_insn_ptr
= buildaddr
;
1925 amd64_emit_call (CORE_ADDR fn
)
1927 unsigned char buf
[16];
1929 CORE_ADDR buildaddr
;
1932 /* The destination function being in the shared library, may be
1933 >31-bits away off the compiled code pad. */
1935 buildaddr
= current_insn_ptr
;
1937 offset64
= fn
- (buildaddr
+ 1 /* call op */ + 4 /* 32-bit offset */);
1941 if (offset64
> INT_MAX
|| offset64
< INT_MIN
)
1943 /* Offset is too large for a call. Use callq, but that requires
1944 a register, so avoid it if possible. Use r10, since it is
1945 call-clobbered, we don't have to push/pop it. */
1946 buf
[i
++] = 0x48; /* mov $fn,%r10 */
1948 memcpy (buf
+ i
, &fn
, 8);
1950 buf
[i
++] = 0xff; /* callq *%r10 */
1955 int offset32
= offset64
; /* we know we can't overflow here. */
1956 memcpy (buf
+ i
, &offset32
, 4);
1960 append_insns (&buildaddr
, i
, buf
);
1961 current_insn_ptr
= buildaddr
;
1965 amd64_emit_reg (int reg
)
1967 unsigned char buf
[16];
1969 CORE_ADDR buildaddr
;
1971 /* Assume raw_regs is still in %rdi. */
1972 buildaddr
= current_insn_ptr
;
1974 buf
[i
++] = 0xbe; /* mov $<n>,%esi */
1975 memcpy (&buf
[i
], ®
, sizeof (reg
));
1977 append_insns (&buildaddr
, i
, buf
);
1978 current_insn_ptr
= buildaddr
;
1979 amd64_emit_call (get_raw_reg_func_addr ());
1983 amd64_emit_pop (void)
1985 EMIT_ASM (amd64_pop
,
1990 amd64_emit_stack_flush (void)
1992 EMIT_ASM (amd64_stack_flush
,
1997 amd64_emit_zero_ext (int arg
)
2002 EMIT_ASM (amd64_zero_ext_8
,
2006 EMIT_ASM (amd64_zero_ext_16
,
2007 "and $0xffff,%rax");
2010 EMIT_ASM (amd64_zero_ext_32
,
2011 "mov $0xffffffff,%rcx\n\t"
2020 amd64_emit_swap (void)
2022 EMIT_ASM (amd64_swap
,
2029 amd64_emit_stack_adjust (int n
)
2031 unsigned char buf
[16];
2033 CORE_ADDR buildaddr
= current_insn_ptr
;
2036 buf
[i
++] = 0x48; /* lea $<n>(%rsp),%rsp */
2040 /* This only handles adjustments up to 16, but we don't expect any more. */
2042 append_insns (&buildaddr
, i
, buf
);
2043 current_insn_ptr
= buildaddr
;
2046 /* FN's prototype is `LONGEST(*fn)(int)'. */
2049 amd64_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2051 unsigned char buf
[16];
2053 CORE_ADDR buildaddr
;
2055 buildaddr
= current_insn_ptr
;
2057 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2058 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2060 append_insns (&buildaddr
, i
, buf
);
2061 current_insn_ptr
= buildaddr
;
2062 amd64_emit_call (fn
);
2065 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2068 amd64_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2070 unsigned char buf
[16];
2072 CORE_ADDR buildaddr
;
2074 buildaddr
= current_insn_ptr
;
2076 buf
[i
++] = 0xbf; /* movl $<n>,%edi */
2077 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2079 append_insns (&buildaddr
, i
, buf
);
2080 current_insn_ptr
= buildaddr
;
2081 EMIT_ASM (amd64_void_call_2_a
,
2082 /* Save away a copy of the stack top. */
2084 /* Also pass top as the second argument. */
2086 amd64_emit_call (fn
);
2087 EMIT_ASM (amd64_void_call_2_b
,
2088 /* Restore the stack top, %rax may have been trashed. */
2093 amd64_emit_eq_goto (int *offset_p
, int *size_p
)
2096 "cmp %rax,(%rsp)\n\t"
2097 "jne .Lamd64_eq_fallthru\n\t"
2098 "lea 0x8(%rsp),%rsp\n\t"
2100 /* jmp, but don't trust the assembler to choose the right jump */
2101 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2102 ".Lamd64_eq_fallthru:\n\t"
2103 "lea 0x8(%rsp),%rsp\n\t"
2113 amd64_emit_ne_goto (int *offset_p
, int *size_p
)
2116 "cmp %rax,(%rsp)\n\t"
2117 "je .Lamd64_ne_fallthru\n\t"
2118 "lea 0x8(%rsp),%rsp\n\t"
2120 /* jmp, but don't trust the assembler to choose the right jump */
2121 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2122 ".Lamd64_ne_fallthru:\n\t"
2123 "lea 0x8(%rsp),%rsp\n\t"
2133 amd64_emit_lt_goto (int *offset_p
, int *size_p
)
2136 "cmp %rax,(%rsp)\n\t"
2137 "jnl .Lamd64_lt_fallthru\n\t"
2138 "lea 0x8(%rsp),%rsp\n\t"
2140 /* jmp, but don't trust the assembler to choose the right jump */
2141 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2142 ".Lamd64_lt_fallthru:\n\t"
2143 "lea 0x8(%rsp),%rsp\n\t"
2153 amd64_emit_le_goto (int *offset_p
, int *size_p
)
2156 "cmp %rax,(%rsp)\n\t"
2157 "jnle .Lamd64_le_fallthru\n\t"
2158 "lea 0x8(%rsp),%rsp\n\t"
2160 /* jmp, but don't trust the assembler to choose the right jump */
2161 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2162 ".Lamd64_le_fallthru:\n\t"
2163 "lea 0x8(%rsp),%rsp\n\t"
2173 amd64_emit_gt_goto (int *offset_p
, int *size_p
)
2176 "cmp %rax,(%rsp)\n\t"
2177 "jng .Lamd64_gt_fallthru\n\t"
2178 "lea 0x8(%rsp),%rsp\n\t"
2180 /* jmp, but don't trust the assembler to choose the right jump */
2181 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2182 ".Lamd64_gt_fallthru:\n\t"
2183 "lea 0x8(%rsp),%rsp\n\t"
2193 amd64_emit_ge_goto (int *offset_p
, int *size_p
)
2196 "cmp %rax,(%rsp)\n\t"
2197 "jnge .Lamd64_ge_fallthru\n\t"
2198 ".Lamd64_ge_jump:\n\t"
2199 "lea 0x8(%rsp),%rsp\n\t"
2201 /* jmp, but don't trust the assembler to choose the right jump */
2202 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2203 ".Lamd64_ge_fallthru:\n\t"
2204 "lea 0x8(%rsp),%rsp\n\t"
2213 struct emit_ops amd64_emit_ops
=
2215 amd64_emit_prologue
,
2216 amd64_emit_epilogue
,
2221 amd64_emit_rsh_signed
,
2222 amd64_emit_rsh_unsigned
,
2230 amd64_emit_less_signed
,
2231 amd64_emit_less_unsigned
,
2235 amd64_write_goto_address
,
2240 amd64_emit_stack_flush
,
2241 amd64_emit_zero_ext
,
2243 amd64_emit_stack_adjust
,
2244 amd64_emit_int_call_1
,
2245 amd64_emit_void_call_2
,
2254 #endif /* __x86_64__ */
2257 i386_emit_prologue (void)
2259 EMIT_ASM32 (i386_prologue
,
2263 /* At this point, the raw regs base address is at 8(%ebp), and the
2264 value pointer is at 12(%ebp). */
2268 i386_emit_epilogue (void)
2270 EMIT_ASM32 (i386_epilogue
,
2271 "mov 12(%ebp),%ecx\n\t"
2272 "mov %eax,(%ecx)\n\t"
2273 "mov %ebx,0x4(%ecx)\n\t"
2281 i386_emit_add (void)
2283 EMIT_ASM32 (i386_add
,
2284 "add (%esp),%eax\n\t"
2285 "adc 0x4(%esp),%ebx\n\t"
2286 "lea 0x8(%esp),%esp");
2290 i386_emit_sub (void)
2292 EMIT_ASM32 (i386_sub
,
2293 "subl %eax,(%esp)\n\t"
2294 "sbbl %ebx,4(%esp)\n\t"
2300 i386_emit_mul (void)
2306 i386_emit_lsh (void)
2312 i386_emit_rsh_signed (void)
2318 i386_emit_rsh_unsigned (void)
2324 i386_emit_ext (int arg
)
2329 EMIT_ASM32 (i386_ext_8
,
2332 "movl %eax,%ebx\n\t"
2336 EMIT_ASM32 (i386_ext_16
,
2338 "movl %eax,%ebx\n\t"
2342 EMIT_ASM32 (i386_ext_32
,
2343 "movl %eax,%ebx\n\t"
2352 i386_emit_log_not (void)
2354 EMIT_ASM32 (i386_log_not
,
2356 "test %eax,%eax\n\t"
2363 i386_emit_bit_and (void)
2365 EMIT_ASM32 (i386_and
,
2366 "and (%esp),%eax\n\t"
2367 "and 0x4(%esp),%ebx\n\t"
2368 "lea 0x8(%esp),%esp");
2372 i386_emit_bit_or (void)
2374 EMIT_ASM32 (i386_or
,
2375 "or (%esp),%eax\n\t"
2376 "or 0x4(%esp),%ebx\n\t"
2377 "lea 0x8(%esp),%esp");
2381 i386_emit_bit_xor (void)
2383 EMIT_ASM32 (i386_xor
,
2384 "xor (%esp),%eax\n\t"
2385 "xor 0x4(%esp),%ebx\n\t"
2386 "lea 0x8(%esp),%esp");
2390 i386_emit_bit_not (void)
2392 EMIT_ASM32 (i386_bit_not
,
2393 "xor $0xffffffff,%eax\n\t"
2394 "xor $0xffffffff,%ebx\n\t");
2398 i386_emit_equal (void)
2400 EMIT_ASM32 (i386_equal
,
2401 "cmpl %ebx,4(%esp)\n\t"
2402 "jne .Li386_equal_false\n\t"
2403 "cmpl %eax,(%esp)\n\t"
2404 "je .Li386_equal_true\n\t"
2405 ".Li386_equal_false:\n\t"
2407 "jmp .Li386_equal_end\n\t"
2408 ".Li386_equal_true:\n\t"
2410 ".Li386_equal_end:\n\t"
2412 "lea 0x8(%esp),%esp");
2416 i386_emit_less_signed (void)
2418 EMIT_ASM32 (i386_less_signed
,
2419 "cmpl %ebx,4(%esp)\n\t"
2420 "jl .Li386_less_signed_true\n\t"
2421 "jne .Li386_less_signed_false\n\t"
2422 "cmpl %eax,(%esp)\n\t"
2423 "jl .Li386_less_signed_true\n\t"
2424 ".Li386_less_signed_false:\n\t"
2426 "jmp .Li386_less_signed_end\n\t"
2427 ".Li386_less_signed_true:\n\t"
2429 ".Li386_less_signed_end:\n\t"
2431 "lea 0x8(%esp),%esp");
2435 i386_emit_less_unsigned (void)
2437 EMIT_ASM32 (i386_less_unsigned
,
2438 "cmpl %ebx,4(%esp)\n\t"
2439 "jb .Li386_less_unsigned_true\n\t"
2440 "jne .Li386_less_unsigned_false\n\t"
2441 "cmpl %eax,(%esp)\n\t"
2442 "jb .Li386_less_unsigned_true\n\t"
2443 ".Li386_less_unsigned_false:\n\t"
2445 "jmp .Li386_less_unsigned_end\n\t"
2446 ".Li386_less_unsigned_true:\n\t"
2448 ".Li386_less_unsigned_end:\n\t"
2450 "lea 0x8(%esp),%esp");
2454 i386_emit_ref (int size
)
2459 EMIT_ASM32 (i386_ref1
,
2463 EMIT_ASM32 (i386_ref2
,
2467 EMIT_ASM32 (i386_ref4
,
2468 "movl (%eax),%eax");
2471 EMIT_ASM32 (i386_ref8
,
2472 "movl 4(%eax),%ebx\n\t"
2473 "movl (%eax),%eax");
2479 i386_emit_if_goto (int *offset_p
, int *size_p
)
2481 EMIT_ASM32 (i386_if_goto
,
2487 /* Don't trust the assembler to choose the right jump */
2488 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2491 *offset_p
= 11; /* be sure that this matches the sequence above */
2497 i386_emit_goto (int *offset_p
, int *size_p
)
2499 EMIT_ASM32 (i386_goto
,
2500 /* Don't trust the assembler to choose the right jump */
2501 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2509 i386_write_goto_address (CORE_ADDR from
, CORE_ADDR to
, int size
)
2511 int diff
= (to
- (from
+ size
));
2512 unsigned char buf
[sizeof (int)];
2514 /* We're only doing 4-byte sizes at the moment. */
2521 memcpy (buf
, &diff
, sizeof (int));
2522 write_inferior_memory (from
, buf
, sizeof (int));
2526 i386_emit_const (LONGEST num
)
2528 unsigned char buf
[16];
2530 CORE_ADDR buildaddr
= current_insn_ptr
;
2533 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2534 lo
= num
& 0xffffffff;
2535 memcpy (&buf
[i
], &lo
, sizeof (lo
));
2537 hi
= ((num
>> 32) & 0xffffffff);
2540 buf
[i
++] = 0xbb; /* mov $<n>,%ebx */
2541 memcpy (&buf
[i
], &hi
, sizeof (hi
));
2546 buf
[i
++] = 0x31; buf
[i
++] = 0xdb; /* xor %ebx,%ebx */
2548 append_insns (&buildaddr
, i
, buf
);
2549 current_insn_ptr
= buildaddr
;
2553 i386_emit_call (CORE_ADDR fn
)
2555 unsigned char buf
[16];
2557 CORE_ADDR buildaddr
;
2559 buildaddr
= current_insn_ptr
;
2561 buf
[i
++] = 0xe8; /* call <reladdr> */
2562 offset
= ((int) fn
) - (buildaddr
+ 5);
2563 memcpy (buf
+ 1, &offset
, 4);
2564 append_insns (&buildaddr
, 5, buf
);
2565 current_insn_ptr
= buildaddr
;
2569 i386_emit_reg (int reg
)
2571 unsigned char buf
[16];
2573 CORE_ADDR buildaddr
;
2575 EMIT_ASM32 (i386_reg_a
,
2577 buildaddr
= current_insn_ptr
;
2579 buf
[i
++] = 0xb8; /* mov $<n>,%eax */
2580 memcpy (&buf
[i
], ®
, sizeof (reg
));
2582 append_insns (&buildaddr
, i
, buf
);
2583 current_insn_ptr
= buildaddr
;
2584 EMIT_ASM32 (i386_reg_b
,
2585 "mov %eax,4(%esp)\n\t"
2586 "mov 8(%ebp),%eax\n\t"
2588 i386_emit_call (get_raw_reg_func_addr ());
2589 EMIT_ASM32 (i386_reg_c
,
2591 "lea 0x8(%esp),%esp");
2595 i386_emit_pop (void)
2597 EMIT_ASM32 (i386_pop
,
2603 i386_emit_stack_flush (void)
2605 EMIT_ASM32 (i386_stack_flush
,
2611 i386_emit_zero_ext (int arg
)
2616 EMIT_ASM32 (i386_zero_ext_8
,
2617 "and $0xff,%eax\n\t"
2621 EMIT_ASM32 (i386_zero_ext_16
,
2622 "and $0xffff,%eax\n\t"
2626 EMIT_ASM32 (i386_zero_ext_32
,
2635 i386_emit_swap (void)
2637 EMIT_ASM32 (i386_swap
,
2647 i386_emit_stack_adjust (int n
)
2649 unsigned char buf
[16];
2651 CORE_ADDR buildaddr
= current_insn_ptr
;
2654 buf
[i
++] = 0x8d; /* lea $<n>(%esp),%esp */
2658 append_insns (&buildaddr
, i
, buf
);
2659 current_insn_ptr
= buildaddr
;
2662 /* FN's prototype is `LONGEST(*fn)(int)'. */
2665 i386_emit_int_call_1 (CORE_ADDR fn
, int arg1
)
2667 unsigned char buf
[16];
2669 CORE_ADDR buildaddr
;
2671 EMIT_ASM32 (i386_int_call_1_a
,
2672 /* Reserve a bit of stack space. */
2674 /* Put the one argument on the stack. */
2675 buildaddr
= current_insn_ptr
;
2677 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
2680 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2682 append_insns (&buildaddr
, i
, buf
);
2683 current_insn_ptr
= buildaddr
;
2684 i386_emit_call (fn
);
2685 EMIT_ASM32 (i386_int_call_1_c
,
2687 "lea 0x8(%esp),%esp");
2690 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2693 i386_emit_void_call_2 (CORE_ADDR fn
, int arg1
)
2695 unsigned char buf
[16];
2697 CORE_ADDR buildaddr
;
2699 EMIT_ASM32 (i386_void_call_2_a
,
2700 /* Preserve %eax only; we don't have to worry about %ebx. */
2702 /* Reserve a bit of stack space for arguments. */
2703 "sub $0x10,%esp\n\t"
2704 /* Copy "top" to the second argument position. (Note that
2705 we can't assume function won't scribble on its
2706 arguments, so don't try to restore from this.) */
2707 "mov %eax,4(%esp)\n\t"
2708 "mov %ebx,8(%esp)");
2709 /* Put the first argument on the stack. */
2710 buildaddr
= current_insn_ptr
;
2712 buf
[i
++] = 0xc7; /* movl $<arg1>,(%esp) */
2715 memcpy (&buf
[i
], &arg1
, sizeof (arg1
));
2717 append_insns (&buildaddr
, i
, buf
);
2718 current_insn_ptr
= buildaddr
;
2719 i386_emit_call (fn
);
2720 EMIT_ASM32 (i386_void_call_2_b
,
2721 "lea 0x10(%esp),%esp\n\t"
2722 /* Restore original stack top. */
2728 i386_emit_eq_goto (int *offset_p
, int *size_p
)
2731 /* Check low half first, more likely to be decider */
2732 "cmpl %eax,(%esp)\n\t"
2733 "jne .Leq_fallthru\n\t"
2734 "cmpl %ebx,4(%esp)\n\t"
2735 "jne .Leq_fallthru\n\t"
2736 "lea 0x8(%esp),%esp\n\t"
2739 /* jmp, but don't trust the assembler to choose the right jump */
2740 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2741 ".Leq_fallthru:\n\t"
2742 "lea 0x8(%esp),%esp\n\t"
2753 i386_emit_ne_goto (int *offset_p
, int *size_p
)
2756 /* Check low half first, more likely to be decider */
2757 "cmpl %eax,(%esp)\n\t"
2759 "cmpl %ebx,4(%esp)\n\t"
2760 "je .Lne_fallthru\n\t"
2762 "lea 0x8(%esp),%esp\n\t"
2765 /* jmp, but don't trust the assembler to choose the right jump */
2766 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2767 ".Lne_fallthru:\n\t"
2768 "lea 0x8(%esp),%esp\n\t"
2779 i386_emit_lt_goto (int *offset_p
, int *size_p
)
2782 "cmpl %ebx,4(%esp)\n\t"
2784 "jne .Llt_fallthru\n\t"
2785 "cmpl %eax,(%esp)\n\t"
2786 "jnl .Llt_fallthru\n\t"
2788 "lea 0x8(%esp),%esp\n\t"
2791 /* jmp, but don't trust the assembler to choose the right jump */
2792 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2793 ".Llt_fallthru:\n\t"
2794 "lea 0x8(%esp),%esp\n\t"
2805 i386_emit_le_goto (int *offset_p
, int *size_p
)
2808 "cmpl %ebx,4(%esp)\n\t"
2810 "jne .Lle_fallthru\n\t"
2811 "cmpl %eax,(%esp)\n\t"
2812 "jnle .Lle_fallthru\n\t"
2814 "lea 0x8(%esp),%esp\n\t"
2817 /* jmp, but don't trust the assembler to choose the right jump */
2818 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2819 ".Lle_fallthru:\n\t"
2820 "lea 0x8(%esp),%esp\n\t"
2831 i386_emit_gt_goto (int *offset_p
, int *size_p
)
2834 "cmpl %ebx,4(%esp)\n\t"
2836 "jne .Lgt_fallthru\n\t"
2837 "cmpl %eax,(%esp)\n\t"
2838 "jng .Lgt_fallthru\n\t"
2840 "lea 0x8(%esp),%esp\n\t"
2843 /* jmp, but don't trust the assembler to choose the right jump */
2844 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2845 ".Lgt_fallthru:\n\t"
2846 "lea 0x8(%esp),%esp\n\t"
2857 i386_emit_ge_goto (int *offset_p
, int *size_p
)
2860 "cmpl %ebx,4(%esp)\n\t"
2862 "jne .Lge_fallthru\n\t"
2863 "cmpl %eax,(%esp)\n\t"
2864 "jnge .Lge_fallthru\n\t"
2866 "lea 0x8(%esp),%esp\n\t"
2869 /* jmp, but don't trust the assembler to choose the right jump */
2870 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2871 ".Lge_fallthru:\n\t"
2872 "lea 0x8(%esp),%esp\n\t"
2882 struct emit_ops i386_emit_ops
=
2890 i386_emit_rsh_signed
,
2891 i386_emit_rsh_unsigned
,
2899 i386_emit_less_signed
,
2900 i386_emit_less_unsigned
,
2904 i386_write_goto_address
,
2909 i386_emit_stack_flush
,
2912 i386_emit_stack_adjust
,
2913 i386_emit_int_call_1
,
2914 i386_emit_void_call_2
,
2924 static struct emit_ops
*
2928 int use_64bit
= register_size (0) == 8;
2931 return &amd64_emit_ops
;
2934 return &i386_emit_ops
;
2937 /* This is initialized assuming an amd64 target.
2938 x86_arch_setup will correct it for i386 or amd64 targets. */
2940 struct linux_target_ops the_low_target
=
2956 x86_stopped_by_watchpoint
,
2957 x86_stopped_data_address
,
2958 /* collect_ptrace_register/supply_ptrace_register are not needed in the
2959 native i386 case (no registers smaller than an xfer unit), and are not
2960 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
2963 /* need to fix up i386 siginfo if host is amd64 */
2965 x86_linux_new_process
,
2966 x86_linux_new_thread
,
2967 x86_linux_prepare_to_resume
,
2968 x86_linux_process_qsupported
,
2969 x86_supports_tracepoints
,
2970 x86_get_thread_area
,
2971 x86_install_fast_tracepoint_jump_pad
,
2973 x86_get_min_fast_tracepoint_insn_len
,