]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdbserver/linux-x86-low.cc
sme: Add support for SME
[thirdparty/binutils-gdb.git] / gdbserver / linux-x86-low.cc
CommitLineData
d0722149
DE
1/* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
213516ef 3 Copyright (C) 2002-2023 Free Software Foundation, Inc.
d0722149
DE
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
d41f6d8e 20#include "server.h"
d0722149 21#include <signal.h>
6a271cae 22#include <limits.h>
f4647387 23#include <inttypes.h>
d0722149
DE
24#include "linux-low.h"
25#include "i387-fp.h"
df7e5265 26#include "x86-low.h"
268a13a5 27#include "gdbsupport/x86-xstate.h"
03e6fe7e 28#include "nat/x86-xstate.h"
5826e159 29#include "nat/gdb_ptrace.h"
d0722149 30
93813b37
WT
31#ifdef __x86_64__
32#include "nat/amd64-linux-siginfo.h"
33#endif
34
d0722149 35#include "gdb_proc_service.h"
b5737fa9
PA
36/* Don't include elf/common.h if linux/elf.h got included by
37 gdb_proc_service.h. */
38#ifndef ELFMAG0
39#include "elf/common.h"
40#endif
41
268a13a5 42#include "gdbsupport/agent.h"
3aee8918 43#include "tdesc.h"
c144c7a0 44#include "tracepoint.h"
f699aaba 45#include "ax.h"
7b669087 46#include "nat/linux-nat.h"
4b134ca1 47#include "nat/x86-linux.h"
8e5d4070 48#include "nat/x86-linux-dregs.h"
ae91f625 49#include "linux-x86-tdesc.h"
a196ebeb 50
3aee8918 51#ifdef __x86_64__
51a948fd 52static target_desc_up tdesc_amd64_linux_no_xml;
3aee8918 53#endif
51a948fd 54static target_desc_up tdesc_i386_linux_no_xml;
3aee8918 55
1570b33e 56
fa593d66 57static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
405f8e94 58static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
fa593d66 59
1570b33e
L
60/* Backward compatibility for gdb without XML support. */
61
db92ac45 62static const char xmltarget_i386_linux_no_xml[] = "@<target>\
1570b33e
L
63<architecture>i386</architecture>\
64<osabi>GNU/Linux</osabi>\
65</target>";
f6d1620c
L
66
67#ifdef __x86_64__
db92ac45 68static const char xmltarget_amd64_linux_no_xml[] = "@<target>\
1570b33e
L
69<architecture>i386:x86-64</architecture>\
70<osabi>GNU/Linux</osabi>\
71</target>";
f6d1620c 72#endif
d0722149
DE
73
74#include <sys/reg.h>
75#include <sys/procfs.h>
1570b33e
L
76#include <sys/uio.h>
77
d0722149
DE
78#ifndef PTRACE_GET_THREAD_AREA
79#define PTRACE_GET_THREAD_AREA 25
80#endif
81
82/* This definition comes from prctl.h, but some kernels may not have it. */
83#ifndef PTRACE_ARCH_PRCTL
84#define PTRACE_ARCH_PRCTL 30
85#endif
86
87/* The following definitions come from prctl.h, but may be absent
88 for certain configurations. */
89#ifndef ARCH_GET_FS
90#define ARCH_SET_GS 0x1001
91#define ARCH_SET_FS 0x1002
92#define ARCH_GET_FS 0x1003
93#define ARCH_GET_GS 0x1004
94#endif
95
ef0478f6
TBA
/* Linux target op definitions for the x86 architecture.
   This is initialized assuming an amd64 target.
   'low_arch_setup' will correct it for i386 or amd64 targets.  */

class x86_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

  void process_qsupported (gdb::array_view<const char * const> features) override;

  bool supports_tracepoints () override;

  bool supports_fast_tracepoints () override;

  int install_fast_tracepoint_jump_pad
    (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
     CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
     CORE_ADDR *trampoline, ULONGEST *trampoline_size,
     unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
     CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
     char *err) override;

  int get_min_fast_tracepoint_insn_len () override;

  struct emit_ops *emit_ops () override;

  int get_ipa_tdesc_idx () override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  int low_decr_pc_after_break () override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;

  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */

  /* Need to fix up i386 siginfo if host is amd64.  */
  bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
			  int direction) override;

  arch_process_info *low_new_process () override;

  void low_delete_process (arch_process_info *info) override;

  void low_new_thread (lwp_info *) override;

  void low_delete_thread (arch_lwp_info *) override;

  void low_new_fork (process_info *parent, process_info *child) override;

  void low_prepare_to_resume (lwp_info *lwp) override;

  int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;

  bool low_supports_range_stepping () override;

  bool low_supports_catch_syscall () override;

  void low_get_syscall_trapinfo (regcache *regcache, int *sysno) override;

private:

  /* Update all the target description of all processes; a new GDB
     connected, and it may or not support xml target descriptions.  */
  void update_xmltarget ();
};
192
193/* The singleton target ops object. */
194
195static x86_target the_x86_target;
196
aa5ca48f
DE
197/* Per-process arch-specific data we want to keep. */
198
199struct arch_process_info
200{
df7e5265 201 struct x86_debug_reg_state debug_reg_state;
aa5ca48f
DE
202};
203
d0722149
DE
204#ifdef __x86_64__
205
206/* Mapping between the general-purpose registers in `struct user'
207 format and GDB's register array layout.
208 Note that the transfer layout uses 64-bit regs. */
209static /*const*/ int i386_regmap[] =
210{
211 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
212 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
213 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
214 DS * 8, ES * 8, FS * 8, GS * 8
215};
216
217#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
218
219/* So code below doesn't have to care, i386 or amd64. */
220#define ORIG_EAX ORIG_RAX
bc9540e8 221#define REGSIZE 8
d0722149
DE
222
223static const int x86_64_regmap[] =
224{
225 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
226 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
227 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
228 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
229 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
230 DS * 8, ES * 8, FS * 8, GS * 8,
231 -1, -1, -1, -1, -1, -1, -1, -1,
232 -1, -1, -1, -1, -1, -1, -1, -1,
233 -1, -1, -1, -1, -1, -1, -1, -1,
a196ebeb
WT
234 -1,
235 -1, -1, -1, -1, -1, -1, -1, -1,
236 ORIG_RAX * 8,
2735833d 237 21 * 8, 22 * 8,
a196ebeb 238 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
01f9f808
MS
239 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
240 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
241 -1, -1, -1, -1, -1, -1, -1, -1,
242 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
243 -1, -1, -1, -1, -1, -1, -1, -1,
244 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
245 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
246 -1, -1, -1, -1, -1, -1, -1, -1,
247 -1, -1, -1, -1, -1, -1, -1, -1,
51547df6
MS
248 -1, -1, -1, -1, -1, -1, -1, -1,
249 -1 /* pkru */
d0722149
DE
250};
251
252#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
9e0aa64f 253#define X86_64_USER_REGS (GS + 1)
d0722149
DE
254
255#else /* ! __x86_64__ */
256
257/* Mapping between the general-purpose registers in `struct user'
258 format and GDB's register array layout. */
259static /*const*/ int i386_regmap[] =
260{
261 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
262 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
263 EIP * 4, EFL * 4, CS * 4, SS * 4,
264 DS * 4, ES * 4, FS * 4, GS * 4
265};
266
267#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
268
bc9540e8
PA
269#define REGSIZE 4
270
d0722149 271#endif
3aee8918
PA
272
273#ifdef __x86_64__
274
4855cbdc 275/* Returns true if THREAD belongs to a x86-64 process, per the tdesc. */
3aee8918
PA
276
277static int
4855cbdc 278is_64bit_tdesc (thread_info *thread)
3aee8918 279{
4855cbdc 280 struct regcache *regcache = get_thread_regcache (thread, 0);
3aee8918
PA
281
282 return register_size (regcache->tdesc, 0) == 8;
283}
284
285#endif
286
d0722149
DE
287\f
/* Called by libthread_db.  Store in *BASE the thread-area (TLS) base
   address for thread LWPID's segment IDX, returning a ps_err_e code.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
  gdb_assert (lwp != nullptr);
  int use_64bit = is_64bit_tdesc (get_lwp_thread (lwp));

  if (use_64bit)
    {
      /* 64-bit inferiors: IDX selects which segment base register to
	 read via arch_prctl; anything other than FS/GS is invalid.  */
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    /* 32-bit path: read the thread's GDT entry IDX; the base address
       lives in the second word of the descriptor.  */
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
fa593d66
PA
330
/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

int
x86_target::low_get_thread_area (int lwpid, CORE_ADDR *addr)
{
  lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
  gdb_assert (lwp != nullptr);
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc (get_lwp_thread (lwp));

  if (use_64bit)
    {
      /* 64-bit inferiors keep the thread pointer in the FS base.  */
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    /* On 32-bit, %gs holds a selector for the thread's GDT entry; the
       low three bits (RPL/TI) are stripped to get the entry index.  */
    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (thr),
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    /* The segment base is the second word of the descriptor.  */
    *addr = desc[1];
    return 0;
  }
}
378
379
d0722149 380\f
daca57a7
TBA
/* Return true if register REGNO cannot be written via the usrregs
   (PTRACE_POKEUSER) path.  On a 64-bit tdesc regsets are always used,
   so nothing is off limits there.  */

bool
x86_target::low_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc (current_thread))
    return false;
#endif

  /* Only the general-purpose registers are stored via usrregs.  */
  return regno >= I386_NUM_REGS;
}
391
daca57a7
TBA
/* Return true if register REGNO cannot be read via the usrregs
   (PTRACE_PEEKUSER) path; mirror image of low_cannot_store_register.  */

bool
x86_target::low_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc (current_thread))
    return false;
#endif

  /* Only the general-purpose registers are fetched via usrregs.  */
  return regno >= I386_NUM_REGS;
}
402
037e8112
TV
/* Collect register REGNO from REGCACHE into BUF, widening the 4-byte
   register to fill the 8-byte slot a biarch gregset reserves for it.  */

static void
collect_register_i386 (struct regcache *regcache, int regno, void *buf)
{
  collect_register (regcache, regno, buf);

#ifdef __x86_64__
  /* In case of x86_64 -m32, collect_register only writes 4 bytes, but the
     space reserved in buf for the register is 8 bytes.  Make sure the entire
     reserved space is initialized.  */

  gdb_assert (register_size (regcache->tdesc, regno) == 4);

  if (regno == RAX)
    {
      /* Sign extend EAX value to avoid potential syscall restart
	 problems.

	 See amd64_linux_collect_native_gregset() in
	 gdb/amd64-linux-nat.c for a detailed explanation.  */
      *(int64_t *) buf = *(int32_t *) buf;
    }
  else
    {
      /* Zero-extend.  */
      *(uint64_t *) buf = *(uint32_t *) buf;
    }
#endif
}
431
/* Fill BUF (a ptrace gregset image) with the general registers held in
   REGCACHE, using the layout that matches the tdesc's word size.  */

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  /* An 8-byte register 0 means a 64-bit tdesc, hence 64-bit layout.  */
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register_i386 (regcache, i, ((char *) buf) + i386_regmap[i]);

  /* Handle ORIG_EAX, which is not in i386_regmap.  */
  collect_register_i386 (regcache, find_regno (regcache->tdesc, "orig_eax"),
			 ((char *) buf) + ORIG_EAX * REGSIZE);
}
455
/* Supply the general registers in BUF (a ptrace gregset image) to
   REGCACHE; inverse of x86_fill_gregset.  */

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  /* An 8-byte register 0 means a 64-bit tdesc, hence 64-bit layout.  */
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  /* orig_eax is not in i386_regmap; supply it by name.  */
  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}
478
/* Fill BUF with the FP registers from REGCACHE, in fxsave format on
   64-bit hosts and classic fsave format on 32-bit hosts.  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}
488
/* Supply the FP registers in BUF to REGCACHE; format matches
   x86_fill_fpregset (fxsave on 64-bit, fsave on 32-bit).  */

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}
498
499#ifndef __x86_64__
500
/* Fill BUF with the extended FP registers (fxsave format) from
   REGCACHE.  32-bit-host only; 64-bit uses x86_fill_fpregset.  */

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}
506
/* Supply the extended FP registers (fxsave format) in BUF to
   REGCACHE.  32-bit-host only counterpart of x86_fill_fpxregset.  */

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}
512
513#endif
514
1570b33e
L
/* Fill BUF with the XSAVE extended state from REGCACHE.  */

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}
520
/* Supply the XSAVE extended state in BUF to REGCACHE.  */

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
526
d0722149
DE
527/* ??? The non-biarch i386 case stores all the i387 regs twice.
528 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
529 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
530 doesn't work. IWBN to avoid the duplication in the case where it
531 does work. Maybe the arch_setup routine could check whether it works
3aee8918 532 and update the supported regsets accordingly. */
d0722149 533
3aee8918 534static struct regset_info x86_regsets[] =
d0722149
DE
535{
536#ifdef HAVE_PTRACE_GETREGS
1570b33e 537 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
d0722149
DE
538 GENERAL_REGS,
539 x86_fill_gregset, x86_store_gregset },
1570b33e
L
540 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
541 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
d0722149
DE
542# ifndef __x86_64__
543# ifdef HAVE_PTRACE_GETFPXREGS
1570b33e 544 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
d0722149
DE
545 EXTENDED_REGS,
546 x86_fill_fpxregset, x86_store_fpxregset },
547# endif
548# endif
1570b33e 549 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
d0722149
DE
550 FP_REGS,
551 x86_fill_fpregset, x86_store_fpregset },
552#endif /* HAVE_PTRACE_GETREGS */
50bc912a 553 NULL_REGSET
d0722149
DE
554};
555
bf9ae9d8
TBA
/* Software breakpoints are always available on x86 GNU/Linux.  */

bool
x86_target::low_supports_breakpoints ()
{
  return true;
}
561
/* Return the program counter from REGCACHE, reading "rip" or "eip"
   depending on the tdesc's word size.  */

CORE_ADDR
x86_target::low_get_pc (regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      uint32_t pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}
582
bf9ae9d8
TBA
/* Write PC into REGCACHE as "rip" or "eip" depending on the tdesc's
   word size; counterpart of low_get_pc.  */

void
x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      uint32_t newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}
d4807ea2
TBA
601
/* After an int3 trap the reported PC points one byte past the
   breakpoint instruction; back up by that one byte.  */

int
x86_target::low_decr_pc_after_break ()
{
  return 1;
}
607
d0722149 608\f
dd373349 609static const gdb_byte x86_breakpoint[] = { 0xCC };
d0722149
DE
610#define x86_breakpoint_len 1
611
d7146cda
TBA
612bool
613x86_target::low_breakpoint_at (CORE_ADDR pc)
d0722149
DE
614{
615 unsigned char c;
616
d7146cda 617 read_memory (pc, &c, 1);
d0722149 618 if (c == 0xCC)
d7146cda 619 return true;
d0722149 620
d7146cda 621 return false;
d0722149
DE
622}
623\f
42995dbd 624/* Low-level function vector. */
df7e5265 625struct x86_dr_low_type x86_dr_low =
42995dbd 626 {
d33472ad
GB
627 x86_linux_dr_set_control,
628 x86_linux_dr_set_addr,
629 x86_linux_dr_get_addr,
630 x86_linux_dr_get_status,
631 x86_linux_dr_get_control,
42995dbd
GB
632 sizeof (void *),
633 };
aa5ca48f 634\f
90d74c30 635/* Breakpoint/Watchpoint support. */
aa5ca48f 636
007c9b97
TBA
637bool
638x86_target::supports_z_point_type (char z_type)
802e8e6d
PA
639{
640 switch (z_type)
641 {
642 case Z_PACKET_SW_BP:
643 case Z_PACKET_HW_BP:
644 case Z_PACKET_WRITE_WP:
645 case Z_PACKET_ACCESS_WP:
007c9b97 646 return true;
802e8e6d 647 default:
007c9b97 648 return false;
802e8e6d
PA
649 }
650}
651
9db9aa23
TBA
/* Insert a hardware breakpoint or watchpoint of TYPE covering ADDR /
   SIZE via the debug registers.  Returns 0 on success, non-zero on
   failure or for unsupported types (software breakpoints are handled
   generically, not here).  */

int
x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			      int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	/* Debug register mirror is per-process state.  */
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}
677
9db9aa23
TBA
/* Remove a hardware breakpoint or watchpoint previously inserted by
   low_insert_point.  Returns 0 on success, non-zero on failure or for
   unsupported types.  */

int
x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			      int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	/* Debug register mirror is per-process state.  */
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }
    default:
      /* Unsupported.  */
      return 1;
    }
}
702
ac1bbaca
TBA
/* Return true if the current process stopped because a debug-register
   watchpoint triggered.  */

bool
x86_target::low_stopped_by_watchpoint ()
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}
709
ac1bbaca
TBA
/* Return the data address that triggered the watchpoint stop, or 0 if
   none can be determined from the debug register state.  */

CORE_ADDR
x86_target::low_stopped_data_address ()
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
				   &addr))
    return addr;
  return 0;
}
720\f
/* Called when a new process is created.  Allocate the per-process
   arch data (zero-initialized debug register mirror).  Ownership
   passes to the caller; freed by low_delete_process.  */

arch_process_info *
x86_target::low_new_process ()
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}
732
04ec7890
SM
/* Called when a process is being deleted.  Release the arch data
   allocated by low_new_process.  */

void
x86_target::low_delete_process (arch_process_info *info)
{
  xfree (info);
}
740
fd000fb3
TBA
/* Called when a new thread appears; delegate to the shared
   native-layer hook.  */

void
x86_target::low_new_thread (lwp_info *lwp)
{
  /* This comes from nat/.  */
  x86_linux_new_thread (lwp);
}
3a8a0396 747
fd000fb3
TBA
/* Called when a thread is destroyed; delegate to the shared
   native-layer hook.  */

void
x86_target::low_delete_thread (arch_lwp_info *alwp)
{
  /* This comes from nat/.  */
  x86_linux_delete_thread (alwp);
}
754
/* Target routine for new_fork.  Copy PARENT's debug register mirror
   into CHILD.  */

void
x86_target::low_new_fork (process_info *parent, process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernel before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked off process.  Copy the debug registers mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug registers mirror will become zeroed
     in the end before detaching the forked off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}
782
d7599cc0
TBA
/* Called before resuming LWP; delegate to the shared native-layer
   hook (which syncs the debug register mirror to the thread).  */

void
x86_target::low_prepare_to_resume (lwp_info *lwp)
{
  /* This comes from nat/.  */
  x86_linux_prepare_to_resume (lwp);
}
789
70a0bb6b
GB
790/* See nat/x86-dregs.h. */
791
792struct x86_debug_reg_state *
793x86_debug_reg_state (pid_t pid)
794{
795 struct process_info *proc = find_process_pid (pid);
796
797 return &proc->priv->arch_private->debug_reg_state;
798}
aa5ca48f 799\f
d0722149
DE
800/* When GDBSERVER is built as a 64-bit application on linux, the
801 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
802 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
803 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
804 conversion in-place ourselves. */
805
9cf12d57 806/* Convert a ptrace/host siginfo object, into/from the siginfo in the
d0722149
DE
807 layout of the inferiors' architecture. Returns true if any
808 conversion was done; false otherwise. If DIRECTION is 1, then copy
9cf12d57 809 from INF to PTRACE. If DIRECTION is 0, copy from PTRACE to
d0722149
DE
810 INF. */
811
cb63de7c
TBA
/* Convert a siginfo object between the ptrace/host layout and the
   inferior's layout, per the comment above.  Returns true if a
   conversion was performed.  */

bool
x86_target::low_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc (current_thread))
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_X32);
#endif

  /* Layouts already match; nothing to convert.  */
  return false;
}
832\f
1570b33e
L
833static int use_xml;
834
3aee8918
PA
835/* Format of XSAVE extended state is:
836 struct
837 {
838 fxsave_bytes[0..463]
839 sw_usable_bytes[464..511]
840 xstate_hdr_bytes[512..575]
841 avx_bytes[576..831]
842 future_state etc
843 };
844
845 Same memory layout will be used for the coredump NT_X86_XSTATE
846 representing the XSAVE extended state registers.
847
848 The first 8 bytes of the sw_usable_bytes[464..467] is the OS enabled
849 extended state mask, which is the same as the extended control register
850 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
851 together with the mask saved in the xstate_hdr_bytes to determine what
852 states the processor/OS supports and what state, used or initialized,
853 the process/thread is in. */
854#define I386_LINUX_XSAVE_XCR0_OFFSET 464
855
856/* Does the current host support the GETFPXREGS request? The header
857 file may or may not define it, and even if it is defined, the
858 kernel will return EIO if it's running on a pre-SSE processor. */
859int have_ptrace_getfpxregs =
860#ifdef HAVE_PTRACE_GETFPXREGS
861 -1
862#else
863 0
864#endif
865;
1570b33e 866
3aee8918
PA
/* Get Linux/x86 target description from running target.

   Probes the inferior (via ptrace) for FPX and XSAVE support, caches
   the XCR0 mask and XSAVE buffer length in function-static variables,
   resizes the XSTATE regset accordingly, and returns the matching
   target description.  Errors out on 32-bit-gdbserver/64-bit-inferior
   mismatches.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  /* Static: probed once per gdbserver session, reused afterwards.  */
  static uint64_t xcr0;
  static int xsave_len;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  /* First call on a pre-SSE kernel/CPU: PTRACE_GETFPXREGS may fail
     with EIO; fall back to the plain x87 description.  */
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return i386_linux_read_description (X86_XSTATE_X87);
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml.get ();
      else
#endif
	return tdesc_i386_linux_no_xml.get ();
    }

  /* Probe PTRACE_GETREGSET support once (have_ptrace_getregset starts
     out as -1 meaning "unknown").  */
  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  xsave_len = x86_xsave_length ();

	  /* Use PTRACE_GETREGSET if it is available.  Size the XSTATE
	     regset to the real XSAVE length and disable the now
	     redundant non-general regsets.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = xsave_len;
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    i387_set_xsave_mask (xcr0, xsave_len);

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	{
	  tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
						!is_elf64);
	}

      /* Fall back to a plain SSE description if the XCR0-based lookup
	 yielded nothing.  */
      if (tdesc == NULL)
	tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
      return tdesc;
#endif
    }
  else
    {
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);

      if (tdesc == NULL)
	tdesc = i386_linux_read_description (X86_XSTATE_SSE);

      return tdesc;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}
991
3aee8918
PA
/* Update all the target description of all processes; a new GDB
   connected, and it may or not support xml target descriptions.  */

void
x86_target::update_xmltarget ()
{
  /* Restore the originally selected thread when we're done switching
     around below.  */
  scoped_restore_current_thread restore_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_process ([this] (process_info *proc) {
    int pid = proc->pid;

    /* Look up any thread of this process.  */
    switch_to_thread (find_any_thread_of_pid (pid));

    /* Re-read the tdesc for that process.  */
    low_arch_setup ();
  });
}
1014
/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

void
x86_target::process_qsupported (gdb::array_view<const char * const> features)
{
  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;

  for (const char *feature : features)
    {
      if (startswith (feature, "xmlRegisters="))
	{
	  /* Skip the "xmlRegisters=" prefix (13 characters) and scan
	     the comma-separated architecture list for "i386".  */
	  char *copy = xstrdup (feature + 13);

	  char *saveptr;
	  for (char *p = strtok_r (copy, ",", &saveptr);
	       p != NULL;
	       p = strtok_r (NULL, ",", &saveptr))
	    {
	      if (strcmp (p, "i386") == 0)
		{
		  use_xml = 1;
		  break;
		}
	    }

	  free (copy);
	}
    }

  /* Refresh every process's tdesc now that we know whether GDB can
     consume XML target descriptions.  */
  update_xmltarget ();
}
1050
/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
/* 64-bit inferiors are accessed purely via regsets; no usrregs
   (PTRACE_PEEKUSER) fallback table is needed.  */
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif
/* PTRACE_PEEKUSER/POKEUSER register map for 32-bit inferiors.  */
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };
/* Return the register access description matching the current
   thread's target description (64-bit vs. 32-bit).  */

const regs_info *
x86_target::get_regs_info ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc (current_thread))
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}
d0722149 1091
/* Initialize the target description for the architecture of the
   inferior.  */

void
x86_target::low_arch_setup ()
{
  current_process ()->tdesc = x86_linux_read_description ();
}
1100
/* Syscall catchpoints are supported on x86/x86-64 GNU/Linux.  */

bool
x86_target::low_supports_catch_syscall ()
{
  return true;
}
1106
/* Fill *SYSNO with the syscall nr trapped.  This should only be
   called if LWP got a SYSCALL_SIGTRAP.  */

void
x86_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
  /* Register 0 is 8 bytes wide only on 64-bit target descriptions.  */
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      /* orig_rax holds the syscall number saved at syscall entry.  */
      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}
1125
/* Tracepoints are supported on this target.  */

bool
x86_target::supports_tracepoints ()
{
  return true;
}
1131
/* Write LEN bytes from BUF into the inferior at *TO, and advance *TO
   past the written bytes.  */

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}
1138
/* Decode OP, a string of whitespace-separated hexadecimal byte values
   (e.g. "48 89 e6"), into BUF.  Return the number of bytes stored.
   BUF must be large enough for all bytes encoded in OP.  */

static int
push_opcode (unsigned char *buf, const char *op)
{
  int count = 0;

  for (;;)
    {
      char *next;
      unsigned long byte = strtoul (op, &next, 16);

      /* strtoul leaves NEXT == OP when no digits were consumed,
	 which terminates the scan.  */
      if (next == op)
	break;

      buf[count++] = (unsigned char) byte;
      op = next;
    }

  return count;
}
1158
#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  Returns 0 on success, 1 (with a message in
   ERR) if either jump displacement exceeds 32 bits.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  /* The tracepoint address is pushed last so it sits on top of the
     saved-register block.  */
  memcpy (buf + i, &tpaddr, 8);
  i += 8;
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Try to CAS our stack pointer into *lockaddr; loop
     until we win the lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");	/* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  Mirror image of the save sequence above;
     the first add skips the saved tpaddr slot.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */
1349
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.  Returns 0 on success, 1 (with a message in
   ERR) if trampoline space could not be claimed.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	/* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	/* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");	/* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");	/* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  Mirror image of the save sequence.  */
  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      target_write_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
1537
/* Fast tracepoints are supported on this target.  */

bool
x86_target::supports_fast_tracepoints ()
{
  return true;
}
1543
/* Dispatch jump-pad installation to the amd64 or i386 variant,
   according to the current thread's target description.  */

int
x86_target::install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
					      CORE_ADDR tpaddr,
					      CORE_ADDR collector,
					      CORE_ADDR lockaddr,
					      ULONGEST orig_size,
					      CORE_ADDR *jump_entry,
					      CORE_ADDR *trampoline,
					      ULONGEST *trampoline_size,
					      unsigned char *jjump_pad_insn,
					      ULONGEST *jjump_pad_insn_size,
					      CORE_ADDR *adjusted_insn_addr,
					      CORE_ADDR *adjusted_insn_addr_end,
					      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc (current_thread))
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}
1582
/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  Returns 0 when the minimum is not yet known (IPA not
   loaded).  */

int
x86_target::get_min_fast_tracepoint_insn_len ()
{
  /* Warn-once flag; persists across calls.  */
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (is_64bit_tdesc (current_thread))
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
	 with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
	return 4;
      else
	{
	  /* GDB has no channel to explain to user why a shorter fast
	     tracepoint is not possible, but at least make GDBserver
	     mention that something has gone awry.  */
	  if (!warned_about_fast_tracepoints)
	    {
	      warning ("4-byte fast tracepoints not available; %s", errbuf);
	      warned_about_fast_tracepoints = 1;
	    }
	  return 5;
	}
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
	 has not loaded yet.  */
      return 0;
    }
}
1629
/* Append LEN bytes from START at the current agent-expression
   emission point, advancing CURRENT_INSN_PTR past them.  */

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  threads_debug_printf ("Adding %d bytes of insn at %s",
			len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}
1641
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

/* Copy the machine code between the start_NAME/end_NAME labels of the
   embedded asm into the jump pad.  The leading jmp keeps the embedded
   block from executing in gdbserver itself.  */
#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

/* Same, but assemble the block as 32-bit code (for 32-bit inferiors
   debugged by a 64-bit gdbserver).  */
#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
1679
#ifdef __x86_64__

/* Agent expression bytecode compiler for amd64: each amd64_emit_*
   function appends machine code for one bytecode operation.  The
   compiled code keeps the expression's top-of-stack in %rax and the
   rest on the machine stack.  */

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}


static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}

/* The following operations are not implemented for amd64 codegen;
   setting emit_error makes the compiler fall back.  */

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}
1744
/* Sign-extend the low ARG bits of %rax to the full register.  */

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}

/* Logical not: %rax = (%rax == 0).  */

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}

/* Comparisons: pop one operand, compare with %rax, leave 0/1 in
   %rax.  */

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}
1851
/* Dereference: load SIZE bytes from the address in %rax into %rax.  */

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}

/* Conditional branch on top-of-stack.  The 4-byte jump displacement
   lives at offset *OFFSET_P and is patched later by
   amd64_write_goto_address.  */

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

/* Unconditional branch; displacement patched later.  */

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

/* Patch the SIZE-byte relative displacement at FROM so the emitted
   jump lands on TO.  Only 4-byte displacements are supported.  */

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}
1916
/* Load the 64-bit constant NUM into %rax.  */

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* Emit a call to FN, using a 32-bit relative call when FN is within
   reach, otherwise an indirect call through %r10.  */

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function being in the shared library, may be
     >31-bits away off the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
	 a register, so avoid it if possible.  Use r10, since it is
	 call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%r10 */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%r10 */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* Fetch raw register REG by calling the IPA's get_raw_reg helper.  */

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}
1991
/* Pop the expression stack into %rax.  */

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

/* Push %rax (the cached top-of-stack) onto the machine stack.  */

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}

/* Zero-extend the low ARG bits of %rax.  */

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

/* Swap %rax with the value on top of the machine stack.  */

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}

/* Discard N 8-byte entries from the machine stack.  */

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
2055
/* FN's prototype is `LONGEST(*fn)(int)'.  Emit a call passing ARG1 in
   %edi; the result is left in %rax.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  Emit a call passing
   ARG1 in %edi and the expression top-of-stack in %rsi, preserving
   %rax across the call.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}
2101
df4a0200 2102static void
6b9801d4
SS
2103amd64_emit_eq_goto (int *offset_p, int *size_p)
2104{
2105 EMIT_ASM (amd64_eq,
2106 "cmp %rax,(%rsp)\n\t"
2107 "jne .Lamd64_eq_fallthru\n\t"
2108 "lea 0x8(%rsp),%rsp\n\t"
2109 "pop %rax\n\t"
2110 /* jmp, but don't trust the assembler to choose the right jump */
2111 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2112 ".Lamd64_eq_fallthru:\n\t"
2113 "lea 0x8(%rsp),%rsp\n\t"
2114 "pop %rax");
2115
2116 if (offset_p)
2117 *offset_p = 13;
2118 if (size_p)
2119 *size_p = 4;
2120}
2121
df4a0200 2122static void
6b9801d4
SS
2123amd64_emit_ne_goto (int *offset_p, int *size_p)
2124{
2125 EMIT_ASM (amd64_ne,
2126 "cmp %rax,(%rsp)\n\t"
2127 "je .Lamd64_ne_fallthru\n\t"
2128 "lea 0x8(%rsp),%rsp\n\t"
2129 "pop %rax\n\t"
2130 /* jmp, but don't trust the assembler to choose the right jump */
2131 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2132 ".Lamd64_ne_fallthru:\n\t"
2133 "lea 0x8(%rsp),%rsp\n\t"
2134 "pop %rax");
2135
2136 if (offset_p)
2137 *offset_p = 13;
2138 if (size_p)
2139 *size_p = 4;
2140}
2141
/* Emit a signed "less than, then goto" comparison for amd64.  Note the
   operand order: the condition tested is (%rsp) < %rax, i.e. the
   second stack entry against the top.  See amd64_emit_eq_goto for the
   offset/size protocol.  */

static void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
	    "cmp %rax,(%rsp)\n\t"
	    "jnl .Lamd64_lt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_lt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  /* Offset of the rel32 field of the raw 0xe9 jump above.  */
  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2161
/* Emit a signed "less or equal, then goto" comparison for amd64
   ((%rsp) <= %rax).  See amd64_emit_eq_goto for the offset/size
   protocol.  */

static void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
	    "cmp %rax,(%rsp)\n\t"
	    "jnle .Lamd64_le_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_le_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  /* Offset of the rel32 field of the raw 0xe9 jump above.  */
  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2181
/* Emit a signed "greater than, then goto" comparison for amd64
   ((%rsp) > %rax).  See amd64_emit_eq_goto for the offset/size
   protocol.  */

static void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
	    "cmp %rax,(%rsp)\n\t"
	    "jng .Lamd64_gt_fallthru\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_gt_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  /* Offset of the rel32 field of the raw 0xe9 jump above.  */
  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2201
/* Emit a signed "greater or equal, then goto" comparison for amd64
   ((%rsp) >= %rax).  The .Lamd64_ge_jump label marks the taken path
   but is not referenced by the emitted code.  See amd64_emit_eq_goto
   for the offset/size protocol.  */

static void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
	    "cmp %rax,(%rsp)\n\t"
	    "jnge .Lamd64_ge_fallthru\n\t"
	    ".Lamd64_ge_jump:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax\n\t"
	    /* jmp, but don't trust the assembler to choose the right jump */
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	    ".Lamd64_ge_fallthru:\n\t"
	    "lea 0x8(%rsp),%rsp\n\t"
	    "pop %rax");

  /* Offset of the rel32 field of the raw 0xe9 jump above.  */
  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}
2222
/* The emit_ops vector used to compile agent-expression bytecode to
   native amd64 code.  The initializers are positional and must match
   the field order of struct emit_ops exactly.  */

static emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };
2263
2264#endif /* __x86_64__ */
2265
/* Emit the i386 function prologue for compiled agent expressions:
   set up the frame pointer and preserve %ebx, which the compiled code
   uses as the high half of the 64-bit value in %eax:%ebx.  */

static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp\n\t"
	      "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp). */
}
2276
/* Emit the i386 epilogue: store the 64-bit result (%eax low, %ebx
   high) through the value pointer at 12(%ebp), return 0 in %eax,
   restore %ebx and %ebp, and return.  */

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "xor %eax,%eax\n\t"
	      "pop %ebx\n\t"
	      "pop %ebp\n\t"
	      "ret");
}
2289
/* Emit a 64-bit add of the second stack entry into the top-of-stack
   pair %eax:%ebx, using add/adc to propagate the carry, then pop the
   consumed entry (8 bytes) via lea.  */

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2298
/* Emit a 64-bit subtract: compute (second entry) - (top %eax:%ebx)
   in place on the stack with sub/sbb, then pop the result into
   %eax:%ebx.  */

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t");
}
2308
/* 64-bit multiply is not implemented for the i386 emitter; flag an
   emit error so the expression falls back to interpretation.  */

static void
i386_emit_mul (void)
{
  emit_error = 1;
}
2314
/* 64-bit left shift is not implemented for the i386 emitter; flag an
   emit error so the expression falls back to interpretation.  */

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}
2320
/* 64-bit arithmetic right shift is not implemented for the i386
   emitter; flag an emit error to force fallback.  */

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}
2326
/* 64-bit logical right shift is not implemented for the i386 emitter;
   flag an emit error to force fallback.  */

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}
2332
/* Emit sign extension of the top-of-stack value from ARG bits (8, 16
   or 32) to the full 64-bit %eax:%ebx pair: extend within %eax, then
   replicate %eax's sign bit into %ebx with sarl $31.  Unsupported
   widths set emit_error.  */

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		  "cbtw\n\t"
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}
2360
/* Emit logical NOT of the 64-bit top-of-stack: result is 1 if the
   whole %eax:%ebx pair was zero, else 0, left in %eax with %ebx
   cleared.  */

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}
2371
/* Emit 64-bit bitwise AND of the two top stack entries into
   %eax:%ebx, then discard the consumed entry.  */

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2380
/* Emit 64-bit bitwise OR of the two top stack entries into
   %eax:%ebx, then discard the consumed entry.  */

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2389
/* Emit 64-bit bitwise XOR of the two top stack entries into
   %eax:%ebx, then discard the consumed entry.  */

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2398
/* Emit 64-bit bitwise NOT of the top-of-stack pair %eax:%ebx by
   XORing each half with all-ones.  */

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx\n\t");
}
2406
/* Emit a 64-bit equality test of the two top stack entries, comparing
   high halves first; leaves 1/0 in %eax with %ebx cleared and pops the
   consumed entry.  */

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2424
/* Emit a signed 64-bit "less than" test of (second entry) < (top),
   done as a two-word compare: signed compare of high halves decides
   unless they are equal, in which case the low halves decide.  Leaves
   1/0 in %eax with %ebx cleared.  */

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2443
/* Emit an unsigned 64-bit "less than" test of (second entry) < (top);
   same two-word structure as the signed variant but using jb for the
   unsigned comparisons.  Leaves 1/0 in %eax with %ebx cleared.  */

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2462
/* Emit a memory dereference of SIZE bytes (1, 2, 4 or 8) through the
   address in %eax, leaving the loaded value in %eax (and %ebx for the
   high word of an 8-byte load).  Note: unlike other emitters this does
   not zero or sign extend %ebx for sub-8-byte loads; a zero_ext/ext op
   is expected to follow.  Unsupported sizes emit nothing.  */

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}
2487
/* Emit a conditional goto: pop the 64-bit top-of-stack and jump when
   it is non-zero.  The jne is emitted as raw bytes (0x0f 0x85 rel32)
   so the layout is fixed; *OFFSET_P/*SIZE_P describe the rel32 field
   for later patching by i386_write_goto_address.  */

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}
2505
/* Emit an unconditional goto as a raw 0xe9 rel32 jump; the rel32
   (at offset 1, 4 bytes) is patched later.  */

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}
2517
/* Patch a previously emitted jump: write the PC-relative displacement
   (TO minus the end of the SIZE-byte field at FROM) into the inferior
   at FROM.  Only 4-byte displacement fields are supported; anything
   else sets emit_error.  */

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment. */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}
2534
/* Emit code loading the 64-bit constant NUM into %eax:%ebx: a mov
   immediate for the low word, and either a mov immediate or a shorter
   xor %ebx,%ebx for the high word when it is zero.  The opcode bytes
   are hand-assembled into BUF and appended at current_insn_ptr.  */

static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
2561
/* Emit a direct call to FN as 0xe8 rel32, where the displacement is
   relative to the end of the 5-byte call instruction.  NOTE(review):
   the (int) cast of FN assumes inferior addresses fit 32 bits, which
   holds for the i386 emitter.  */

static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}
2577
/* Emit code pushing the value of raw register REG: build the two
   arguments on the stack (the regs base address from 8(%ebp) and the
   register number REG as an immediate), call the IPA's get_raw_reg
   function, and leave the 32-bit result in %eax with %ebx cleared.  */

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	      "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t"
	      "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2603
/* Emit a pop of the next 64-bit stack entry into %eax:%ebx,
   discarding the current top.  */

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}
2611
/* Emit a push of the current top-of-stack pair %eax:%ebx onto the
   machine stack (high word first, so the low word is at the lower
   address).  */

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}
2619
/* Emit zero extension of the top-of-stack value from ARG bits (8, 16
   or 32) to 64 bits: mask %eax as needed and clear the high word
   %ebx.  Unsupported widths set emit_error.  */

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}
2643
/* Emit a swap of the two top 64-bit stack entries: stash %eax:%ebx in
   %ecx:%edx, pop the next entry into %eax:%ebx, and push the stashed
   pair back.  */

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}
2655
/* Emit code dropping N 64-bit entries from the expression stack, as
   "lea n*8(%esp),%esp" with an 8-bit displacement.  NOTE(review): the
   disp8 encoding limits n*8 to 127, i.e. N <= 15; larger adjustments
   would be mis-encoded — presumably N is always small here, verify
   against the bytecode compiler.  */

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
2671
/* FN's prototype is `LONGEST(*fn)(int)'. */

/* Emit a call to FN passing the immediate ARG1 as its single stack
   argument; the 64-bit result comes back in %eax:%edx and is moved
   into the expression top-of-stack pair %eax:%ebx.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space. */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack. */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2699
/* FN's prototype is `void(*fn)(int,LONGEST)'. */

/* Emit a call to FN with the immediate ARG1 as first argument and the
   current 64-bit top-of-stack (%eax:%ebx) as second; %eax is saved
   around the call and restored afterwards, since FN returns void and
   the stack top must survive.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
	      /* Preserve %eax only; we don't have to worry about %ebx. */
	      "push %eax\n\t"
	      /* Reserve a bit of stack space for arguments. */
	      "sub $0x10,%esp\n\t"
	      /* Copy "top" to the second argument position. (Note that
		 we can't assume function won't scribble on its
		 arguments, so don't try to restore from this.) */
	      "mov %eax,4(%esp)\n\t"
	      "mov %ebx,8(%esp)");
  /* Put the first argument on the stack. */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
	      "lea 0x10(%esp),%esp\n\t"
	      /* Restore original stack top. */
	      "pop %eax");
}
2735
/* Emit an "equal, then goto" comparison for i386: 64-bit compare of
   the two top stack entries, low halves first.  The taken-path jump is
   raw bytes (0xe9 rel32) patched later; *OFFSET_P (18) is the byte
   offset of its rel32 field and must match the preceding insn bytes.  */

static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Leq_fallthru\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Leq_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}
2761
/* Emit a "not equal, then goto" comparison for i386; see
   i386_emit_eq_goto for the offset/size protocol (rel32 at byte
   offset 18).  */

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
	      /* Check low half first, more likely to be decider */
	      "cmpl %eax,(%esp)\n\t"
	      "jne .Lne_jump\n\t"
	      "cmpl %ebx,4(%esp)\n\t"
	      "je .Lne_fallthru\n\t"
	      ".Lne_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lne_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}
2787
/* Emit a signed 64-bit "less than, then goto" for i386: compare high
   halves (signed) first; if equal, the low halves decide.  rel32 of
   the raw 0xe9 jump is at byte offset 20.  */

static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Llt_jump\n\t"
	      "jne .Llt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnl .Llt_fallthru\n\t"
	      ".Llt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Llt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
2813
/* Emit a signed 64-bit "less or equal, then goto" for i386; same
   two-word compare shape as i386_emit_lt_goto, rel32 at byte
   offset 20.  */

static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jle .Lle_jump\n\t"
	      "jne .Lle_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnle .Lle_fallthru\n\t"
	      ".Lle_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lle_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
2839
/* Emit a signed 64-bit "greater than, then goto" for i386; same
   two-word compare shape as i386_emit_lt_goto, rel32 at byte
   offset 20.  */

static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jg .Lgt_jump\n\t"
	      "jne .Lgt_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jng .Lgt_fallthru\n\t"
	      ".Lgt_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lgt_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
2865
/* Emit a signed 64-bit "greater or equal, then goto" for i386; same
   two-word compare shape as i386_emit_lt_goto, rel32 at byte
   offset 20.  */

static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jge .Lge_jump\n\t"
	      "jne .Lge_fallthru\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jnge .Lge_fallthru\n\t"
	      ".Lge_jump:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      /* jmp, but don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
	      ".Lge_fallthru:\n\t"
	      "lea 0x8(%esp),%esp\n\t"
	      "pop %eax\n\t"
	      "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}
2891
/* The emit_ops vector used to compile agent-expression bytecode to
   native i386 code.  The initializers are positional and must match
   the field order of struct emit_ops exactly.  */

static emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };
2932
2933
/* Return the emit_ops vector matching the current thread's target
   description: amd64 for 64-bit inferiors (64-bit builds only),
   i386 otherwise.  */

emit_ops *
x86_target::emit_ops ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc (current_thread))
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}
2944
/* Implementation of target ops method "sw_breakpoint_from_kind". */

/* Return the software breakpoint instruction bytes for KIND and store
   their length in *SIZE.  x86 has a single breakpoint kind, so KIND is
   ignored.  */

const gdb_byte *
x86_target::sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}
2953
/* x86 gdbserver supports the vCont;r range-stepping packet.  */

bool
x86_target::low_supports_range_stepping ()
{
  return true;
}
2959
/* Return the in-process-agent target-description index matching the
   current thread's tdesc.  NOTE(review): on 64-bit builds this
   returns amd64_get_ipa_tdesc_idx unconditionally, so the i386 code
   below is dead there — presumably that helper also maps 32-bit
   descriptions; confirm against linux-amd64-ipa.  */

int
x86_target::get_ipa_tdesc_idx ()
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);
  const struct target_desc *tdesc = regcache->tdesc;

#ifdef __x86_64__
  return amd64_get_ipa_tdesc_idx (tdesc);
#endif

  /* The no-XML (SSE-only) description has no XML-derived index; map it
     explicitly.  */
  if (tdesc == tdesc_i386_linux_no_xml.get ())
    return X86_TDESC_SSE;

  return i386_get_ipa_tdesc_idx (tdesc);
}
2975
/* The linux target ops object. */

/* Generic linux-low.cc code reaches the x86-specific target through
   this pointer.  */
linux_process_target *the_linux_target = &the_x86_target;
2979
/* One-time arch initialization: build the fallback ("no XML") target
   descriptions used when the client cannot process XML descriptions,
   and register the x86 regset info.  */

void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions. */
#ifdef __x86_64__
  /* Fallback amd64 description: SSE feature set, no x32.  */
  tdesc_amd64_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_amd64_linux_no_xml.get (),
			   amd64_linux_read_description (X86_XSTATE_SSE_MASK,
							 false));
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif

  /* Fallback i386 description: SSE feature set.  */
  tdesc_i386_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_i386_linux_no_xml.get (),
			   i386_linux_read_description (X86_XSTATE_SSE_MASK));
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}