/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2023 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include <signal.h>
#include <limits.h>
#include <inttypes.h>
#include "linux-low.h"
#include "i387-fp.h"
#include "x86-low.h"
#include "gdbsupport/x86-xstate.h"
#include "nat/gdb_ptrace.h"

#ifdef __x86_64__
#include "nat/amd64-linux-siginfo.h"
#endif

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "gdbsupport/agent.h"
#include "tdesc.h"
#include "tracepoint.h"
#include "ax.h"
#include "nat/linux-nat.h"
#include "nat/x86-linux.h"
#include "nat/x86-linux-dregs.h"
#include "linux-x86-tdesc.h"

#ifdef __x86_64__
static target_desc_up tdesc_amd64_linux_no_xml;
#endif
static target_desc_up tdesc_i386_linux_no_xml;


static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };

/* Backward compatibility for gdb without XML support.  */

static const char xmltarget_i386_linux_no_xml[] = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char xmltarget_amd64_linux_no_xml[] = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/uio.h>

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif

/* Linux target op definitions for the x86 architecture.
   This is initialized assuming an amd64 target.
   'low_arch_setup' will correct it for i386 or amd64 targets.  */

class x86_target : public linux_process_target
{
public:

  const regs_info *get_regs_info () override;

  const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;

  bool supports_z_point_type (char z_type) override;

  void process_qsupported (gdb::array_view<const char * const> features) override;

  bool supports_tracepoints () override;

  bool supports_fast_tracepoints () override;

  int install_fast_tracepoint_jump_pad
    (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
     CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
     CORE_ADDR *trampoline, ULONGEST *trampoline_size,
     unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
     CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
     char *err) override;

  int get_min_fast_tracepoint_insn_len () override;

  struct emit_ops *emit_ops () override;

  int get_ipa_tdesc_idx () override;

protected:

  void low_arch_setup () override;

  bool low_cannot_fetch_register (int regno) override;

  bool low_cannot_store_register (int regno) override;

  bool low_supports_breakpoints () override;

  CORE_ADDR low_get_pc (regcache *regcache) override;

  void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;

  int low_decr_pc_after_break () override;

  bool low_breakpoint_at (CORE_ADDR pc) override;

  int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			int size, raw_breakpoint *bp) override;

  bool low_stopped_by_watchpoint () override;

  CORE_ADDR low_stopped_data_address () override;

  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */

  /* Need to fix up i386 siginfo if host is amd64.  */
  bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
			  int direction) override;

  arch_process_info *low_new_process () override;

  void low_delete_process (arch_process_info *info) override;

  void low_new_thread (lwp_info *) override;

  void low_delete_thread (arch_lwp_info *) override;

  void low_new_fork (process_info *parent, process_info *child) override;

  void low_prepare_to_resume (lwp_info *lwp) override;

  int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;

  bool low_supports_range_stepping () override;

  bool low_supports_catch_syscall () override;

  void low_get_syscall_trapinfo (regcache *regcache, int *sysno) override;

private:

  /* Update the target description of all processes; a new GDB
     connected, and it may or may not support xml target
     descriptions.  */
  void update_xmltarget ();
};

/* The singleton target ops object.  */

static x86_target the_x86_target;

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
  21 * 8,  22 * 8,
  -1, -1, -1, -1,                  /* MPX registers BND0 ... BND3.  */
  -1, -1,                          /* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1,  /* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,  /* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,  /* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,  /* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1                               /* pkru  */
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)
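
/* For illustration: these maps are consumed by x86_fill_gregset and
   x86_store_gregset below; e.g. GDB register 0 (rax) is transferred
   at byte offset RAX * 8 within the ptrace register block, roughly

     collect_register (regcache, 0, (char *) buf + x86_64_regmap[0]);

   Entries of -1 mark registers that are not passed through this
   regset at all.  */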

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif

#ifdef __x86_64__

/* Returns true if THREAD belongs to an x86-64 process, per the tdesc.  */

static int
is_64bit_tdesc (thread_info *thread)
{
  struct regcache *regcache = get_thread_regcache (thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif

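/* For illustration: register 0 of the tdesc is rax on amd64 and eax
   on i386, so comparing its size against 8 is a cheap way to tell a
   64-bit process from a 32-bit one without inspecting the whole
   tdesc.  */
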

/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
  gdb_assert (lwp != nullptr);
  int use_64bit = is_64bit_tdesc (get_lwp_thread (lwp));

  if (use_64bit)
    {
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}
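
/* For illustration: PTRACE_GET_THREAD_AREA fills in a struct
   user_desc; viewed as an array of unsigned ints as above, element 1
   is the base_addr field, which is why only desc[1] is kept.  */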

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

int
x86_target::low_get_thread_area (int lwpid, CORE_ADDR *addr)
{
  lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
  gdb_assert (lwp != nullptr);
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc (get_lwp_thread (lwp));

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (thr),
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}
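
/* For illustration: in an x86 segment selector the low two bits are
   the RPL and bit 2 the table indicator, so shifting %gs right by
   reg_thread_area (3) bits recovers the descriptor table index; e.g.
   a %gs value of 0x33 selects entry 6.  */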


bool
x86_target::low_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc (current_thread))
    return false;
#endif

  return regno >= I386_NUM_REGS;
}

bool
x86_target::low_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc (current_thread))
    return false;
#endif

  return regno >= I386_NUM_REGS;
}

static void
collect_register_i386 (struct regcache *regcache, int regno, void *buf)
{
  collect_register (regcache, regno, buf);

#ifdef __x86_64__
  /* In case of x86_64 -m32, collect_register only writes 4 bytes, but the
     space reserved in buf for the register is 8 bytes.  Make sure the entire
     reserved space is initialized.  */

  gdb_assert (register_size (regcache->tdesc, regno) == 4);

  if (regno == RAX)
    {
      /* Sign extend EAX value to avoid potential syscall restart
	 problems.

	 See amd64_linux_collect_native_gregset() in
	 gdb/amd64-linux-nat.c for a detailed explanation.  */
      *(int64_t *) buf = *(int32_t *) buf;
    }
  else
    {
      /* Zero-extend.  */
      *(uint64_t *) buf = *(uint32_t *) buf;
    }
#endif
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register_i386 (regcache, i, ((char *) buf) + i386_regmap[i]);

  /* Handle ORIG_EAX, which is not in i386_regmap.  */
  collect_register_i386 (regcache, find_regno (regcache->tdesc, "orig_eax"),
			 ((char *) buf) + ORIG_EAX * REGSIZE);
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);

      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  NULL_REGSET
};

bool
x86_target::low_supports_breakpoints ()
{
  return true;
}

CORE_ADDR
x86_target::low_get_pc (regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t pc;

      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      uint32_t pc;

      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

void
x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      uint64_t newpc = pc;

      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      uint32_t newpc = pc;

      supply_register_by_name (regcache, "eip", &newpc);
    }
}

int
x86_target::low_decr_pc_after_break ()
{
  return 1;
}

static const gdb_byte x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

bool
x86_target::low_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  read_memory (pc, &c, 1);
  if (c == 0xCC)
    return true;

  return false;
}

/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_linux_dr_set_control,
    x86_linux_dr_set_addr,
    x86_linux_dr_get_addr,
    x86_linux_dr_get_status,
    x86_linux_dr_get_control,
    sizeof (void *),
  };

/* Breakpoint/Watchpoint support.  */

bool
x86_target::supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return true;
    default:
      return false;
    }
}

int
x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
			      int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

int
x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
			      int size, raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }
    default:
      /* Unsupported.  */
      return 1;
    }
}
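
/* For illustration: the two methods above only update the debug
   register mirror in debug_reg_state; the actual DR0-DR7 writes are
   deferred, roughly until low_prepare_to_resume runs
   x86_linux_prepare_to_resume from nat/.  */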

bool
x86_target::low_stopped_by_watchpoint ()
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

CORE_ADDR
x86_target::low_stopped_data_address ()
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
				   &addr))
    return addr;
  return 0;
}

/* Called when a new process is created.  */

arch_process_info *
x86_target::low_new_process ()
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a process is being deleted.  */

void
x86_target::low_delete_process (arch_process_info *info)
{
  xfree (info);
}

void
x86_target::low_new_thread (lwp_info *lwp)
{
  /* This comes from nat/.  */
  x86_linux_new_thread (lwp);
}

void
x86_target::low_delete_thread (arch_lwp_info *alwp)
{
  /* This comes from nat/.  */
  x86_linux_delete_thread (alwp);
}

/* Target routine for new_fork.  */

void
x86_target::low_new_fork (process_info *parent, process_info *child)
{
  /* These are allocated by linux_add_process.  */
  gdb_assert (parent->priv != NULL
	      && parent->priv->arch_private != NULL);
  gdb_assert (child->priv != NULL
	      && child->priv->arch_private != NULL);

  /* Linux kernels before 2.6.33 commit
     72f674d203cd230426437cdcf7dd6f681dad8b0d
     will inherit hardware debug registers from the parent
     on fork/vfork/clone.  Newer Linux kernels create such tasks with
     zeroed debug registers.

     GDB core assumes the child inherits the watchpoints/hw
     breakpoints of the parent, and will remove them all from the
     forked-off process.  Copy the debug register mirrors into the
     new process so that all breakpoints and watchpoints can be
     removed together.  The debug register mirrors will be zeroed
     in the end before detaching the forked-off process, thus making
     this compatible with older Linux kernels too.  */

  *child->priv->arch_private = *parent->priv->arch_private;
}

void
x86_target::low_prepare_to_resume (lwp_info *lwp)
{
  /* This comes from nat/.  */
  x86_linux_prepare_to_resume (lwp);
}

/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* When GDBSERVER is built as a 64-bit application on Linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* Convert a ptrace/host siginfo object, into/from the siginfo in the
   layout of the inferior's architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to PTRACE.  If DIRECTION is 0, copy from PTRACE to
   INF.  */

bool
x86_target::low_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc (current_thread))
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_32);
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
					     FIXUP_X32);
#endif

  return false;
}

static int use_xml;

/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of sw_usable_bytes[464..471] are the OS enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464
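
/* For illustration: with the layout above, pulling XCR0 out of an
   XSAVE buffer viewed as uint64_t words is just

     xcr0 = xstateregs[I386_LINUX_XSAVE_XCR0_OFFSET / sizeof (uint64_t)];

   i.e. word 58, at byte offset 464, which is exactly what
   x86_linux_read_description does below.  */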

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return i386_linux_read_description (X86_XSTATE_X87);
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml.get ();
      else
#endif
	return tdesc_i386_linux_no_xml.get ();
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = X86_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	{
	  tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
						!is_elf64);
	}

      if (tdesc == NULL)
	tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
      return tdesc;
#endif
    }
  else
    {
      const target_desc *tdesc = NULL;

      if (xcr0_features)
	tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);

      if (tdesc == NULL)
	tdesc = i386_linux_read_description (X86_XSTATE_SSE);

      return tdesc;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}
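
/* For illustration, the decision above boils down to: no XML-capable
   GDB -> canned no-xml SSE tdesc; PTRACE_GETREGSET usable -> tdesc
   selected from the live XCR0 feature mask; otherwise -> SSE
   fallback tdesc.  */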

/* Update the target description of all processes; a new GDB
   connected, and it may or may not support xml target
   descriptions.  */

void
x86_target::update_xmltarget ()
{
  scoped_restore_current_thread restore_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_process ([this] (process_info *proc) {
    int pid = proc->pid;

    /* Look up any thread of this process.  */
    switch_to_thread (find_any_thread_of_pid (pid));

    low_arch_setup ();
  });
}

/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

void
x86_target::process_qsupported (gdb::array_view<const char * const> features)
{
  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;

  for (const char *feature : features)
    {
      if (startswith (feature, "xmlRegisters="))
	{
	  char *copy = xstrdup (feature + 13);

	  char *saveptr;
	  for (char *p = strtok_r (copy, ",", &saveptr);
	       p != NULL;
	       p = strtok_r (NULL, ",", &saveptr))
	    {
	      if (strcmp (p, "i386") == 0)
		{
		  use_xml = 1;
		  break;
		}
	    }

	  free (copy);
	}
    }

  update_xmltarget ();
}

/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const regs_info *
x86_target::get_regs_info ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc (current_thread))
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}

/* Initialize the target description for the architecture of the
   inferior.  */

void
x86_target::low_arch_setup ()
{
  current_process ()->tdesc = x86_linux_read_description ();
}

bool
x86_target::low_supports_catch_syscall ()
{
  return true;
}

/* Fill *SYSNO with the syscall nr trapped.  This should only be called
   if LWP got a SYSCALL_SIGTRAP.  */

void
x86_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      long l_sysno;

      collect_register_by_name (regcache, "orig_rax", &l_sysno);
      *sysno = (int) l_sysno;
    }
  else
    collect_register_by_name (regcache, "orig_eax", sysno);
}
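
/* For illustration: at a syscall stop the kernel has already
   clobbered %rax/%eax (with -ENOSYS at entry, the return value at
   exit), so the syscall number has to come from orig_rax/orig_eax,
   where ptrace preserves it.  */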

bool
x86_target::supports_tracepoints ()
{
  return true;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, const char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
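
/* For illustration, push_opcode decodes a string of hex bytes into
   raw machine code and returns the byte count, so

     i += push_opcode (&buf[i], "48 89 e6");

   appends the three bytes 0x48 0x89 0xe6 (mov %rsp,%rsi) at buf[i]
   and advances i by 3.  */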

#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes the instruction that jumps to the jump pad into
   JJUMPAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					CORE_ADDR *trampoline,
					ULONGEST *trampoline_size,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end,
					char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <addr>,%rdi */
  buf[i++] = 0xbf;
  memcpy (buf + i, &tpaddr, 8);
  i += 8;
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movabs <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");	/* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movabs <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function, being in the shared library, may be
     more than 31 bits away from the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump back from jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
	       "E.Jump pad too far from tracepoint "
	       "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */
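
/* For illustration, the pad built above is laid out roughly as:

     save GPRs, flags and a fake %rip push
     reserve and fill the collecting_t object, take the spin lock
     call gdb_collect through %rax
     release the lock, restore all registers
     relocated copy of the displaced original instruction
     jmp back to tpaddr + orig_size

   while the tracepoint site itself is overwritten with a single
   5-byte jmp into the pad.  */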

/* Build a jump pad that saves registers and calls a collection
   function.  Writes the instruction that jumps to the jump pad into
   JJUMPAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       CORE_ADDR *trampoline,
				       ULONGEST *trampoline_size,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end,
				       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	/* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	/* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");	/* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");	/* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
	{
	  /* No trampoline space available.  */
	  strcpy (err,
		  "E.Cannot allocate trampoline space needed for fast "
		  "tracepoints on 4-byte instructions.");
	  return 1;
	}

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      target_write_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

bool
x86_target::supports_fast_tracepoints ()
{
  return true;
}

int
x86_target::install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
					      CORE_ADDR tpaddr,
					      CORE_ADDR collector,
					      CORE_ADDR lockaddr,
					      ULONGEST orig_size,
					      CORE_ADDR *jump_entry,
					      CORE_ADDR *trampoline,
					      ULONGEST *trampoline_size,
					      unsigned char *jjump_pad_insn,
					      ULONGEST *jjump_pad_insn_size,
					      CORE_ADDR *adjusted_insn_addr,
					      CORE_ADDR *adjusted_insn_addr_end,
					      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc (current_thread))
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   trampoline, trampoline_size,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end,
						   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						trampoline, trampoline_size,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end,
						err);
}

/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

int
x86_target::get_min_fast_tracepoint_insn_len ()
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (is_64bit_tdesc (current_thread))
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
	 with a 2-byte offset may be used, otherwise 5-byte jump instructions
	 with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
	return 4;
      else
	{
	  /* GDB has no channel to explain to the user why a shorter fast
	     tracepoint is not possible, but at least make GDBserver
	     mention that something has gone awry.  */
	  if (!warned_about_fast_tracepoints)
	    {
	      warning ("4-byte fast tracepoints not available; %s", errbuf);
	      warned_about_fast_tracepoints = 1;
	    }
	  return 5;
	}
    }
  else
    {
      /* Indicate that the minimum length is currently unknown since the IPA
	 has not loaded yet.  */
      return 0;
    }
}

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  threads_debug_printf ("Adding %d bytes of insn at %s",
			len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME, INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ ("jmp end_" #NAME "\n"					\
	       "\t" "start_" #NAME ":"					\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":");					\
    } while (0)

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS)						\
  do									\
    {									\
      extern unsigned char start_ ## NAME, end_ ## NAME;		\
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);	\
      __asm__ (".code32\n"						\
	       "\t" "jmp end_" #NAME "\n"				\
	       "\t" "start_" #NAME ":\n"				\
	       "\t" INSNS "\n"						\
	       "\t" "end_" #NAME ":\n"					\
	       ".code64\n");						\
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
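
/* For illustration: EMIT_ASM assembles INSNS into gdbserver's own
   text section between the start_NAME and end_NAME labels (the
   leading jmp keeps gdbserver itself from ever executing them), and
   add_insns then copies the bytes between the two label addresses
   into the bytecode buffer at current_insn_ptr.  */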
1677
1678#ifdef __x86_64__
1679
1680static void
1681amd64_emit_prologue (void)
1682{
1683 EMIT_ASM (amd64_prologue,
1684 "pushq %rbp\n\t"
1685 "movq %rsp,%rbp\n\t"
1686 "sub $0x20,%rsp\n\t"
1687 "movq %rdi,-8(%rbp)\n\t"
1688 "movq %rsi,-16(%rbp)");
1689}
1690
1691
1692static void
1693amd64_emit_epilogue (void)
1694{
1695 EMIT_ASM (amd64_epilogue,
1696 "movq -16(%rbp),%rdi\n\t"
1697 "movq %rax,(%rdi)\n\t"
1698 "xor %rax,%rax\n\t"
1699 "leave\n\t"
1700 "ret");
1701}
1702
1703static void
1704amd64_emit_add (void)
1705{
1706 EMIT_ASM (amd64_add,
1707 "add (%rsp),%rax\n\t"
1708 "lea 0x8(%rsp),%rsp");
1709}
1710
1711static void
1712amd64_emit_sub (void)
1713{
1714 EMIT_ASM (amd64_sub,
1715 "sub %rax,(%rsp)\n\t"
1716 "pop %rax");
1717}
1718
1719static void
1720amd64_emit_mul (void)
1721{
1722 emit_error = 1;
1723}
1724
1725static void
1726amd64_emit_lsh (void)
1727{
1728 emit_error = 1;
1729}
1730
1731static void
1732amd64_emit_rsh_signed (void)
1733{
1734 emit_error = 1;
1735}
1736
1737static void
1738amd64_emit_rsh_unsigned (void)
1739{
1740 emit_error = 1;
1741}
1742
static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
                "cbtw\n\t"
                "cwtl\n\t"
                "cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
                "cwtl\n\t"
                "cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
                "cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
            "test %rax,%rax\n\t"
            "sete %cl\n\t"
            "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
            "and (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
            "or (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
            "xor (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
            "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
            "cmp %rax,(%rsp)\n\t"
            "je .Lamd64_equal_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_equal_end\n\t"
            ".Lamd64_equal_true:\n\t"
            "mov $0x1,%rax\n\t"
            ".Lamd64_equal_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
            "cmp %rax,(%rsp)\n\t"
            "jl .Lamd64_less_signed_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_less_signed_end\n\t"
            ".Lamd64_less_signed_true:\n\t"
            "mov $1,%rax\n\t"
            ".Lamd64_less_signed_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
            "cmp %rax,(%rsp)\n\t"
            "jb .Lamd64_less_unsigned_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_less_unsigned_end\n\t"
            ".Lamd64_less_unsigned_true:\n\t"
            "mov $1,%rax\n\t"
            ".Lamd64_less_unsigned_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
                "movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
                "movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
                "movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
                "movq (%rax),%rax");
      break;
    }
}

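/* Emit a conditional branch whose 32-bit displacement is filled in
   later by amd64_write_goto_address.  The jne is hand-assembled
   (0x0f 0x85 plus a zeroed rel32) so that its layout is fixed: the
   preceding mov/pop/cmp take 3+1+4 bytes and the jne opcode 2 more,
   which is why the displacement sits at offset 10 and is 4 bytes
   wide.  */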
static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
            "mov %rax,%rcx\n\t"
            "pop %rax\n\t"
            "cmp $0,%rcx\n\t"
            ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}

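/* Load a 64-bit constant into %rax with a movabs: REX.W (0x48) plus
   opcode 0xb8 and an 8-byte immediate, 10 bytes in all.  */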
static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
  memcpy (&buf[i], &num, sizeof (num));
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function, being in a shared library, may be more
     than 31 bits away from the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* The offset is too large for a direct call, so call through a
         register.  The bytes below encode %rdx, which is
         call-clobbered and carries no argument in any call we emit
         here, so we don't have to push/pop it.  */
      buf[i++] = 0x48; /* movabs $fn,%rdx */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%rdx */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

1971
1972static void
1973amd64_emit_reg (int reg)
1974{
1975 unsigned char buf[16];
1976 int i;
1977 CORE_ADDR buildaddr;
1978
1979 /* Assume raw_regs is still in %rdi. */
1980 buildaddr = current_insn_ptr;
1981 i = 0;
1982 buf[i++] = 0xbe; /* mov $<n>,%esi */
b00ad6ff 1983 memcpy (&buf[i], &reg, sizeof (reg));
6a271cae
PA
1984 i += 4;
1985 append_insns (&buildaddr, i, buf);
1986 current_insn_ptr = buildaddr;
1987 amd64_emit_call (get_raw_reg_func_addr ());
1988}
1989
1990static void
1991amd64_emit_pop (void)
1992{
1993 EMIT_ASM (amd64_pop,
1994 "pop %rax");
1995}
1996
1997static void
1998amd64_emit_stack_flush (void)
1999{
2000 EMIT_ASM (amd64_stack_flush,
2001 "push %rax");
2002}
2003
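/* Zero-extend the low ARG bits of the top of stack.  For the 32-bit
   case the mask cannot be an immediate: "and $0xffffffff,%rax" would
   sign-extend the immediate to all ones, so the mask is materialized
   in %rcx first (the assembler encodes that mov with a full 64-bit
   immediate, leaving %rcx equal to exactly 0xffffffff).  */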
static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
                "and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
                "and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
                "mov $0xffffffff,%rcx\n\t"
                "and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
            "mov %rax,%rcx\n\t"
            "pop %rax\n\t"
            "push %rcx");
}

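/* Drop N stack-machine words.  Encoded as "lea disp8(%rsp),%rsp"
   (0x48 0x8d 0x64 0x24 disp8), so N * 8 must fit in a signed
   byte.  */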
static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 15 words, but we don't
     expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
            /* Save away a copy of the stack top.  */
            "push %rax\n\t"
            /* Also pass top as the second argument.  */
            "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
            /* Restore the stack top, %rax may have been trashed.  */
            "pop %rax");
}

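/* The six compare-and-branch emitters below all follow the same
   pattern: compare the two top stack entries, fall through on the
   inverted condition, otherwise pop both operands and take a
   hand-assembled jmp (0xe9 plus a zeroed rel32) that is patched
   later.  The cmp/jcc/lea/pop prefix is 4+2+5+1 bytes and the jmp
   opcode 1 more, so the displacement sits at offset 13.  */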
static void
amd64_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_eq,
            "cmp %rax,(%rsp)\n\t"
            "jne .Lamd64_eq_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_eq_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ne,
            "cmp %rax,(%rsp)\n\t"
            "je .Lamd64_ne_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_ne_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_lt,
            "cmp %rax,(%rsp)\n\t"
            "jnl .Lamd64_lt_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_lt_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_le,
            "cmp %rax,(%rsp)\n\t"
            "jnle .Lamd64_le_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_le_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_gt,
            "cmp %rax,(%rsp)\n\t"
            "jng .Lamd64_gt_fallthru\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_gt_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_ge,
            "cmp %rax,(%rsp)\n\t"
            "jnge .Lamd64_ge_fallthru\n\t"
            ".Lamd64_ge_jump:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax\n\t"
            /* jmp, but don't trust the assembler to choose the right jump */
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
            ".Lamd64_ge_fallthru:\n\t"
            "lea 0x8(%rsp),%rsp\n\t"
            "pop %rax");

  if (offset_p)
    *offset_p = 13;
  if (size_p)
    *size_p = 4;
}

static emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2,
    amd64_emit_eq_goto,
    amd64_emit_ne_goto,
    amd64_emit_lt_goto,
    amd64_emit_le_goto,
    amd64_emit_gt_goto,
    amd64_emit_ge_goto
  };

#endif /* __x86_64__ */

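/* i386 code generation.  Here a 64-bit stack-machine value is kept
   split across a register pair: the low word in %eax and the high
   word in %ebx (note the add/adc and sub/sbb pairs below).  Deeper
   entries occupy two words each on the hardware stack.  */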
static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
              "push %ebp\n\t"
              "mov %esp,%ebp\n\t"
              "push %ebx");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
              "mov 12(%ebp),%ecx\n\t"
              "mov %eax,(%ecx)\n\t"
              "mov %ebx,0x4(%ecx)\n\t"
              "xor %eax,%eax\n\t"
              "pop %ebx\n\t"
              "pop %ebp\n\t"
              "ret");
}

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
              "add (%esp),%eax\n\t"
              "adc 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
              "subl %eax,(%esp)\n\t"
              "sbbl %ebx,4(%esp)\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t");
}

static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

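/* Sign-extend the low ARG bits into the %ebx:%eax pair: widen %eax
   first, then copy its sign bit into every bit of %ebx with an
   arithmetic shift.  */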
static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
                  "cbtw\n\t"
                  "cwtl\n\t"
                  "movl %eax,%ebx\n\t"
                  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
                  "cwtl\n\t"
                  "movl %eax,%ebx\n\t"
                  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
                  "movl %eax,%ebx\n\t"
                  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
              "or %ebx,%eax\n\t"
              "test %eax,%eax\n\t"
              "sete %cl\n\t"
              "xor %ebx,%ebx\n\t"
              "movzbl %cl,%eax");
}

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
              "and (%esp),%eax\n\t"
              "and 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
              "or (%esp),%eax\n\t"
              "or 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
              "xor (%esp),%eax\n\t"
              "xor 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
              "xor $0xffffffff,%eax\n\t"
              "xor $0xffffffff,%ebx\n\t");
}

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
              "cmpl %ebx,4(%esp)\n\t"
              "jne .Li386_equal_false\n\t"
              "cmpl %eax,(%esp)\n\t"
              "je .Li386_equal_true\n\t"
              ".Li386_equal_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_equal_end\n\t"
              ".Li386_equal_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_equal_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
              "cmpl %ebx,4(%esp)\n\t"
              "jl .Li386_less_signed_true\n\t"
              "jne .Li386_less_signed_false\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jl .Li386_less_signed_true\n\t"
              ".Li386_less_signed_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_less_signed_end\n\t"
              ".Li386_less_signed_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_less_signed_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
              "cmpl %ebx,4(%esp)\n\t"
              "jb .Li386_less_unsigned_true\n\t"
              "jne .Li386_less_unsigned_false\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jb .Li386_less_unsigned_true\n\t"
              ".Li386_less_unsigned_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_less_unsigned_end\n\t"
              ".Li386_less_unsigned_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_less_unsigned_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
                  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
                  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
                  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
                  "movl 4(%eax),%ebx\n\t"
                  "movl (%eax),%eax");
      break;
    }
}

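/* As on amd64, the conditional branch is hand-assembled with a
   zeroed rel32 and patched later.  Here the test must OR the two
   halves of the value together first; mov/or/pop/pop/cmp take
   2+2+1+1+3 bytes and the jne opcode 2 more, giving the displacement
   offset of 11.  */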
static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
              "mov %eax,%ecx\n\t"
              "or %ebx,%ecx\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              "cmpl $0,%ecx\n\t"
              /* Don't trust the assembler to choose the right jump */
              ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
              /* Don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  target_write_memory (from, buf, sizeof (int));
}

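/* Load a 64-bit constant into %ebx:%eax.  The high-half mov is
   skipped when it would be zero; %ebx is cleared with a 2-byte xor
   instead.  */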
static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi, lo;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  lo = num & 0xffffffff;
  memcpy (&buf[i], &lo, sizeof (lo));
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      memcpy (&buf[i], &hi, sizeof (hi));
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

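/* Unlike the amd64 variant, a direct call always suffices here: on
   i386 any destination is reachable through the call instruction's
   32-bit relative operand.  */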
static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
              "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  memcpy (&buf[i], &reg, sizeof (reg));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
              "mov %eax,4(%esp)\n\t"
              "mov 8(%ebp),%eax\n\t"
              "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
              "pop %eax\n\t"
              "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
              "push %ebx\n\t"
              "push %eax");
}

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
                  "and $0xff,%eax\n\t"
                  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
                  "and $0xffff,%eax\n\t"
                  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
                  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
              "mov %eax,%ecx\n\t"
              "mov %ebx,%edx\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              "push %edx\n\t"
              "push %ecx");
}

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
              /* Reserve a bit of stack space.  */
              "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
              "mov %edx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
              /* Preserve %eax only; we don't have to worry about %ebx.  */
              "push %eax\n\t"
              /* Reserve a bit of stack space for arguments.  */
              "sub $0x10,%esp\n\t"
              /* Copy "top" to the second argument position.  (Note that
                 we can't assume the function won't scribble on its
                 arguments, so don't try to restore from this.)  */
              "mov %eax,4(%esp)\n\t"
              "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  memcpy (&buf[i], &arg1, sizeof (arg1));
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
              "lea 0x10(%esp),%esp\n\t"
              /* Restore original stack top.  */
              "pop %eax");
}

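/* The i386 compare-and-branch emitters mirror the amd64 set, but
   must compare both halves of each 64-bit operand.  The displacement
   offsets differ accordingly: the rel32 lands at offset 18 for eq/ne
   and at 20 for the ordered comparisons, which need one more
   conditional jump in front of it.  */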
static void
i386_emit_eq_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (eq,
              /* Check the low half first; it's more likely to be the decider.  */
              "cmpl %eax,(%esp)\n\t"
              "jne .Leq_fallthru\n\t"
              "cmpl %ebx,4(%esp)\n\t"
              "jne .Leq_fallthru\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Leq_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ne_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ne,
              /* Check the low half first; it's more likely to be the decider.  */
              "cmpl %eax,(%esp)\n\t"
              "jne .Lne_jump\n\t"
              "cmpl %ebx,4(%esp)\n\t"
              "je .Lne_fallthru\n\t"
              ".Lne_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lne_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 18;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_lt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (lt,
              "cmpl %ebx,4(%esp)\n\t"
              "jl .Llt_jump\n\t"
              "jne .Llt_fallthru\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jnl .Llt_fallthru\n\t"
              ".Llt_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Llt_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_le_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (le,
              "cmpl %ebx,4(%esp)\n\t"
              "jle .Lle_jump\n\t"
              "jne .Lle_fallthru\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jnle .Lle_fallthru\n\t"
              ".Lle_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lle_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_gt_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (gt,
              "cmpl %ebx,4(%esp)\n\t"
              "jg .Lgt_jump\n\t"
              "jne .Lgt_fallthru\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jng .Lgt_fallthru\n\t"
              ".Lgt_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lgt_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static void
i386_emit_ge_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (ge,
              "cmpl %ebx,4(%esp)\n\t"
              "jge .Lge_jump\n\t"
              "jne .Lge_fallthru\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jnge .Lge_fallthru\n\t"
              ".Lge_jump:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              /* jmp, but don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
              ".Lge_fallthru:\n\t"
              "lea 0x8(%esp),%esp\n\t"
              "pop %eax\n\t"
              "pop %ebx");

  if (offset_p)
    *offset_p = 20;
  if (size_p)
    *size_p = 4;
}

static emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2,
    i386_emit_eq_goto,
    i386_emit_ne_goto,
    i386_emit_lt_goto,
    i386_emit_le_goto,
    i386_emit_gt_goto,
    i386_emit_ge_goto
  };

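/* Pick the emitter table matching the inferior: a 64-bit gdbserver
   may be debugging a 32-bit process, so the choice is made per-thread
   from its target description rather than at build time.  */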
emit_ops *
x86_target::emit_ops ()
{
#ifdef __x86_64__
  if (is_64bit_tdesc (current_thread))
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}

/* Implementation of target ops method "sw_breakpoint_from_kind".  */

const gdb_byte *
x86_target::sw_breakpoint_from_kind (int kind, int *size)
{
  *size = x86_breakpoint_len;
  return x86_breakpoint;
}

bool
x86_target::low_supports_range_stepping ()
{
  return true;
}

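/* Implementation of target ops method "get_ipa_tdesc_idx".  Note the
   unconditional return under __x86_64__: in a 64-bit build the i386
   checks below are never reached.  */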
int
x86_target::get_ipa_tdesc_idx ()
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);
  const struct target_desc *tdesc = regcache->tdesc;

#ifdef __x86_64__
  return amd64_get_ipa_tdesc_idx (tdesc);
#endif

  if (tdesc == tdesc_i386_linux_no_xml.get ())
    return X86_TDESC_SSE;

  return i386_get_ipa_tdesc_idx (tdesc);
}

/* The linux target ops object.  */

linux_process_target *the_linux_target = &the_x86_target;

void
initialize_low_arch (void)
{
  /* Initialize the Linux target descriptions.  */
#ifdef __x86_64__
  tdesc_amd64_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_amd64_linux_no_xml.get (),
                           amd64_linux_read_description (X86_XSTATE_SSE_MASK,
                                                         false));
  tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
#endif

  tdesc_i386_linux_no_xml = allocate_target_description ();
  copy_target_description (tdesc_i386_linux_no_xml.get (),
                           i386_linux_read_description (X86_XSTATE_SSE_MASK));
  tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;

  initialize_regsets_info (&x86_regsets_info);
}