1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
3 Copyright (C) 2002-2020 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include <signal.h>
22 #include <limits.h>
23 #include <inttypes.h>
24 #include "linux-low.h"
25 #include "i387-fp.h"
26 #include "x86-low.h"
27 #include "gdbsupport/x86-xstate.h"
28 #include "nat/gdb_ptrace.h"
29
30 #ifdef __x86_64__
31 #include "nat/amd64-linux-siginfo.h"
32 #endif
33
34 #include "gdb_proc_service.h"
35 /* Don't include elf/common.h if linux/elf.h got included by
36 gdb_proc_service.h. */
37 #ifndef ELFMAG0
38 #include "elf/common.h"
39 #endif
40
41 #include "gdbsupport/agent.h"
42 #include "tdesc.h"
43 #include "tracepoint.h"
44 #include "ax.h"
45 #include "nat/linux-nat.h"
46 #include "nat/x86-linux.h"
47 #include "nat/x86-linux-dregs.h"
48 #include "linux-x86-tdesc.h"
49
50 #ifdef __x86_64__
51 static struct target_desc *tdesc_amd64_linux_no_xml;
52 #endif
53 static struct target_desc *tdesc_i386_linux_no_xml;
54
55
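/* Templates for the jump instructions wired in at fast tracepoint sites:
   jump_insn is a 5-byte "jmp rel32", small_jump_insn a 4-byte
   operand-size-prefixed "jmp rel16".  The zeroed displacement bytes are
   patched in once the jump pad or trampoline address is known.  */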
56 static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
57 static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
58
59 /* Backward compatibility for gdb without XML support. */
60
61 static const char *xmltarget_i386_linux_no_xml = "@<target>\
62 <architecture>i386</architecture>\
63 <osabi>GNU/Linux</osabi>\
64 </target>";
65
66 #ifdef __x86_64__
67 static const char *xmltarget_amd64_linux_no_xml = "@<target>\
68 <architecture>i386:x86-64</architecture>\
69 <osabi>GNU/Linux</osabi>\
70 </target>";
71 #endif
72
73 #include <sys/reg.h>
74 #include <sys/procfs.h>
75 #include <sys/uio.h>
76
77 #ifndef PTRACE_GET_THREAD_AREA
78 #define PTRACE_GET_THREAD_AREA 25
79 #endif
80
81 /* This definition comes from prctl.h, but some kernels may not have it. */
82 #ifndef PTRACE_ARCH_PRCTL
83 #define PTRACE_ARCH_PRCTL 30
84 #endif
85
86 /* The following definitions come from prctl.h, but may be absent
87 for certain configurations. */
88 #ifndef ARCH_GET_FS
89 #define ARCH_SET_GS 0x1001
90 #define ARCH_SET_FS 0x1002
91 #define ARCH_GET_FS 0x1003
92 #define ARCH_GET_GS 0x1004
93 #endif
94
95 /* Linux target op definitions for the x86 architecture.
96 This is initialized assuming an amd64 target.
97 'low_arch_setup' will correct it for i386 or amd64 targets. */
98
99 class x86_target : public linux_process_target
100 {
101 public:
102
103 const regs_info *get_regs_info () override;
104
105 const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;
106
107 bool supports_z_point_type (char z_type) override;
108
109 void process_qsupported (char **features, int count) override;
110
111 bool supports_tracepoints () override;
112
113 bool supports_fast_tracepoints () override;
114
115 int install_fast_tracepoint_jump_pad
116 (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
117 CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
118 CORE_ADDR *trampoline, ULONGEST *trampoline_size,
119 unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
120 CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
121 char *err) override;
122
123 int get_min_fast_tracepoint_insn_len () override;
124
125 struct emit_ops *emit_ops () override;
126
127 protected:
128
129 void low_arch_setup () override;
130
131 bool low_cannot_fetch_register (int regno) override;
132
133 bool low_cannot_store_register (int regno) override;
134
135 bool low_supports_breakpoints () override;
136
137 CORE_ADDR low_get_pc (regcache *regcache) override;
138
139 void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
140
141 int low_decr_pc_after_break () override;
142
143 bool low_breakpoint_at (CORE_ADDR pc) override;
144
145 int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
146 int size, raw_breakpoint *bp) override;
147
148 int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
149 int size, raw_breakpoint *bp) override;
150
151 bool low_stopped_by_watchpoint () override;
152
153 CORE_ADDR low_stopped_data_address () override;
154
155 /* collect_ptrace_register/supply_ptrace_register are not needed in the
156 native i386 case (no registers smaller than an xfer unit), and are not
157 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
158
159 /* Need to fix up i386 siginfo if host is amd64. */
160 bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
161 int direction) override;
162
163 arch_process_info *low_new_process () override;
164
165 void low_delete_process (arch_process_info *info) override;
166
167 void low_new_thread (lwp_info *) override;
168
169 void low_delete_thread (arch_lwp_info *) override;
170
171 void low_new_fork (process_info *parent, process_info *child) override;
172
173 void low_prepare_to_resume (lwp_info *lwp) override;
174
175 int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;
176
177 bool low_supports_range_stepping () override;
178
179 bool low_supports_catch_syscall () override;
180
181 void low_get_syscall_trapinfo (regcache *regcache, int *sysno) override;
182
183 private:
184
185   /* Update the target descriptions of all processes; a new GDB has
186      connected, and it may or may not support xml target descriptions.  */
187 void update_xmltarget ();
188 };
189
190 /* The singleton target ops object. */
191
192 static x86_target the_x86_target;
193
194 /* Per-process arch-specific data we want to keep. */
195
196 struct arch_process_info
197 {
198 struct x86_debug_reg_state debug_reg_state;
199 };
200
201 #ifdef __x86_64__
202
203 /* Mapping between the general-purpose registers in `struct user'
204 format and GDB's register array layout.
205 Note that the transfer layout uses 64-bit regs. */
206 static /*const*/ int i386_regmap[] =
207 {
208 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
209 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
210 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
211 DS * 8, ES * 8, FS * 8, GS * 8
212 };
213
214 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
215
216 /* So the code below doesn't have to care whether it's i386 or amd64.  */
217 #define ORIG_EAX ORIG_RAX
218 #define REGSIZE 8
219
220 static const int x86_64_regmap[] =
221 {
222 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
223 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
224 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
225 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
226 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
227 DS * 8, ES * 8, FS * 8, GS * 8,
228 -1, -1, -1, -1, -1, -1, -1, -1,
229 -1, -1, -1, -1, -1, -1, -1, -1,
230 -1, -1, -1, -1, -1, -1, -1, -1,
231 -1,
232 -1, -1, -1, -1, -1, -1, -1, -1,
233 ORIG_RAX * 8,
234 #ifdef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
235 21 * 8, 22 * 8,
236 #else
237 -1, -1,
238 #endif
239 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
240 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
241 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
242 -1, -1, -1, -1, -1, -1, -1, -1,
243 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
244 -1, -1, -1, -1, -1, -1, -1, -1,
245 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
246 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
247 -1, -1, -1, -1, -1, -1, -1, -1,
248 -1, -1, -1, -1, -1, -1, -1, -1,
249 -1, -1, -1, -1, -1, -1, -1, -1,
250 -1 /* pkru */
251 };
252
253 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
254 #define X86_64_USER_REGS (GS + 1)
255
256 #else /* ! __x86_64__ */
257
258 /* Mapping between the general-purpose registers in `struct user'
259 format and GDB's register array layout. */
260 static /*const*/ int i386_regmap[] =
261 {
262 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
263 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
264 EIP * 4, EFL * 4, CS * 4, SS * 4,
265 DS * 4, ES * 4, FS * 4, GS * 4
266 };
267
268 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
269
270 #define REGSIZE 4
271
272 #endif
273
274 #ifdef __x86_64__
275
276 /* Returns true if the current inferior belongs to an x86-64 process,
277 per the tdesc. */
278
279 static int
280 is_64bit_tdesc (void)
281 {
282 struct regcache *regcache = get_thread_regcache (current_thread, 0);
283
284 return register_size (regcache->tdesc, 0) == 8;
285 }
286
287 #endif
288
289 \f
290 /* Called by libthread_db. */
291
292 ps_err_e
293 ps_get_thread_area (struct ps_prochandle *ph,
294 lwpid_t lwpid, int idx, void **base)
295 {
296 #ifdef __x86_64__
297 int use_64bit = is_64bit_tdesc ();
298
299 if (use_64bit)
300 {
301 switch (idx)
302 {
303 case FS:
304 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
305 return PS_OK;
306 break;
307 case GS:
308 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
309 return PS_OK;
310 break;
311 default:
312 return PS_BADADDR;
313 }
314 return PS_ERR;
315 }
316 #endif
317
318 {
319 unsigned int desc[4];
320
321 if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
322 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
323 return PS_ERR;
324
325 /* Ensure we properly extend the value to 64-bits for x86_64. */
326 *base = (void *) (uintptr_t) desc[1];
327 return PS_OK;
328 }
329 }
330
331 /* Get the thread area address. This is used to recognize which
332 thread is which when tracing with the in-process agent library. We
333 don't read anything from the address, and treat it as opaque; it's
334 the address itself that we assume is unique per-thread. */
335
336 int
337 x86_target::low_get_thread_area (int lwpid, CORE_ADDR *addr)
338 {
339 #ifdef __x86_64__
340 int use_64bit = is_64bit_tdesc ();
341
342 if (use_64bit)
343 {
344 void *base;
345 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
346 {
347 *addr = (CORE_ADDR) (uintptr_t) base;
348 return 0;
349 }
350
351 return -1;
352 }
353 #endif
354
355 {
356 struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
357 struct thread_info *thr = get_lwp_thread (lwp);
358 struct regcache *regcache = get_thread_regcache (thr, 1);
359 unsigned int desc[4];
360 ULONGEST gs = 0;
361 const int reg_thread_area = 3; /* bits to scale down register value. */
362 int idx;
363
364 collect_register_by_name (regcache, "gs", &gs);
365
366 idx = gs >> reg_thread_area;
367
368 if (ptrace (PTRACE_GET_THREAD_AREA,
369 lwpid_of (thr),
370 (void *) (long) idx, (unsigned long) &desc) < 0)
371 return -1;
372
373 *addr = desc[1];
374 return 0;
375 }
376 }
377
378
379 \f
380 bool
381 x86_target::low_cannot_store_register (int regno)
382 {
383 #ifdef __x86_64__
384 if (is_64bit_tdesc ())
385 return false;
386 #endif
387
388 return regno >= I386_NUM_REGS;
389 }
390
391 bool
392 x86_target::low_cannot_fetch_register (int regno)
393 {
394 #ifdef __x86_64__
395 if (is_64bit_tdesc ())
396 return false;
397 #endif
398
399 return regno >= I386_NUM_REGS;
400 }
401
402 static void
403 x86_fill_gregset (struct regcache *regcache, void *buf)
404 {
405 int i;
406
407 #ifdef __x86_64__
408 if (register_size (regcache->tdesc, 0) == 8)
409 {
410 for (i = 0; i < X86_64_NUM_REGS; i++)
411 if (x86_64_regmap[i] != -1)
412 collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
413
414 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
415 {
416 unsigned long base;
417 int lwpid = lwpid_of (current_thread);
418
419 collect_register_by_name (regcache, "fs_base", &base);
420 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_FS);
421
422 collect_register_by_name (regcache, "gs_base", &base);
423 ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_SET_GS);
424 }
425 #endif
426
427 return;
428 }
429
430 /* 32-bit inferior registers need to be zero-extended.
431 Callers would read uninitialized memory otherwise. */
432 memset (buf, 0x00, X86_64_USER_REGS * 8);
433 #endif
434
435 for (i = 0; i < I386_NUM_REGS; i++)
436 collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);
437
438 collect_register_by_name (regcache, "orig_eax",
439 ((char *) buf) + ORIG_EAX * REGSIZE);
440
441 #ifdef __x86_64__
442 /* Sign extend EAX value to avoid potential syscall restart
443 problems.
444
445 See amd64_linux_collect_native_gregset() in gdb/amd64-linux-nat.c
446 for a detailed explanation. */
447 if (register_size (regcache->tdesc, 0) == 4)
448 {
449 void *ptr = ((gdb_byte *) buf
450 + i386_regmap[find_regno (regcache->tdesc, "eax")]);
451
452 *(int64_t *) ptr = *(int32_t *) ptr;
453 }
454 #endif
455 }
456
457 static void
458 x86_store_gregset (struct regcache *regcache, const void *buf)
459 {
460 int i;
461
462 #ifdef __x86_64__
463 if (register_size (regcache->tdesc, 0) == 8)
464 {
465 for (i = 0; i < X86_64_NUM_REGS; i++)
466 if (x86_64_regmap[i] != -1)
467 supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
468
469 #ifndef HAVE_STRUCT_USER_REGS_STRUCT_FS_BASE
470 {
471 unsigned long base;
472 int lwpid = lwpid_of (current_thread);
473
474 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
475 supply_register_by_name (regcache, "fs_base", &base);
476
477 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_GS) == 0)
478 supply_register_by_name (regcache, "gs_base", &base);
479 }
480 #endif
481 return;
482 }
483 #endif
484
485 for (i = 0; i < I386_NUM_REGS; i++)
486 supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
487
488 supply_register_by_name (regcache, "orig_eax",
489 ((char *) buf) + ORIG_EAX * REGSIZE);
490 }
491
492 static void
493 x86_fill_fpregset (struct regcache *regcache, void *buf)
494 {
495 #ifdef __x86_64__
496 i387_cache_to_fxsave (regcache, buf);
497 #else
498 i387_cache_to_fsave (regcache, buf);
499 #endif
500 }
501
502 static void
503 x86_store_fpregset (struct regcache *regcache, const void *buf)
504 {
505 #ifdef __x86_64__
506 i387_fxsave_to_cache (regcache, buf);
507 #else
508 i387_fsave_to_cache (regcache, buf);
509 #endif
510 }
511
512 #ifndef __x86_64__
513
514 static void
515 x86_fill_fpxregset (struct regcache *regcache, void *buf)
516 {
517 i387_cache_to_fxsave (regcache, buf);
518 }
519
520 static void
521 x86_store_fpxregset (struct regcache *regcache, const void *buf)
522 {
523 i387_fxsave_to_cache (regcache, buf);
524 }
525
526 #endif
527
528 static void
529 x86_fill_xstateregset (struct regcache *regcache, void *buf)
530 {
531 i387_cache_to_xsave (regcache, buf);
532 }
533
534 static void
535 x86_store_xstateregset (struct regcache *regcache, const void *buf)
536 {
537 i387_xsave_to_cache (regcache, buf);
538 }
539
540 /* ??? The non-biarch i386 case stores all the i387 regs twice.
541 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
542 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
543    doesn't work.  It would be nice to avoid the duplication in the case where it
544 does work. Maybe the arch_setup routine could check whether it works
545 and update the supported regsets accordingly. */
546
547 static struct regset_info x86_regsets[] =
548 {
549 #ifdef HAVE_PTRACE_GETREGS
550 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
551 GENERAL_REGS,
552 x86_fill_gregset, x86_store_gregset },
553 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
554 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
555 # ifndef __x86_64__
556 # ifdef HAVE_PTRACE_GETFPXREGS
557 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
558 EXTENDED_REGS,
559 x86_fill_fpxregset, x86_store_fpxregset },
560 # endif
561 # endif
562 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
563 FP_REGS,
564 x86_fill_fpregset, x86_store_fpregset },
565 #endif /* HAVE_PTRACE_GETREGS */
566 NULL_REGSET
567 };
568
569 bool
570 x86_target::low_supports_breakpoints ()
571 {
572 return true;
573 }
574
575 CORE_ADDR
576 x86_target::low_get_pc (regcache *regcache)
577 {
578 int use_64bit = register_size (regcache->tdesc, 0) == 8;
579
580 if (use_64bit)
581 {
582 uint64_t pc;
583
584 collect_register_by_name (regcache, "rip", &pc);
585 return (CORE_ADDR) pc;
586 }
587 else
588 {
589 uint32_t pc;
590
591 collect_register_by_name (regcache, "eip", &pc);
592 return (CORE_ADDR) pc;
593 }
594 }
595
596 void
597 x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
598 {
599 int use_64bit = register_size (regcache->tdesc, 0) == 8;
600
601 if (use_64bit)
602 {
603 uint64_t newpc = pc;
604
605 supply_register_by_name (regcache, "rip", &newpc);
606 }
607 else
608 {
609 uint32_t newpc = pc;
610
611 supply_register_by_name (regcache, "eip", &newpc);
612 }
613 }
614
615 int
616 x86_target::low_decr_pc_after_break ()
617 {
618 return 1;
619 }
620
621 \f
622 static const gdb_byte x86_breakpoint[] = { 0xCC };
623 #define x86_breakpoint_len 1
624
625 bool
626 x86_target::low_breakpoint_at (CORE_ADDR pc)
627 {
628 unsigned char c;
629
630 read_memory (pc, &c, 1);
631 if (c == 0xCC)
632 return true;
633
634 return false;
635 }
636 \f
637 /* Low-level function vector. */
638 struct x86_dr_low_type x86_dr_low =
639 {
640 x86_linux_dr_set_control,
641 x86_linux_dr_set_addr,
642 x86_linux_dr_get_addr,
643 x86_linux_dr_get_status,
644 x86_linux_dr_get_control,
645 sizeof (void *),
646 };
647 \f
648 /* Breakpoint/Watchpoint support. */
649
650 bool
651 x86_target::supports_z_point_type (char z_type)
652 {
653 switch (z_type)
654 {
655 case Z_PACKET_SW_BP:
656 case Z_PACKET_HW_BP:
657 case Z_PACKET_WRITE_WP:
658 case Z_PACKET_ACCESS_WP:
659 return true;
660 default:
661 return false;
662 }
663 }
664
665 int
666 x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
667 int size, raw_breakpoint *bp)
668 {
669 struct process_info *proc = current_process ();
670
671 switch (type)
672 {
673 case raw_bkpt_type_hw:
674 case raw_bkpt_type_write_wp:
675 case raw_bkpt_type_access_wp:
676 {
677 enum target_hw_bp_type hw_type
678 = raw_bkpt_type_to_target_hw_bp_type (type);
679 struct x86_debug_reg_state *state
680 = &proc->priv->arch_private->debug_reg_state;
681
682 return x86_dr_insert_watchpoint (state, hw_type, addr, size);
683 }
684
685 default:
686 /* Unsupported. */
687 return 1;
688 }
689 }
690
691 int
692 x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
693 int size, raw_breakpoint *bp)
694 {
695 struct process_info *proc = current_process ();
696
697 switch (type)
698 {
699 case raw_bkpt_type_hw:
700 case raw_bkpt_type_write_wp:
701 case raw_bkpt_type_access_wp:
702 {
703 enum target_hw_bp_type hw_type
704 = raw_bkpt_type_to_target_hw_bp_type (type);
705 struct x86_debug_reg_state *state
706 = &proc->priv->arch_private->debug_reg_state;
707
708 return x86_dr_remove_watchpoint (state, hw_type, addr, size);
709 }
710 default:
711 /* Unsupported. */
712 return 1;
713 }
714 }
715
716 bool
717 x86_target::low_stopped_by_watchpoint ()
718 {
719 struct process_info *proc = current_process ();
720 return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
721 }
722
723 CORE_ADDR
724 x86_target::low_stopped_data_address ()
725 {
726 struct process_info *proc = current_process ();
727 CORE_ADDR addr;
728 if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
729 &addr))
730 return addr;
731 return 0;
732 }
733 \f
734 /* Called when a new process is created. */
735
736 arch_process_info *
737 x86_target::low_new_process ()
738 {
739 struct arch_process_info *info = XCNEW (struct arch_process_info);
740
741 x86_low_init_dregs (&info->debug_reg_state);
742
743 return info;
744 }
745
746 /* Called when a process is being deleted. */
747
748 void
749 x86_target::low_delete_process (arch_process_info *info)
750 {
751 xfree (info);
752 }
753
754 void
755 x86_target::low_new_thread (lwp_info *lwp)
756 {
757 /* This comes from nat/. */
758 x86_linux_new_thread (lwp);
759 }
760
761 void
762 x86_target::low_delete_thread (arch_lwp_info *alwp)
763 {
764 /* This comes from nat/. */
765 x86_linux_delete_thread (alwp);
766 }
767
768 /* Target routine for new_fork. */
769
770 void
771 x86_target::low_new_fork (process_info *parent, process_info *child)
772 {
773 /* These are allocated by linux_add_process. */
774 gdb_assert (parent->priv != NULL
775 && parent->priv->arch_private != NULL);
776 gdb_assert (child->priv != NULL
777 && child->priv->arch_private != NULL);
778
779 /* Linux kernel before 2.6.33 commit
780 72f674d203cd230426437cdcf7dd6f681dad8b0d
781 will inherit hardware debug registers from parent
782 on fork/vfork/clone. Newer Linux kernels create such tasks with
783 zeroed debug registers.
784
785 GDB core assumes the child inherits the watchpoints/hw
786 breakpoints of the parent, and will remove them all from the
787 forked off process. Copy the debug registers mirrors into the
788 new process so that all breakpoints and watchpoints can be
789 removed together. The debug registers mirror will become zeroed
790 in the end before detaching the forked off process, thus making
791 this compatible with older Linux kernels too. */
792
793 *child->priv->arch_private = *parent->priv->arch_private;
794 }
795
796 void
797 x86_target::low_prepare_to_resume (lwp_info *lwp)
798 {
799 /* This comes from nat/. */
800 x86_linux_prepare_to_resume (lwp);
801 }
802
803 /* See nat/x86-dregs.h. */
804
805 struct x86_debug_reg_state *
806 x86_debug_reg_state (pid_t pid)
807 {
808 struct process_info *proc = find_process_pid (pid);
809
810 return &proc->priv->arch_private->debug_reg_state;
811 }
812 \f
813 /* When GDBSERVER is built as a 64-bit application on linux, the
814 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
815 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
816 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
817 conversion in-place ourselves. */
818
819 /* Convert a ptrace/host siginfo object into/from the siginfo in the
820    layout of the inferior's architecture.  Returns true if any
821 conversion was done; false otherwise. If DIRECTION is 1, then copy
822 from INF to PTRACE. If DIRECTION is 0, copy from PTRACE to
823 INF. */
824
825 bool
826 x86_target::low_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
827 {
828 #ifdef __x86_64__
829 unsigned int machine;
830 int tid = lwpid_of (current_thread);
831 int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
832
833 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
834 if (!is_64bit_tdesc ())
835 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
836 FIXUP_32);
837 /* No fixup for native x32 GDB. */
838 else if (!is_elf64 && sizeof (void *) == 8)
839 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
840 FIXUP_X32);
841 #endif
842
843 return false;
844 }
845 \f
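/* Nonzero if the connected GDB announced, via an "xmlRegisters=" entry
   containing "i386" in its qSupported query, that it understands x86 XML
   target descriptions.  See process_qsupported below.  */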
846 static int use_xml;
847
848 /* Format of XSAVE extended state is:
849 struct
850 {
851 fxsave_bytes[0..463]
852 sw_usable_bytes[464..511]
853 xstate_hdr_bytes[512..575]
854 avx_bytes[576..831]
855 future_state etc
856 };
857
858 Same memory layout will be used for the coredump NT_X86_XSTATE
859 representing the XSAVE extended state registers.
860
861    The first 8 bytes of the sw_usable_bytes[464..471] are the OS enabled
862 extended state mask, which is the same as the extended control register
863 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
864 together with the mask saved in the xstate_hdr_bytes to determine what
865 states the processor/OS supports and what state, used or initialized,
866 the process/thread is in. */
867 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
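
/* A minimal illustrative sketch (not used by this file) of how the enabled
   feature mask can be read from that layout with PTRACE_GETREGSET;
   x86_linux_read_description below does the same thing inline.  TID is
   assumed to be an already-attached, stopped thread.  */
#if 0
static uint64_t
example_read_xcr0 (int tid)
{
  uint64_t xstateregs[X86_XSTATE_SSE_SIZE / sizeof (uint64_t)];
  struct iovec iov;

  iov.iov_base = xstateregs;
  iov.iov_len = sizeof (xstateregs);

  if (ptrace (PTRACE_GETREGSET, tid, (unsigned int) NT_X86_XSTATE,
	      (long) &iov) < 0)
    return 0;	/* No XSAVE support in the kernel or CPU.  */

  return xstateregs[I386_LINUX_XSAVE_XCR0_OFFSET / sizeof (uint64_t)];
}
#endif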
868
869 /* Does the current host support the GETFPXREGS request? The header
870 file may or may not define it, and even if it is defined, the
871 kernel will return EIO if it's running on a pre-SSE processor. */
872 int have_ptrace_getfpxregs =
873 #ifdef HAVE_PTRACE_GETFPXREGS
874 -1
875 #else
876 0
877 #endif
878 ;
879
880 /* Get Linux/x86 target description from running target. */
881
882 static const struct target_desc *
883 x86_linux_read_description (void)
884 {
885 unsigned int machine;
886 int is_elf64;
887 int xcr0_features;
888 int tid;
889 static uint64_t xcr0;
890 struct regset_info *regset;
891
892 tid = lwpid_of (current_thread);
893
894 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
895
896 if (sizeof (void *) == 4)
897 {
898 if (is_elf64 > 0)
899 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
900 #ifndef __x86_64__
901 else if (machine == EM_X86_64)
902 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
903 #endif
904 }
905
906 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
907 if (machine == EM_386 && have_ptrace_getfpxregs == -1)
908 {
909 elf_fpxregset_t fpxregs;
910
911 if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
912 {
913 have_ptrace_getfpxregs = 0;
914 have_ptrace_getregset = 0;
915 return i386_linux_read_description (X86_XSTATE_X87);
916 }
917 else
918 have_ptrace_getfpxregs = 1;
919 }
920 #endif
921
922 if (!use_xml)
923 {
924 x86_xcr0 = X86_XSTATE_SSE_MASK;
925
926 /* Don't use XML. */
927 #ifdef __x86_64__
928 if (machine == EM_X86_64)
929 return tdesc_amd64_linux_no_xml;
930 else
931 #endif
932 return tdesc_i386_linux_no_xml;
933 }
934
935 if (have_ptrace_getregset == -1)
936 {
937 uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
938 struct iovec iov;
939
940 iov.iov_base = xstateregs;
941 iov.iov_len = sizeof (xstateregs);
942
943 /* Check if PTRACE_GETREGSET works. */
944 if (ptrace (PTRACE_GETREGSET, tid,
945 (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
946 have_ptrace_getregset = 0;
947 else
948 {
949 have_ptrace_getregset = 1;
950
951 /* Get XCR0 from XSAVE extended state. */
952 xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
953 / sizeof (uint64_t))];
954
955 /* Use PTRACE_GETREGSET if it is available. */
956 for (regset = x86_regsets;
957 regset->fill_function != NULL; regset++)
958 if (regset->get_request == PTRACE_GETREGSET)
959 regset->size = X86_XSTATE_SIZE (xcr0);
960 else if (regset->type != GENERAL_REGS)
961 regset->size = 0;
962 }
963 }
964
965 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
966 xcr0_features = (have_ptrace_getregset
967 && (xcr0 & X86_XSTATE_ALL_MASK));
968
969 if (xcr0_features)
970 x86_xcr0 = xcr0;
971
972 if (machine == EM_X86_64)
973 {
974 #ifdef __x86_64__
975 const target_desc *tdesc = NULL;
976
977 if (xcr0_features)
978 {
979 tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
980 !is_elf64);
981 }
982
983 if (tdesc == NULL)
984 tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
985 return tdesc;
986 #endif
987 }
988 else
989 {
990 const target_desc *tdesc = NULL;
991
992 if (xcr0_features)
993 tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);
994
995 if (tdesc == NULL)
996 tdesc = i386_linux_read_description (X86_XSTATE_SSE);
997
998 return tdesc;
999 }
1000
1001 gdb_assert_not_reached ("failed to return tdesc");
1002 }
1003
1004 /* Update the target descriptions of all processes; a new GDB has
1005    connected, and it may or may not support xml target descriptions.  */
1006
1007 void
1008 x86_target::update_xmltarget ()
1009 {
1010 struct thread_info *saved_thread = current_thread;
1011
1012 /* Before changing the register cache's internal layout, flush the
1013 contents of the current valid caches back to the threads, and
1014 release the current regcache objects. */
1015 regcache_release ();
1016
1017 for_each_process ([this] (process_info *proc) {
1018 int pid = proc->pid;
1019
1020 /* Look up any thread of this process. */
1021 current_thread = find_any_thread_of_pid (pid);
1022
1023 low_arch_setup ();
1024 });
1025
1026 current_thread = saved_thread;
1027 }
1028
1029 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1030 PTRACE_GETREGSET. */
1031
1032 void
1033 x86_target::process_qsupported (char **features, int count)
1034 {
1035 int i;
1036
1037   /* Assume GDB doesn't support XML unless it tells us otherwise.  If GDB
1038      sends "xmlRegisters=" with "i386" in its qSupported query, it supports
1039      x86 XML target descriptions.  */
1040 use_xml = 0;
1041 for (i = 0; i < count; i++)
1042 {
1043 const char *feature = features[i];
1044
1045 if (startswith (feature, "xmlRegisters="))
1046 {
1047 char *copy = xstrdup (feature + 13);
1048
1049 char *saveptr;
1050 for (char *p = strtok_r (copy, ",", &saveptr);
1051 p != NULL;
1052 p = strtok_r (NULL, ",", &saveptr))
1053 {
1054 if (strcmp (p, "i386") == 0)
1055 {
1056 use_xml = 1;
1057 break;
1058 }
1059 }
1060
1061 free (copy);
1062 }
1063 }
1064 update_xmltarget ();
1065 }
1066
1067 /* Common for x86/x86-64. */
1068
1069 static struct regsets_info x86_regsets_info =
1070 {
1071 x86_regsets, /* regsets */
1072 0, /* num_regsets */
1073 NULL, /* disabled_regsets */
1074 };
1075
1076 #ifdef __x86_64__
1077 static struct regs_info amd64_linux_regs_info =
1078 {
1079 NULL, /* regset_bitmap */
1080 NULL, /* usrregs_info */
1081 &x86_regsets_info
1082 };
1083 #endif
1084 static struct usrregs_info i386_linux_usrregs_info =
1085 {
1086 I386_NUM_REGS,
1087 i386_regmap,
1088 };
1089
1090 static struct regs_info i386_linux_regs_info =
1091 {
1092 NULL, /* regset_bitmap */
1093 &i386_linux_usrregs_info,
1094 &x86_regsets_info
1095 };
1096
1097 const regs_info *
1098 x86_target::get_regs_info ()
1099 {
1100 #ifdef __x86_64__
1101 if (is_64bit_tdesc ())
1102 return &amd64_linux_regs_info;
1103 else
1104 #endif
1105 return &i386_linux_regs_info;
1106 }
1107
1108 /* Initialize the target description for the architecture of the
1109 inferior. */
1110
1111 void
1112 x86_target::low_arch_setup ()
1113 {
1114 current_process ()->tdesc = x86_linux_read_description ();
1115 }
1116
1117 bool
1118 x86_target::low_supports_catch_syscall ()
1119 {
1120 return true;
1121 }
1122
1123 /* Fill *SYSNO with the syscall number the inferior was trapped on.  This
1124    should only be called if LWP got a SYSCALL_SIGTRAP.  */
1125
1126 void
1127 x86_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
1128 {
1129 int use_64bit = register_size (regcache->tdesc, 0) == 8;
1130
1131 if (use_64bit)
1132 {
1133 long l_sysno;
1134
1135 collect_register_by_name (regcache, "orig_rax", &l_sysno);
1136 *sysno = (int) l_sysno;
1137 }
1138 else
1139 collect_register_by_name (regcache, "orig_eax", sysno);
1140 }
1141
1142 bool
1143 x86_target::supports_tracepoints ()
1144 {
1145 return true;
1146 }
1147
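/* Write the LEN opcode bytes in BUF to target memory at *TO and advance
   *TO past them.  */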
1148 static void
1149 append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
1150 {
1151 target_write_memory (*to, buf, len);
1152 *to += len;
1153 }
1154
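/* Decode the space-separated string of hex byte values OP into BUF and
   return the number of bytes written.  Lets instruction sequences below be
   written as readable opcode strings such as "48 89 e6".  */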
1155 static int
1156 push_opcode (unsigned char *buf, const char *op)
1157 {
1158 unsigned char *buf_org = buf;
1159
1160 while (1)
1161 {
1162 char *endptr;
1163 unsigned long ul = strtoul (op, &endptr, 16);
1164
1165 if (endptr == op)
1166 break;
1167
1168 *buf++ = ul;
1169 op = endptr;
1170 }
1171
1172 return buf - buf_org;
1173 }
1174
1175 #ifdef __x86_64__
1176
1177 /* Build a jump pad that saves registers and calls a collection
1178    function.  Writes the instruction that jumps to the jump pad into
1179    JJUMPAD_INSN.  The caller is responsible for writing it in at the
1180    tracepoint address.  */
1181
1182 static int
1183 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1184 CORE_ADDR collector,
1185 CORE_ADDR lockaddr,
1186 ULONGEST orig_size,
1187 CORE_ADDR *jump_entry,
1188 CORE_ADDR *trampoline,
1189 ULONGEST *trampoline_size,
1190 unsigned char *jjump_pad_insn,
1191 ULONGEST *jjump_pad_insn_size,
1192 CORE_ADDR *adjusted_insn_addr,
1193 CORE_ADDR *adjusted_insn_addr_end,
1194 char *err)
1195 {
1196 unsigned char buf[40];
1197 int i, offset;
1198 int64_t loffset;
1199
1200 CORE_ADDR buildaddr = *jump_entry;
1201
1202 /* Build the jump pad. */
1203
1204 /* First, do tracepoint data collection. Save registers. */
1205 i = 0;
1206 /* Need to ensure stack pointer saved first. */
1207 buf[i++] = 0x54; /* push %rsp */
1208 buf[i++] = 0x55; /* push %rbp */
1209 buf[i++] = 0x57; /* push %rdi */
1210 buf[i++] = 0x56; /* push %rsi */
1211 buf[i++] = 0x52; /* push %rdx */
1212 buf[i++] = 0x51; /* push %rcx */
1213 buf[i++] = 0x53; /* push %rbx */
1214 buf[i++] = 0x50; /* push %rax */
1215 buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
1216 buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
1217 buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
1218 buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
1219 buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
1220 buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
1221 buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
1222 buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
1223 buf[i++] = 0x9c; /* pushfq */
1224 buf[i++] = 0x48; /* movabs <addr>,%rdi */
1225 buf[i++] = 0xbf;
1226 memcpy (buf + i, &tpaddr, 8);
1227 i += 8;
1228 buf[i++] = 0x57; /* push %rdi */
1229 append_insns (&buildaddr, i, buf);
1230
1231 /* Stack space for the collecting_t object. */
1232 i = 0;
1233 i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
1234 i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
1235 memcpy (buf + i, &tpoint, 8);
1236 i += 8;
1237 i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
1238 i += push_opcode (&buf[i],
1239 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1240 i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1241 append_insns (&buildaddr, i, buf);
1242
1243 /* spin-lock. */
1244 i = 0;
1245   i += push_opcode (&buf[i], "48 be"); /* movabs <lockaddr>,%rsi */
1246 memcpy (&buf[i], (void *) &lockaddr, 8);
1247 i += 8;
1248 i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
1249 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1250 i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1251 i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
1252 i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
1253 append_insns (&buildaddr, i, buf);
1254
1255 /* Set up the gdb_collect call. */
1256 /* At this point, (stack pointer + 0x18) is the base of our saved
1257 register block. */
1258
1259 i = 0;
1260 i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
1261 i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */
1262
1263 /* tpoint address may be 64-bit wide. */
1264   i += push_opcode (&buf[i], "48 bf"); /* movabs <addr>,%rdi */
1265 memcpy (buf + i, &tpoint, 8);
1266 i += 8;
1267 append_insns (&buildaddr, i, buf);
1268
1269   /* The collector function, being in the shared library, may be more
1270      than 31 bits away from the jump pad.  */
1271 i = 0;
1272 i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
1273 memcpy (buf + i, &collector, 8);
1274 i += 8;
1275 i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
1276 append_insns (&buildaddr, i, buf);
1277
1278 /* Clear the spin-lock. */
1279 i = 0;
1280 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1281 i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
1282 memcpy (buf + i, &lockaddr, 8);
1283 i += 8;
1284 append_insns (&buildaddr, i, buf);
1285
1286   /* Remove the stack space that had been used for the collecting_t object.  */
1287 i = 0;
1288 i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
1289 append_insns (&buildaddr, i, buf);
1290
1291 /* Restore register state. */
1292 i = 0;
1293 buf[i++] = 0x48; /* add $0x8,%rsp */
1294 buf[i++] = 0x83;
1295 buf[i++] = 0xc4;
1296 buf[i++] = 0x08;
1297 buf[i++] = 0x9d; /* popfq */
1298 buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
1299 buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
1300 buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
1301 buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
1302 buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
1303 buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
1304 buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
1305 buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
1306 buf[i++] = 0x58; /* pop %rax */
1307 buf[i++] = 0x5b; /* pop %rbx */
1308 buf[i++] = 0x59; /* pop %rcx */
1309 buf[i++] = 0x5a; /* pop %rdx */
1310 buf[i++] = 0x5e; /* pop %rsi */
1311 buf[i++] = 0x5f; /* pop %rdi */
1312 buf[i++] = 0x5d; /* pop %rbp */
1313 buf[i++] = 0x5c; /* pop %rsp */
1314 append_insns (&buildaddr, i, buf);
1315
1316 /* Now, adjust the original instruction to execute in the jump
1317 pad. */
1318 *adjusted_insn_addr = buildaddr;
1319 relocate_instruction (&buildaddr, tpaddr);
1320 *adjusted_insn_addr_end = buildaddr;
1321
1322 /* Finally, write a jump back to the program. */
1323
1324 loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1325 if (loffset > INT_MAX || loffset < INT_MIN)
1326 {
1327 sprintf (err,
1328 "E.Jump back from jump pad too far from tracepoint "
1329 "(offset 0x%" PRIx64 " > int32).", loffset);
1330 return 1;
1331 }
1332
1333 offset = (int) loffset;
1334 memcpy (buf, jump_insn, sizeof (jump_insn));
1335 memcpy (buf + 1, &offset, 4);
1336 append_insns (&buildaddr, sizeof (jump_insn), buf);
1337
1338 /* The jump pad is now built. Wire in a jump to our jump pad. This
1339 is always done last (by our caller actually), so that we can
1340 install fast tracepoints with threads running. This relies on
1341 the agent's atomic write support. */
1342 loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
1343 if (loffset > INT_MAX || loffset < INT_MIN)
1344 {
1345 sprintf (err,
1346 "E.Jump pad too far from tracepoint "
1347 "(offset 0x%" PRIx64 " > int32).", loffset);
1348 return 1;
1349 }
1350
1351 offset = (int) loffset;
1352
1353 memcpy (buf, jump_insn, sizeof (jump_insn));
1354 memcpy (buf + 1, &offset, 4);
1355 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1356 *jjump_pad_insn_size = sizeof (jump_insn);
1357
1358 /* Return the end address of our pad. */
1359 *jump_entry = buildaddr;
1360
1361 return 0;
1362 }
1363
1364 #endif /* __x86_64__ */
1365
1366 /* Build a jump pad that saves registers and calls a collection
1367    function.  Writes the instruction that jumps to the jump pad into
1368    JJUMPAD_INSN.  The caller is responsible for writing it in at the
1369    tracepoint address.  */
1370
1371 static int
1372 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1373 CORE_ADDR collector,
1374 CORE_ADDR lockaddr,
1375 ULONGEST orig_size,
1376 CORE_ADDR *jump_entry,
1377 CORE_ADDR *trampoline,
1378 ULONGEST *trampoline_size,
1379 unsigned char *jjump_pad_insn,
1380 ULONGEST *jjump_pad_insn_size,
1381 CORE_ADDR *adjusted_insn_addr,
1382 CORE_ADDR *adjusted_insn_addr_end,
1383 char *err)
1384 {
1385 unsigned char buf[0x100];
1386 int i, offset;
1387 CORE_ADDR buildaddr = *jump_entry;
1388
1389 /* Build the jump pad. */
1390
1391 /* First, do tracepoint data collection. Save registers. */
1392 i = 0;
1393 buf[i++] = 0x60; /* pushad */
1394 buf[i++] = 0x68; /* push tpaddr aka $pc */
1395 *((int *)(buf + i)) = (int) tpaddr;
1396 i += 4;
1397 buf[i++] = 0x9c; /* pushf */
1398 buf[i++] = 0x1e; /* push %ds */
1399 buf[i++] = 0x06; /* push %es */
1400 buf[i++] = 0x0f; /* push %fs */
1401 buf[i++] = 0xa0;
1402 buf[i++] = 0x0f; /* push %gs */
1403 buf[i++] = 0xa8;
1404 buf[i++] = 0x16; /* push %ss */
1405 buf[i++] = 0x0e; /* push %cs */
1406 append_insns (&buildaddr, i, buf);
1407
1408 /* Stack space for the collecting_t object. */
1409 i = 0;
1410 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1411
1412 /* Build the object. */
1413 i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
1414 memcpy (buf + i, &tpoint, 4);
1415 i += 4;
1416 i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */
1417
1418 i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1419 i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1420 append_insns (&buildaddr, i, buf);
1421
1422   /* Spin-lock.  Note this uses cmpxchg, which is not available on the
1423      original i386; if we cared about that, this could use xchg instead.  */
1424
1425 i = 0;
1426 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1427 i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
1428 %esp,<lockaddr> */
1429 memcpy (&buf[i], (void *) &lockaddr, 4);
1430 i += 4;
1431 i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
1432 i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
1433 append_insns (&buildaddr, i, buf);
1434
1435
1436 /* Set up arguments to the gdb_collect call. */
1437 i = 0;
1438 i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
1439 i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
1440 i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1441 append_insns (&buildaddr, i, buf);
1442
1443 i = 0;
1444 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1445 append_insns (&buildaddr, i, buf);
1446
1447 i = 0;
1448 i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
1449 memcpy (&buf[i], (void *) &tpoint, 4);
1450 i += 4;
1451 append_insns (&buildaddr, i, buf);
1452
1453 buf[0] = 0xe8; /* call <reladdr> */
1454 offset = collector - (buildaddr + sizeof (jump_insn));
1455 memcpy (buf + 1, &offset, 4);
1456 append_insns (&buildaddr, 5, buf);
1457 /* Clean up after the call. */
1458 buf[0] = 0x83; /* add $0x8,%esp */
1459 buf[1] = 0xc4;
1460 buf[2] = 0x08;
1461 append_insns (&buildaddr, 3, buf);
1462
1463
1464 /* Clear the spin-lock. This would need the LOCK prefix on older
1465 broken archs. */
1466 i = 0;
1467 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1468 i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
1469 memcpy (buf + i, &lockaddr, 4);
1470 i += 4;
1471 append_insns (&buildaddr, i, buf);
1472
1473
1474   /* Remove the stack space that had been used for the collecting_t object.  */
1475 i = 0;
1476 i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
1477 append_insns (&buildaddr, i, buf);
1478
1479 i = 0;
1480 buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1481 buf[i++] = 0xc4;
1482 buf[i++] = 0x04;
1483 buf[i++] = 0x17; /* pop %ss */
1484 buf[i++] = 0x0f; /* pop %gs */
1485 buf[i++] = 0xa9;
1486 buf[i++] = 0x0f; /* pop %fs */
1487 buf[i++] = 0xa1;
1488 buf[i++] = 0x07; /* pop %es */
1489 buf[i++] = 0x1f; /* pop %ds */
1490 buf[i++] = 0x9d; /* popf */
1491 buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1492 buf[i++] = 0xc4;
1493 buf[i++] = 0x04;
1494 buf[i++] = 0x61; /* popad */
1495 append_insns (&buildaddr, i, buf);
1496
1497 /* Now, adjust the original instruction to execute in the jump
1498 pad. */
1499 *adjusted_insn_addr = buildaddr;
1500 relocate_instruction (&buildaddr, tpaddr);
1501 *adjusted_insn_addr_end = buildaddr;
1502
1503 /* Write the jump back to the program. */
1504 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1505 memcpy (buf, jump_insn, sizeof (jump_insn));
1506 memcpy (buf + 1, &offset, 4);
1507 append_insns (&buildaddr, sizeof (jump_insn), buf);
1508
1509 /* The jump pad is now built. Wire in a jump to our jump pad. This
1510 is always done last (by our caller actually), so that we can
1511 install fast tracepoints with threads running. This relies on
1512 the agent's atomic write support. */
1513 if (orig_size == 4)
1514 {
1515 /* Create a trampoline. */
1516 *trampoline_size = sizeof (jump_insn);
1517 if (!claim_trampoline_space (*trampoline_size, trampoline))
1518 {
1519 /* No trampoline space available. */
1520 strcpy (err,
1521 "E.Cannot allocate trampoline space needed for fast "
1522 "tracepoints on 4-byte instructions.");
1523 return 1;
1524 }
1525
1526 offset = *jump_entry - (*trampoline + sizeof (jump_insn));
1527 memcpy (buf, jump_insn, sizeof (jump_insn));
1528 memcpy (buf + 1, &offset, 4);
1529 target_write_memory (*trampoline, buf, sizeof (jump_insn));
1530
1531 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1532 offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
1533 memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
1534 memcpy (buf + 2, &offset, 2);
1535 memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
1536 *jjump_pad_insn_size = sizeof (small_jump_insn);
1537 }
1538 else
1539 {
1540 /* Else use a 32-bit relative jump instruction. */
1541 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1542 memcpy (buf, jump_insn, sizeof (jump_insn));
1543 memcpy (buf + 1, &offset, 4);
1544 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1545 *jjump_pad_insn_size = sizeof (jump_insn);
1546 }
1547
1548 /* Return the end address of our pad. */
1549 *jump_entry = buildaddr;
1550
1551 return 0;
1552 }
1553
1554 bool
1555 x86_target::supports_fast_tracepoints ()
1556 {
1557 return true;
1558 }
1559
1560 int
1561 x86_target::install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
1562 CORE_ADDR tpaddr,
1563 CORE_ADDR collector,
1564 CORE_ADDR lockaddr,
1565 ULONGEST orig_size,
1566 CORE_ADDR *jump_entry,
1567 CORE_ADDR *trampoline,
1568 ULONGEST *trampoline_size,
1569 unsigned char *jjump_pad_insn,
1570 ULONGEST *jjump_pad_insn_size,
1571 CORE_ADDR *adjusted_insn_addr,
1572 CORE_ADDR *adjusted_insn_addr_end,
1573 char *err)
1574 {
1575 #ifdef __x86_64__
1576 if (is_64bit_tdesc ())
1577 return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1578 collector, lockaddr,
1579 orig_size, jump_entry,
1580 trampoline, trampoline_size,
1581 jjump_pad_insn,
1582 jjump_pad_insn_size,
1583 adjusted_insn_addr,
1584 adjusted_insn_addr_end,
1585 err);
1586 #endif
1587
1588 return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1589 collector, lockaddr,
1590 orig_size, jump_entry,
1591 trampoline, trampoline_size,
1592 jjump_pad_insn,
1593 jjump_pad_insn_size,
1594 adjusted_insn_addr,
1595 adjusted_insn_addr_end,
1596 err);
1597 }
1598
1599 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1600 architectures. */
1601
1602 int
1603 x86_target::get_min_fast_tracepoint_insn_len ()
1604 {
1605 static int warned_about_fast_tracepoints = 0;
1606
1607 #ifdef __x86_64__
1608 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1609 used for fast tracepoints. */
1610 if (is_64bit_tdesc ())
1611 return 5;
1612 #endif
1613
1614 if (agent_loaded_p ())
1615 {
1616 char errbuf[IPA_BUFSIZ];
1617
1618 errbuf[0] = '\0';
1619
1620 /* On x86, if trampolines are available, then 4-byte jump instructions
1621 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1622 with a 4-byte offset are used instead. */
1623 if (have_fast_tracepoint_trampoline_buffer (errbuf))
1624 return 4;
1625 else
1626 {
1627 	  /* GDB has no channel to explain to the user why a shorter fast
1628 tracepoint is not possible, but at least make GDBserver
1629 mention that something has gone awry. */
1630 if (!warned_about_fast_tracepoints)
1631 {
1632 warning ("4-byte fast tracepoints not available; %s", errbuf);
1633 warned_about_fast_tracepoints = 1;
1634 }
1635 return 5;
1636 }
1637 }
1638 else
1639 {
1640 /* Indicate that the minimum length is currently unknown since the IPA
1641 has not loaded yet. */
1642 return 0;
1643 }
1644 }
1645
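/* Append the LEN instruction bytes at START to the code being built at
   current_insn_ptr, advancing current_insn_ptr past them.  */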
1646 static void
1647 add_insns (unsigned char *start, int len)
1648 {
1649 CORE_ADDR buildaddr = current_insn_ptr;
1650
1651 if (debug_threads)
1652 debug_printf ("Adding %d bytes of insn at %s\n",
1653 len, paddress (buildaddr));
1654
1655 append_insns (&buildaddr, len, start);
1656 current_insn_ptr = buildaddr;
1657 }
1658
1659 /* Our general strategy for emitting code is to avoid specifying raw
1660 bytes whenever possible, and instead copy a block of inline asm
1661 that is embedded in the function. This is a little messy, because
1662 we need to keep the compiler from discarding what looks like dead
1663 code, plus suppress various warnings. */
1664
1665 #define EMIT_ASM(NAME, INSNS) \
1666 do \
1667 { \
1668 extern unsigned char start_ ## NAME, end_ ## NAME; \
1669 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1670 __asm__ ("jmp end_" #NAME "\n" \
1671 "\t" "start_" #NAME ":" \
1672 "\t" INSNS "\n" \
1673 "\t" "end_" #NAME ":"); \
1674 } while (0)
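
/* Purely illustrative, hypothetical use of EMIT_ASM (none of the real
   emitters below is this trivial): the bytes of the named fragment,
   delimited by the start_/end_ labels, are copied to the current
   instruction pointer by add_insns.  */
#if 0
static void
example_emit_nop (void)
{
  EMIT_ASM (example_nop, "nop");
}
#endif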
1675
1676 #ifdef __x86_64__
1677
1678 #define EMIT_ASM32(NAME,INSNS) \
1679 do \
1680 { \
1681 extern unsigned char start_ ## NAME, end_ ## NAME; \
1682 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1683 __asm__ (".code32\n" \
1684 "\t" "jmp end_" #NAME "\n" \
1685 "\t" "start_" #NAME ":\n" \
1686 "\t" INSNS "\n" \
1687 "\t" "end_" #NAME ":\n" \
1688 ".code64\n"); \
1689 } while (0)
1690
1691 #else
1692
1693 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
1694
1695 #endif
1696
1697 #ifdef __x86_64__
1698
1699 static void
1700 amd64_emit_prologue (void)
1701 {
1702 EMIT_ASM (amd64_prologue,
1703 "pushq %rbp\n\t"
1704 "movq %rsp,%rbp\n\t"
1705 "sub $0x20,%rsp\n\t"
1706 "movq %rdi,-8(%rbp)\n\t"
1707 "movq %rsi,-16(%rbp)");
1708 }
1709
1710
1711 static void
1712 amd64_emit_epilogue (void)
1713 {
1714 EMIT_ASM (amd64_epilogue,
1715 "movq -16(%rbp),%rdi\n\t"
1716 "movq %rax,(%rdi)\n\t"
1717 "xor %rax,%rax\n\t"
1718 "leave\n\t"
1719 "ret");
1720 }
1721
1722 static void
1723 amd64_emit_add (void)
1724 {
1725 EMIT_ASM (amd64_add,
1726 "add (%rsp),%rax\n\t"
1727 "lea 0x8(%rsp),%rsp");
1728 }
1729
1730 static void
1731 amd64_emit_sub (void)
1732 {
1733 EMIT_ASM (amd64_sub,
1734 "sub %rax,(%rsp)\n\t"
1735 "pop %rax");
1736 }
1737
1738 static void
1739 amd64_emit_mul (void)
1740 {
1741 emit_error = 1;
1742 }
1743
1744 static void
1745 amd64_emit_lsh (void)
1746 {
1747 emit_error = 1;
1748 }
1749
1750 static void
1751 amd64_emit_rsh_signed (void)
1752 {
1753 emit_error = 1;
1754 }
1755
1756 static void
1757 amd64_emit_rsh_unsigned (void)
1758 {
1759 emit_error = 1;
1760 }
1761
1762 static void
1763 amd64_emit_ext (int arg)
1764 {
1765 switch (arg)
1766 {
1767 case 8:
1768 EMIT_ASM (amd64_ext_8,
1769 "cbtw\n\t"
1770 "cwtl\n\t"
1771 "cltq");
1772 break;
1773 case 16:
1774 EMIT_ASM (amd64_ext_16,
1775 "cwtl\n\t"
1776 "cltq");
1777 break;
1778 case 32:
1779 EMIT_ASM (amd64_ext_32,
1780 "cltq");
1781 break;
1782 default:
1783 emit_error = 1;
1784 }
1785 }
1786
1787 static void
1788 amd64_emit_log_not (void)
1789 {
1790 EMIT_ASM (amd64_log_not,
1791 "test %rax,%rax\n\t"
1792 "sete %cl\n\t"
1793 "movzbq %cl,%rax");
1794 }
1795
1796 static void
1797 amd64_emit_bit_and (void)
1798 {
1799 EMIT_ASM (amd64_and,
1800 "and (%rsp),%rax\n\t"
1801 "lea 0x8(%rsp),%rsp");
1802 }
1803
1804 static void
1805 amd64_emit_bit_or (void)
1806 {
1807 EMIT_ASM (amd64_or,
1808 "or (%rsp),%rax\n\t"
1809 "lea 0x8(%rsp),%rsp");
1810 }
1811
1812 static void
1813 amd64_emit_bit_xor (void)
1814 {
1815 EMIT_ASM (amd64_xor,
1816 "xor (%rsp),%rax\n\t"
1817 "lea 0x8(%rsp),%rsp");
1818 }
1819
1820 static void
1821 amd64_emit_bit_not (void)
1822 {
1823 EMIT_ASM (amd64_bit_not,
1824 "xorq $0xffffffffffffffff,%rax");
1825 }
1826
1827 static void
1828 amd64_emit_equal (void)
1829 {
1830 EMIT_ASM (amd64_equal,
1831 "cmp %rax,(%rsp)\n\t"
1832 "je .Lamd64_equal_true\n\t"
1833 "xor %rax,%rax\n\t"
1834 "jmp .Lamd64_equal_end\n\t"
1835 ".Lamd64_equal_true:\n\t"
1836 "mov $0x1,%rax\n\t"
1837 ".Lamd64_equal_end:\n\t"
1838 "lea 0x8(%rsp),%rsp");
1839 }
1840
1841 static void
1842 amd64_emit_less_signed (void)
1843 {
1844 EMIT_ASM (amd64_less_signed,
1845 "cmp %rax,(%rsp)\n\t"
1846 "jl .Lamd64_less_signed_true\n\t"
1847 "xor %rax,%rax\n\t"
1848 "jmp .Lamd64_less_signed_end\n\t"
1849 ".Lamd64_less_signed_true:\n\t"
1850 "mov $1,%rax\n\t"
1851 ".Lamd64_less_signed_end:\n\t"
1852 "lea 0x8(%rsp),%rsp");
1853 }
1854
1855 static void
1856 amd64_emit_less_unsigned (void)
1857 {
1858 EMIT_ASM (amd64_less_unsigned,
1859 "cmp %rax,(%rsp)\n\t"
1860 "jb .Lamd64_less_unsigned_true\n\t"
1861 "xor %rax,%rax\n\t"
1862 "jmp .Lamd64_less_unsigned_end\n\t"
1863 ".Lamd64_less_unsigned_true:\n\t"
1864 "mov $1,%rax\n\t"
1865 ".Lamd64_less_unsigned_end:\n\t"
1866 "lea 0x8(%rsp),%rsp");
1867 }
1868
1869 static void
1870 amd64_emit_ref (int size)
1871 {
1872 switch (size)
1873 {
1874 case 1:
1875 EMIT_ASM (amd64_ref1,
1876 "movb (%rax),%al");
1877 break;
1878 case 2:
1879 EMIT_ASM (amd64_ref2,
1880 "movw (%rax),%ax");
1881 break;
1882 case 4:
1883 EMIT_ASM (amd64_ref4,
1884 "movl (%rax),%eax");
1885 break;
1886 case 8:
1887 EMIT_ASM (amd64_ref8,
1888 "movq (%rax),%rax");
1889 break;
1890 }
1891 }
1892
1893 static void
1894 amd64_emit_if_goto (int *offset_p, int *size_p)
1895 {
1896 EMIT_ASM (amd64_if_goto,
1897 "mov %rax,%rcx\n\t"
1898 "pop %rax\n\t"
1899 "cmp $0,%rcx\n\t"
1900 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1901 if (offset_p)
1902 *offset_p = 10;
1903 if (size_p)
1904 *size_p = 4;
1905 }
1906
1907 static void
1908 amd64_emit_goto (int *offset_p, int *size_p)
1909 {
1910 EMIT_ASM (amd64_goto,
1911 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1912 if (offset_p)
1913 *offset_p = 1;
1914 if (size_p)
1915 *size_p = 4;
1916 }
1917
1918 static void
1919 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
1920 {
1921 int diff = (to - (from + size));
1922 unsigned char buf[sizeof (int)];
1923
1924 if (size != 4)
1925 {
1926 emit_error = 1;
1927 return;
1928 }
1929
1930 memcpy (buf, &diff, sizeof (int));
1931 target_write_memory (from, buf, sizeof (int));
1932 }
1933
1934 static void
1935 amd64_emit_const (LONGEST num)
1936 {
1937 unsigned char buf[16];
1938 int i;
1939 CORE_ADDR buildaddr = current_insn_ptr;
1940
1941 i = 0;
1942 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
1943 memcpy (&buf[i], &num, sizeof (num));
1944 i += 8;
1945 append_insns (&buildaddr, i, buf);
1946 current_insn_ptr = buildaddr;
1947 }
1948
1949 static void
1950 amd64_emit_call (CORE_ADDR fn)
1951 {
1952 unsigned char buf[16];
1953 int i;
1954 CORE_ADDR buildaddr;
1955 LONGEST offset64;
1956
1957   /* The destination function, being in the shared library, may be more
1958      than 31 bits away from the compiled code pad.  */
1959
1960 buildaddr = current_insn_ptr;
1961
1962 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
1963
1964 i = 0;
1965
1966 if (offset64 > INT_MAX || offset64 < INT_MIN)
1967 {
1968       /* Offset is too large for a direct call.  Use callq through a register
1969 	 instead, which we otherwise avoid since it needs a register.  Use r10;
1970 	 since it is call-clobbered, we don't have to push/pop it.  */
1971 buf[i++] = 0x48; /* mov $fn,%r10 */
1972 buf[i++] = 0xba;
1973 memcpy (buf + i, &fn, 8);
1974 i += 8;
1975 buf[i++] = 0xff; /* callq *%r10 */
1976 buf[i++] = 0xd2;
1977 }
1978 else
1979 {
1980 int offset32 = offset64; /* we know we can't overflow here. */
1981
1982 buf[i++] = 0xe8; /* call <reladdr> */
1983 memcpy (buf + i, &offset32, 4);
1984 i += 4;
1985 }
1986
1987 append_insns (&buildaddr, i, buf);
1988 current_insn_ptr = buildaddr;
1989 }
1990
1991 static void
1992 amd64_emit_reg (int reg)
1993 {
1994 unsigned char buf[16];
1995 int i;
1996 CORE_ADDR buildaddr;
1997
1998 /* Assume raw_regs is still in %rdi. */
1999 buildaddr = current_insn_ptr;
2000 i = 0;
2001 buf[i++] = 0xbe; /* mov $<n>,%esi */
2002 memcpy (&buf[i], &reg, sizeof (reg));
2003 i += 4;
2004 append_insns (&buildaddr, i, buf);
2005 current_insn_ptr = buildaddr;
2006 amd64_emit_call (get_raw_reg_func_addr ());
2007 }
2008
2009 static void
2010 amd64_emit_pop (void)
2011 {
2012 EMIT_ASM (amd64_pop,
2013 "pop %rax");
2014 }
2015
2016 static void
2017 amd64_emit_stack_flush (void)
2018 {
2019 EMIT_ASM (amd64_stack_flush,
2020 "push %rax");
2021 }
2022
2023 static void
2024 amd64_emit_zero_ext (int arg)
2025 {
2026 switch (arg)
2027 {
2028 case 8:
2029 EMIT_ASM (amd64_zero_ext_8,
2030 "and $0xff,%rax");
2031 break;
2032 case 16:
2033 EMIT_ASM (amd64_zero_ext_16,
2034 "and $0xffff,%rax");
2035 break;
2036 case 32:
2037 EMIT_ASM (amd64_zero_ext_32,
2038 "mov $0xffffffff,%rcx\n\t"
2039 "and %rcx,%rax");
2040 break;
2041 default:
2042 emit_error = 1;
2043 }
2044 }
2045
2046 static void
2047 amd64_emit_swap (void)
2048 {
2049 EMIT_ASM (amd64_swap,
2050 "mov %rax,%rcx\n\t"
2051 "pop %rax\n\t"
2052 "push %rcx");
2053 }
2054
2055 static void
2056 amd64_emit_stack_adjust (int n)
2057 {
2058 unsigned char buf[16];
2059 int i;
2060 CORE_ADDR buildaddr = current_insn_ptr;
2061
2062 i = 0;
2063 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
2064 buf[i++] = 0x8d;
2065 buf[i++] = 0x64;
2066 buf[i++] = 0x24;
2067 /* This only handles adjustments up to 16, but we don't expect any more. */
2068 buf[i++] = n * 8;
2069 append_insns (&buildaddr, i, buf);
2070 current_insn_ptr = buildaddr;
2071 }
2072
2073 /* FN's prototype is `LONGEST(*fn)(int)'. */
2074
2075 static void
2076 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2077 {
2078 unsigned char buf[16];
2079 int i;
2080 CORE_ADDR buildaddr;
2081
2082 buildaddr = current_insn_ptr;
2083 i = 0;
2084 buf[i++] = 0xbf; /* movl $<n>,%edi */
2085 memcpy (&buf[i], &arg1, sizeof (arg1));
2086 i += 4;
2087 append_insns (&buildaddr, i, buf);
2088 current_insn_ptr = buildaddr;
2089 amd64_emit_call (fn);
2090 }
2091
2092 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2093
2094 static void
2095 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2096 {
2097 unsigned char buf[16];
2098 int i;
2099 CORE_ADDR buildaddr;
2100
2101 buildaddr = current_insn_ptr;
2102 i = 0;
2103 buf[i++] = 0xbf; /* movl $<n>,%edi */
2104 memcpy (&buf[i], &arg1, sizeof (arg1));
2105 i += 4;
2106 append_insns (&buildaddr, i, buf);
2107 current_insn_ptr = buildaddr;
2108 EMIT_ASM (amd64_void_call_2_a,
2109 /* Save away a copy of the stack top. */
2110 "push %rax\n\t"
2111 /* Also pass top as the second argument. */
2112 "mov %rax,%rsi");
2113 amd64_emit_call (fn);
2114 EMIT_ASM (amd64_void_call_2_b,
2115 /* Restore the stack top; %rax may have been trashed by the call. */
2116 "pop %rax");
2117 }
2118
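/* The conditional-goto emitters below all follow one pattern: compare the
next-to-top stack entry at (%rsp) with the top in %rax, drop both entries,
and either fall through or take a 'jmp' whose opcode is emitted as raw
0xe9 bytes. The jump's 32-bit operand is patched in later via
amd64_write_goto_address; *OFFSET_P and *SIZE_P tell the caller where that
operand lives within the emitted sequence. */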
2119 static void
2120 amd64_emit_eq_goto (int *offset_p, int *size_p)
2121 {
2122 EMIT_ASM (amd64_eq,
2123 "cmp %rax,(%rsp)\n\t"
2124 "jne .Lamd64_eq_fallthru\n\t"
2125 "lea 0x8(%rsp),%rsp\n\t"
2126 "pop %rax\n\t"
2127 /* jmp, but don't trust the assembler to choose the right jump */
2128 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2129 ".Lamd64_eq_fallthru:\n\t"
2130 "lea 0x8(%rsp),%rsp\n\t"
2131 "pop %rax");
2132
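/* The value 13 below assumes the short encodings for the sequence above:
cmp %rax,(%rsp) is 4 bytes, the short jne is 2, the lea is 5 and pop %rax
is 1, giving 12 bytes before the 0xe9 opcode, so the jump's 32-bit operand
starts at byte offset 13. The other amd64 comparison emitters are laid
out identically. */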
2133 if (offset_p)
2134 *offset_p = 13;
2135 if (size_p)
2136 *size_p = 4;
2137 }
2138
2139 static void
2140 amd64_emit_ne_goto (int *offset_p, int *size_p)
2141 {
2142 EMIT_ASM (amd64_ne,
2143 "cmp %rax,(%rsp)\n\t"
2144 "je .Lamd64_ne_fallthru\n\t"
2145 "lea 0x8(%rsp),%rsp\n\t"
2146 "pop %rax\n\t"
2147 /* jmp, but don't trust the assembler to choose the right jump */
2148 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2149 ".Lamd64_ne_fallthru:\n\t"
2150 "lea 0x8(%rsp),%rsp\n\t"
2151 "pop %rax");
2152
2153 if (offset_p)
2154 *offset_p = 13;
2155 if (size_p)
2156 *size_p = 4;
2157 }
2158
2159 static void
2160 amd64_emit_lt_goto (int *offset_p, int *size_p)
2161 {
2162 EMIT_ASM (amd64_lt,
2163 "cmp %rax,(%rsp)\n\t"
2164 "jnl .Lamd64_lt_fallthru\n\t"
2165 "lea 0x8(%rsp),%rsp\n\t"
2166 "pop %rax\n\t"
2167 /* jmp, but don't trust the assembler to choose the right jump */
2168 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2169 ".Lamd64_lt_fallthru:\n\t"
2170 "lea 0x8(%rsp),%rsp\n\t"
2171 "pop %rax");
2172
2173 if (offset_p)
2174 *offset_p = 13;
2175 if (size_p)
2176 *size_p = 4;
2177 }
2178
2179 static void
2180 amd64_emit_le_goto (int *offset_p, int *size_p)
2181 {
2182 EMIT_ASM (amd64_le,
2183 "cmp %rax,(%rsp)\n\t"
2184 "jnle .Lamd64_le_fallthru\n\t"
2185 "lea 0x8(%rsp),%rsp\n\t"
2186 "pop %rax\n\t"
2187 /* jmp, but don't trust the assembler to choose the right jump */
2188 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2189 ".Lamd64_le_fallthru:\n\t"
2190 "lea 0x8(%rsp),%rsp\n\t"
2191 "pop %rax");
2192
2193 if (offset_p)
2194 *offset_p = 13;
2195 if (size_p)
2196 *size_p = 4;
2197 }
2198
2199 static void
2200 amd64_emit_gt_goto (int *offset_p, int *size_p)
2201 {
2202 EMIT_ASM (amd64_gt,
2203 "cmp %rax,(%rsp)\n\t"
2204 "jng .Lamd64_gt_fallthru\n\t"
2205 "lea 0x8(%rsp),%rsp\n\t"
2206 "pop %rax\n\t"
2207 /* jmp, but don't trust the assembler to choose the right jump */
2208 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2209 ".Lamd64_gt_fallthru:\n\t"
2210 "lea 0x8(%rsp),%rsp\n\t"
2211 "pop %rax");
2212
2213 if (offset_p)
2214 *offset_p = 13;
2215 if (size_p)
2216 *size_p = 4;
2217 }
2218
2219 static void
2220 amd64_emit_ge_goto (int *offset_p, int *size_p)
2221 {
2222 EMIT_ASM (amd64_ge,
2223 "cmp %rax,(%rsp)\n\t"
2224 "jnge .Lamd64_ge_fallthru\n\t"
2225 ".Lamd64_ge_jump:\n\t"
2226 "lea 0x8(%rsp),%rsp\n\t"
2227 "pop %rax\n\t"
2228 /* jmp, but don't trust the assembler to choose the right jump */
2229 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2230 ".Lamd64_ge_fallthru:\n\t"
2231 "lea 0x8(%rsp),%rsp\n\t"
2232 "pop %rax");
2233
2234 if (offset_p)
2235 *offset_p = 13;
2236 if (size_p)
2237 *size_p = 4;
2238 }
2239
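/* The initializers below are positional, so their order must match the
member order of 'struct emit_ops'. This is the vector handed out by
x86_target::emit_ops for 64-bit inferiors (see the end of this file). */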
2240 struct emit_ops amd64_emit_ops =
2241 {
2242 amd64_emit_prologue,
2243 amd64_emit_epilogue,
2244 amd64_emit_add,
2245 amd64_emit_sub,
2246 amd64_emit_mul,
2247 amd64_emit_lsh,
2248 amd64_emit_rsh_signed,
2249 amd64_emit_rsh_unsigned,
2250 amd64_emit_ext,
2251 amd64_emit_log_not,
2252 amd64_emit_bit_and,
2253 amd64_emit_bit_or,
2254 amd64_emit_bit_xor,
2255 amd64_emit_bit_not,
2256 amd64_emit_equal,
2257 amd64_emit_less_signed,
2258 amd64_emit_less_unsigned,
2259 amd64_emit_ref,
2260 amd64_emit_if_goto,
2261 amd64_emit_goto,
2262 amd64_write_goto_address,
2263 amd64_emit_const,
2264 amd64_emit_call,
2265 amd64_emit_reg,
2266 amd64_emit_pop,
2267 amd64_emit_stack_flush,
2268 amd64_emit_zero_ext,
2269 amd64_emit_swap,
2270 amd64_emit_stack_adjust,
2271 amd64_emit_int_call_1,
2272 amd64_emit_void_call_2,
2273 amd64_emit_eq_goto,
2274 amd64_emit_ne_goto,
2275 amd64_emit_lt_goto,
2276 amd64_emit_le_goto,
2277 amd64_emit_gt_goto,
2278 amd64_emit_ge_goto
2279 };
2280
2281 #endif /* __x86_64__ */
2282
2283 static void
2284 i386_emit_prologue (void)
2285 {
2286 EMIT_ASM32 (i386_prologue,
2287 "push %ebp\n\t"
2288 "mov %esp,%ebp\n\t"
2289 "push %ebx");
2290 /* At this point, the raw regs base address is at 8(%ebp), and the
2291 value pointer is at 12(%ebp). */
2292 }
2293
2294 static void
2295 i386_emit_epilogue (void)
2296 {
2297 EMIT_ASM32 (i386_epilogue,
2298 "mov 12(%ebp),%ecx\n\t"
2299 "mov %eax,(%ecx)\n\t"
2300 "mov %ebx,0x4(%ecx)\n\t"
2301 "xor %eax,%eax\n\t"
2302 "pop %ebx\n\t"
2303 "pop %ebp\n\t"
2304 "ret");
2305 }
2306
2307 static void
2308 i386_emit_add (void)
2309 {
2310 EMIT_ASM32 (i386_add,
2311 "add (%esp),%eax\n\t"
2312 "adc 0x4(%esp),%ebx\n\t"
2313 "lea 0x8(%esp),%esp");
2314 }
2315
2316 static void
2317 i386_emit_sub (void)
2318 {
2319 EMIT_ASM32 (i386_sub,
2320 "subl %eax,(%esp)\n\t"
2321 "sbbl %ebx,4(%esp)\n\t"
2322 "pop %eax\n\t"
2323 "pop %ebx\n\t");
2324 }
2325
2326 static void
2327 i386_emit_mul (void)
2328 {
2329 emit_error = 1;
2330 }
2331
2332 static void
2333 i386_emit_lsh (void)
2334 {
2335 emit_error = 1;
2336 }
2337
2338 static void
2339 i386_emit_rsh_signed (void)
2340 {
2341 emit_error = 1;
2342 }
2343
2344 static void
2345 i386_emit_rsh_unsigned (void)
2346 {
2347 emit_error = 1;
2348 }
2349
2350 static void
2351 i386_emit_ext (int arg)
2352 {
2353 switch (arg)
2354 {
2355 case 8:
2356 EMIT_ASM32 (i386_ext_8,
2357 "cbtw\n\t"
2358 "cwtl\n\t"
2359 "movl %eax,%ebx\n\t"
2360 "sarl $31,%ebx");
2361 break;
2362 case 16:
2363 EMIT_ASM32 (i386_ext_16,
2364 "cwtl\n\t"
2365 "movl %eax,%ebx\n\t"
2366 "sarl $31,%ebx");
2367 break;
2368 case 32:
2369 EMIT_ASM32 (i386_ext_32,
2370 "movl %eax,%ebx\n\t"
2371 "sarl $31,%ebx");
2372 break;
2373 default:
2374 emit_error = 1;
2375 }
2376 }
2377
2378 static void
2379 i386_emit_log_not (void)
2380 {
2381 EMIT_ASM32 (i386_log_not,
2382 "or %ebx,%eax\n\t"
2383 "test %eax,%eax\n\t"
2384 "sete %cl\n\t"
2385 "xor %ebx,%ebx\n\t"
2386 "movzbl %cl,%eax");
2387 }
2388
2389 static void
2390 i386_emit_bit_and (void)
2391 {
2392 EMIT_ASM32 (i386_and,
2393 "and (%esp),%eax\n\t"
2394 "and 0x4(%esp),%ebx\n\t"
2395 "lea 0x8(%esp),%esp");
2396 }
2397
2398 static void
2399 i386_emit_bit_or (void)
2400 {
2401 EMIT_ASM32 (i386_or,
2402 "or (%esp),%eax\n\t"
2403 "or 0x4(%esp),%ebx\n\t"
2404 "lea 0x8(%esp),%esp");
2405 }
2406
2407 static void
2408 i386_emit_bit_xor (void)
2409 {
2410 EMIT_ASM32 (i386_xor,
2411 "xor (%esp),%eax\n\t"
2412 "xor 0x4(%esp),%ebx\n\t"
2413 "lea 0x8(%esp),%esp");
2414 }
2415
2416 static void
2417 i386_emit_bit_not (void)
2418 {
2419 EMIT_ASM32 (i386_bit_not,
2420 "xor $0xffffffff,%eax\n\t"
2421 "xor $0xffffffff,%ebx\n\t");
2422 }
2423
2424 static void
2425 i386_emit_equal (void)
2426 {
2427 EMIT_ASM32 (i386_equal,
2428 "cmpl %ebx,4(%esp)\n\t"
2429 "jne .Li386_equal_false\n\t"
2430 "cmpl %eax,(%esp)\n\t"
2431 "je .Li386_equal_true\n\t"
2432 ".Li386_equal_false:\n\t"
2433 "xor %eax,%eax\n\t"
2434 "jmp .Li386_equal_end\n\t"
2435 ".Li386_equal_true:\n\t"
2436 "mov $1,%eax\n\t"
2437 ".Li386_equal_end:\n\t"
2438 "xor %ebx,%ebx\n\t"
2439 "lea 0x8(%esp),%esp");
2440 }
2441
2442 static void
2443 i386_emit_less_signed (void)
2444 {
2445 EMIT_ASM32 (i386_less_signed,
2446 "cmpl %ebx,4(%esp)\n\t"
2447 "jl .Li386_less_signed_true\n\t"
2448 "jne .Li386_less_signed_false\n\t"
2449 "cmpl %eax,(%esp)\n\t"
2450 "jl .Li386_less_signed_true\n\t"
2451 ".Li386_less_signed_false:\n\t"
2452 "xor %eax,%eax\n\t"
2453 "jmp .Li386_less_signed_end\n\t"
2454 ".Li386_less_signed_true:\n\t"
2455 "mov $1,%eax\n\t"
2456 ".Li386_less_signed_end:\n\t"
2457 "xor %ebx,%ebx\n\t"
2458 "lea 0x8(%esp),%esp");
2459 }
2460
2461 static void
2462 i386_emit_less_unsigned (void)
2463 {
2464 EMIT_ASM32 (i386_less_unsigned,
2465 "cmpl %ebx,4(%esp)\n\t"
2466 "jb .Li386_less_unsigned_true\n\t"
2467 "jne .Li386_less_unsigned_false\n\t"
2468 "cmpl %eax,(%esp)\n\t"
2469 "jb .Li386_less_unsigned_true\n\t"
2470 ".Li386_less_unsigned_false:\n\t"
2471 "xor %eax,%eax\n\t"
2472 "jmp .Li386_less_unsigned_end\n\t"
2473 ".Li386_less_unsigned_true:\n\t"
2474 "mov $1,%eax\n\t"
2475 ".Li386_less_unsigned_end:\n\t"
2476 "xor %ebx,%ebx\n\t"
2477 "lea 0x8(%esp),%esp");
2478 }
2479
2480 static void
2481 i386_emit_ref (int size)
2482 {
2483 switch (size)
2484 {
2485 case 1:
2486 EMIT_ASM32 (i386_ref1,
2487 "movb (%eax),%al");
2488 break;
2489 case 2:
2490 EMIT_ASM32 (i386_ref2,
2491 "movw (%eax),%ax");
2492 break;
2493 case 4:
2494 EMIT_ASM32 (i386_ref4,
2495 "movl (%eax),%eax");
2496 break;
2497 case 8:
2498 EMIT_ASM32 (i386_ref8,
2499 "movl 4(%eax),%ebx\n\t"
2500 "movl (%eax),%eax");
2501 break;
2502 }
2503 }
2504
2505 static void
2506 i386_emit_if_goto (int *offset_p, int *size_p)
2507 {
2508 EMIT_ASM32 (i386_if_goto,
2509 "mov %eax,%ecx\n\t"
2510 "or %ebx,%ecx\n\t"
2511 "pop %eax\n\t"
2512 "pop %ebx\n\t"
2513 "cmpl $0,%ecx\n\t"
2514 /* Don't trust the assembler to choose the right jump */
2515 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2516
2517 if (offset_p)
2518 *offset_p = 11; /* be sure that this matches the sequence above */
2519 if (size_p)
2520 *size_p = 4;
2521 }
2522
2523 static void
2524 i386_emit_goto (int *offset_p, int *size_p)
2525 {
2526 EMIT_ASM32 (i386_goto,
2527 /* Don't trust the assembler to choose the right jump */
2528 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2529 if (offset_p)
2530 *offset_p = 1;
2531 if (size_p)
2532 *size_p = 4;
2533 }
2534
2535 static void
2536 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2537 {
2538 int diff = (to - (from + size));
2539 unsigned char buf[sizeof (int)];
2540
2541 /* We're only doing 4-byte sizes at the moment. */
2542 if (size != 4)
2543 {
2544 emit_error = 1;
2545 return;
2546 }
2547
2548 memcpy (buf, &diff, sizeof (int));
2549 target_write_memory (from, buf, sizeof (int));
2550 }
2551
2552 static void
2553 i386_emit_const (LONGEST num)
2554 {
2555 unsigned char buf[16];
2556 int i, hi, lo;
2557 CORE_ADDR buildaddr = current_insn_ptr;
2558
2559 i = 0;
2560 buf[i++] = 0xb8; /* mov $<n>,%eax */
2561 lo = num & 0xffffffff;
2562 memcpy (&buf[i], &lo, sizeof (lo));
2563 i += 4;
2564 hi = ((num >> 32) & 0xffffffff);
2565 if (hi)
2566 {
2567 buf[i++] = 0xbb; /* mov $<n>,%ebx */
2568 memcpy (&buf[i], &hi, sizeof (hi));
2569 i += 4;
2570 }
2571 else
2572 {
2573 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2574 }
2575 append_insns (&buildaddr, i, buf);
2576 current_insn_ptr = buildaddr;
2577 }
2578
2579 static void
2580 i386_emit_call (CORE_ADDR fn)
2581 {
2582 unsigned char buf[16];
2583 int i, offset;
2584 CORE_ADDR buildaddr;
2585
2586 buildaddr = current_insn_ptr;
2587 i = 0;
2588 buf[i++] = 0xe8; /* call <reladdr> */
2589 offset = ((int) fn) - (buildaddr + 5);
2590 memcpy (buf + 1, &offset, 4);
2591 append_insns (&buildaddr, 5, buf);
2592 current_insn_ptr = buildaddr;
2593 }
2594
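/* The i386 flavour passes its arguments on the stack, cdecl style: the
code below stores the register number at 4(%esp) and the raw register
block pointer (taken from 8(%ebp); see i386_emit_prologue) at (%esp)
before calling the raw-register fetch routine; the result is left in %eax
and %ebx is cleared so the 64-bit stack-top pair holds the value
zero-extended. */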
2595 static void
2596 i386_emit_reg (int reg)
2597 {
2598 unsigned char buf[16];
2599 int i;
2600 CORE_ADDR buildaddr;
2601
2602 EMIT_ASM32 (i386_reg_a,
2603 "sub $0x8,%esp");
2604 buildaddr = current_insn_ptr;
2605 i = 0;
2606 buf[i++] = 0xb8; /* mov $<n>,%eax */
2607 memcpy (&buf[i], &reg, sizeof (reg));
2608 i += 4;
2609 append_insns (&buildaddr, i, buf);
2610 current_insn_ptr = buildaddr;
2611 EMIT_ASM32 (i386_reg_b,
2612 "mov %eax,4(%esp)\n\t"
2613 "mov 8(%ebp),%eax\n\t"
2614 "mov %eax,(%esp)");
2615 i386_emit_call (get_raw_reg_func_addr ());
2616 EMIT_ASM32 (i386_reg_c,
2617 "xor %ebx,%ebx\n\t"
2618 "lea 0x8(%esp),%esp");
2619 }
2620
2621 static void
2622 i386_emit_pop (void)
2623 {
2624 EMIT_ASM32 (i386_pop,
2625 "pop %eax\n\t"
2626 "pop %ebx");
2627 }
2628
2629 static void
2630 i386_emit_stack_flush (void)
2631 {
2632 EMIT_ASM32 (i386_stack_flush,
2633 "push %ebx\n\t"
2634 "push %eax");
2635 }
2636
2637 static void
2638 i386_emit_zero_ext (int arg)
2639 {
2640 switch (arg)
2641 {
2642 case 8:
2643 EMIT_ASM32 (i386_zero_ext_8,
2644 "and $0xff,%eax\n\t"
2645 "xor %ebx,%ebx");
2646 break;
2647 case 16:
2648 EMIT_ASM32 (i386_zero_ext_16,
2649 "and $0xffff,%eax\n\t"
2650 "xor %ebx,%ebx");
2651 break;
2652 case 32:
2653 EMIT_ASM32 (i386_zero_ext_32,
2654 "xor %ebx,%ebx");
2655 break;
2656 default:
2657 emit_error = 1;
2658 }
2659 }
2660
2661 static void
2662 i386_emit_swap (void)
2663 {
2664 EMIT_ASM32 (i386_swap,
2665 "mov %eax,%ecx\n\t"
2666 "mov %ebx,%edx\n\t"
2667 "pop %eax\n\t"
2668 "pop %ebx\n\t"
2669 "push %edx\n\t"
2670 "push %ecx");
2671 }
2672
2673 static void
2674 i386_emit_stack_adjust (int n)
2675 {
2676 unsigned char buf[16];
2677 int i;
2678 CORE_ADDR buildaddr = current_insn_ptr;
2679
2680 i = 0;
2681 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2682 buf[i++] = 0x64;
2683 buf[i++] = 0x24;
2684 buf[i++] = n * 8; /* Each stack slot holds a 64-bit value as two 32-bit words. */
2685 append_insns (&buildaddr, i, buf);
2686 current_insn_ptr = buildaddr;
2687 }
2688
2689 /* FN's prototype is `LONGEST(*fn)(int)'. */
2690
2691 static void
2692 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
2693 {
2694 unsigned char buf[16];
2695 int i;
2696 CORE_ADDR buildaddr;
2697
2698 EMIT_ASM32 (i386_int_call_1_a,
2699 /* Reserve a bit of stack space. */
2700 "sub $0x8,%esp");
2701 /* Put the one argument on the stack. */
2702 buildaddr = current_insn_ptr;
2703 i = 0;
2704 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2705 buf[i++] = 0x04;
2706 buf[i++] = 0x24;
2707 memcpy (&buf[i], &arg1, sizeof (arg1));
2708 i += 4;
2709 append_insns (&buildaddr, i, buf);
2710 current_insn_ptr = buildaddr;
2711 i386_emit_call (fn);
2712 EMIT_ASM32 (i386_int_call_1_c,
2713 "mov %edx,%ebx\n\t"
2714 "lea 0x8(%esp),%esp");
2715 }
2716
2717 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2718
2719 static void
2720 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
2721 {
2722 unsigned char buf[16];
2723 int i;
2724 CORE_ADDR buildaddr;
2725
2726 EMIT_ASM32 (i386_void_call_2_a,
2727 /* Preserve %eax only; we don't have to worry about %ebx. */
2728 "push %eax\n\t"
2729 /* Reserve a bit of stack space for arguments. */
2730 "sub $0x10,%esp\n\t"
2731 /* Copy "top" to the second argument position. (Note that
2732 we can't assume the function won't scribble on its
2733 arguments, so don't try to restore from this.) */
2734 "mov %eax,4(%esp)\n\t"
2735 "mov %ebx,8(%esp)");
2736 /* Put the first argument on the stack. */
2737 buildaddr = current_insn_ptr;
2738 i = 0;
2739 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2740 buf[i++] = 0x04;
2741 buf[i++] = 0x24;
2742 memcpy (&buf[i], &arg1, sizeof (arg1));
2743 i += 4;
2744 append_insns (&buildaddr, i, buf);
2745 current_insn_ptr = buildaddr;
2746 i386_emit_call (fn);
2747 EMIT_ASM32 (i386_void_call_2_b,
2748 "lea 0x10(%esp),%esp\n\t"
2749 /* Restore original stack top. */
2750 "pop %eax");
2751 }
2752
2753
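/* The i386 conditional-goto emitters mirror the amd64 ones, but each
64-bit value is split into two 32-bit halves: the top of the stack is kept
in %ebx:%eax and the next entry sits at 4(%esp):(%esp). The comparisons
therefore test both halves before either falling through or taking the raw
0xe9 jump, whose operand i386_write_goto_address patches in later. */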
2754 static void
2755 i386_emit_eq_goto (int *offset_p, int *size_p)
2756 {
2757 EMIT_ASM32 (eq,
2758 /* Check the low half first; it is more likely to be the decider. */
2759 "cmpl %eax,(%esp)\n\t"
2760 "jne .Leq_fallthru\n\t"
2761 "cmpl %ebx,4(%esp)\n\t"
2762 "jne .Leq_fallthru\n\t"
2763 "lea 0x8(%esp),%esp\n\t"
2764 "pop %eax\n\t"
2765 "pop %ebx\n\t"
2766 /* jmp, but don't trust the assembler to choose the right jump */
2767 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2768 ".Leq_fallthru:\n\t"
2769 "lea 0x8(%esp),%esp\n\t"
2770 "pop %eax\n\t"
2771 "pop %ebx");
2772
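/* As with the amd64 emitters, 18 assumes the short encodings for the
sequence above: the two cmpl instructions (3 and 4 bytes), two short
conditional jumps (2 bytes each), the lea (4 bytes) and two pops (1 byte
each) add up to 17 bytes before the 0xe9 opcode, so the jump's operand
starts at offset 18. The lt/le/gt/ge variants contain one more short
conditional jump, which is why they use 20. */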
2773 if (offset_p)
2774 *offset_p = 18;
2775 if (size_p)
2776 *size_p = 4;
2777 }
2778
2779 static void
2780 i386_emit_ne_goto (int *offset_p, int *size_p)
2781 {
2782 EMIT_ASM32 (ne,
2783 /* Check the low half first; it is more likely to be the decider. */
2784 "cmpl %eax,(%esp)\n\t"
2785 "jne .Lne_jump\n\t"
2786 "cmpl %ebx,4(%esp)\n\t"
2787 "je .Lne_fallthru\n\t"
2788 ".Lne_jump:\n\t"
2789 "lea 0x8(%esp),%esp\n\t"
2790 "pop %eax\n\t"
2791 "pop %ebx\n\t"
2792 /* jmp, but don't trust the assembler to choose the right jump */
2793 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2794 ".Lne_fallthru:\n\t"
2795 "lea 0x8(%esp),%esp\n\t"
2796 "pop %eax\n\t"
2797 "pop %ebx");
2798
2799 if (offset_p)
2800 *offset_p = 18;
2801 if (size_p)
2802 *size_p = 4;
2803 }
2804
2805 static void
2806 i386_emit_lt_goto (int *offset_p, int *size_p)
2807 {
2808 EMIT_ASM32 (lt,
2809 "cmpl %ebx,4(%esp)\n\t"
2810 "jl .Llt_jump\n\t"
2811 "jne .Llt_fallthru\n\t"
2812 "cmpl %eax,(%esp)\n\t"
2813 "jnl .Llt_fallthru\n\t"
2814 ".Llt_jump:\n\t"
2815 "lea 0x8(%esp),%esp\n\t"
2816 "pop %eax\n\t"
2817 "pop %ebx\n\t"
2818 /* jmp, but don't trust the assembler to choose the right jump */
2819 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2820 ".Llt_fallthru:\n\t"
2821 "lea 0x8(%esp),%esp\n\t"
2822 "pop %eax\n\t"
2823 "pop %ebx");
2824
2825 if (offset_p)
2826 *offset_p = 20;
2827 if (size_p)
2828 *size_p = 4;
2829 }
2830
2831 static void
2832 i386_emit_le_goto (int *offset_p, int *size_p)
2833 {
2834 EMIT_ASM32 (le,
2835 "cmpl %ebx,4(%esp)\n\t"
2836 "jle .Lle_jump\n\t"
2837 "jne .Lle_fallthru\n\t"
2838 "cmpl %eax,(%esp)\n\t"
2839 "jnle .Lle_fallthru\n\t"
2840 ".Lle_jump:\n\t"
2841 "lea 0x8(%esp),%esp\n\t"
2842 "pop %eax\n\t"
2843 "pop %ebx\n\t"
2844 /* jmp, but don't trust the assembler to choose the right jump */
2845 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2846 ".Lle_fallthru:\n\t"
2847 "lea 0x8(%esp),%esp\n\t"
2848 "pop %eax\n\t"
2849 "pop %ebx");
2850
2851 if (offset_p)
2852 *offset_p = 20;
2853 if (size_p)
2854 *size_p = 4;
2855 }
2856
2857 static void
2858 i386_emit_gt_goto (int *offset_p, int *size_p)
2859 {
2860 EMIT_ASM32 (gt,
2861 "cmpl %ebx,4(%esp)\n\t"
2862 "jg .Lgt_jump\n\t"
2863 "jne .Lgt_fallthru\n\t"
2864 "cmpl %eax,(%esp)\n\t"
2865 "jng .Lgt_fallthru\n\t"
2866 ".Lgt_jump:\n\t"
2867 "lea 0x8(%esp),%esp\n\t"
2868 "pop %eax\n\t"
2869 "pop %ebx\n\t"
2870 /* jmp, but don't trust the assembler to choose the right jump */
2871 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2872 ".Lgt_fallthru:\n\t"
2873 "lea 0x8(%esp),%esp\n\t"
2874 "pop %eax\n\t"
2875 "pop %ebx");
2876
2877 if (offset_p)
2878 *offset_p = 20;
2879 if (size_p)
2880 *size_p = 4;
2881 }
2882
2883 static void
2884 i386_emit_ge_goto (int *offset_p, int *size_p)
2885 {
2886 EMIT_ASM32 (ge,
2887 "cmpl %ebx,4(%esp)\n\t"
2888 "jge .Lge_jump\n\t"
2889 "jne .Lge_fallthru\n\t"
2890 "cmpl %eax,(%esp)\n\t"
2891 "jnge .Lge_fallthru\n\t"
2892 ".Lge_jump:\n\t"
2893 "lea 0x8(%esp),%esp\n\t"
2894 "pop %eax\n\t"
2895 "pop %ebx\n\t"
2896 /* jmp, but don't trust the assembler to choose the right jump */
2897 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2898 ".Lge_fallthru:\n\t"
2899 "lea 0x8(%esp),%esp\n\t"
2900 "pop %eax\n\t"
2901 "pop %ebx");
2902
2903 if (offset_p)
2904 *offset_p = 20;
2905 if (size_p)
2906 *size_p = 4;
2907 }
2908
2909 struct emit_ops i386_emit_ops =
2910 {
2911 i386_emit_prologue,
2912 i386_emit_epilogue,
2913 i386_emit_add,
2914 i386_emit_sub,
2915 i386_emit_mul,
2916 i386_emit_lsh,
2917 i386_emit_rsh_signed,
2918 i386_emit_rsh_unsigned,
2919 i386_emit_ext,
2920 i386_emit_log_not,
2921 i386_emit_bit_and,
2922 i386_emit_bit_or,
2923 i386_emit_bit_xor,
2924 i386_emit_bit_not,
2925 i386_emit_equal,
2926 i386_emit_less_signed,
2927 i386_emit_less_unsigned,
2928 i386_emit_ref,
2929 i386_emit_if_goto,
2930 i386_emit_goto,
2931 i386_write_goto_address,
2932 i386_emit_const,
2933 i386_emit_call,
2934 i386_emit_reg,
2935 i386_emit_pop,
2936 i386_emit_stack_flush,
2937 i386_emit_zero_ext,
2938 i386_emit_swap,
2939 i386_emit_stack_adjust,
2940 i386_emit_int_call_1,
2941 i386_emit_void_call_2,
2942 i386_emit_eq_goto,
2943 i386_emit_ne_goto,
2944 i386_emit_lt_goto,
2945 i386_emit_le_goto,
2946 i386_emit_gt_goto,
2947 i386_emit_ge_goto
2948 };
2949
2950
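/* Implementation of target ops method "emit_ops". Pick the bytecode
compilation vector that matches the inferior's word size. */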
2951 emit_ops *
2952 x86_target::emit_ops ()
2953 {
2954 #ifdef __x86_64__
2955 if (is_64bit_tdesc ())
2956 return &amd64_emit_ops;
2957 else
2958 #endif
2959 return &i386_emit_ops;
2960 }
2961
2962 /* Implementation of target ops method "sw_breakpoint_from_kind". */
2963
2964 const gdb_byte *
2965 x86_target::sw_breakpoint_from_kind (int kind, int *size)
2966 {
2967 *size = x86_breakpoint_len;
2968 return x86_breakpoint;
2969 }
2970
2971 bool
2972 x86_target::low_supports_range_stepping ()
2973 {
2974 return true;
2975 }
2976
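/* Return the index of the target description that the in-process agent
(IPA) should use, based on the current thread's register cache. */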
2977 static int
2978 x86_get_ipa_tdesc_idx (void)
2979 {
2980 struct regcache *regcache = get_thread_regcache (current_thread, 0);
2981 const struct target_desc *tdesc = regcache->tdesc;
2982
2983 #ifdef __x86_64__
2984 return amd64_get_ipa_tdesc_idx (tdesc);
2985 #endif
2986
2987 if (tdesc == tdesc_i386_linux_no_xml)
2988 return X86_TDESC_SSE;
2989
2990 return i386_get_ipa_tdesc_idx (tdesc);
2991 }
2992
2993 /* The x86 'linux_target_ops' vector. Only the IPA target description
2994 index hook remains in it. */
2995
2996 struct linux_target_ops the_low_target =
2997 {
2998 x86_get_ipa_tdesc_idx,
2999 };
3000
3001 /* The linux target ops object. */
3002
3003 linux_process_target *the_linux_target = &the_x86_target;
3004
3005 void
3006 initialize_low_arch (void)
3007 {
3008 /* Initialize the Linux target descriptions. */
3009 #ifdef __x86_64__
3010 tdesc_amd64_linux_no_xml = allocate_target_description ();
3011 copy_target_description (tdesc_amd64_linux_no_xml,
3012 amd64_linux_read_description (X86_XSTATE_SSE_MASK,
3013 false));
3014 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
3015 #endif
3016
3017 tdesc_i386_linux_no_xml = allocate_target_description ();
3018 copy_target_description (tdesc_i386_linux_no_xml,
3019 i386_linux_read_description (X86_XSTATE_SSE_MASK));
3020 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
3021
3022 initialize_regsets_info (&x86_regsets_info);
3023 }