1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
3 Copyright (C) 2002-2022 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include <signal.h>
22 #include <limits.h>
23 #include <inttypes.h>
24 #include "linux-low.h"
25 #include "i387-fp.h"
26 #include "x86-low.h"
27 #include "gdbsupport/x86-xstate.h"
28 #include "nat/gdb_ptrace.h"
29
30 #ifdef __x86_64__
31 #include "nat/amd64-linux-siginfo.h"
32 #endif
33
34 #include "gdb_proc_service.h"
35 /* Don't include elf/common.h if linux/elf.h got included by
36 gdb_proc_service.h. */
37 #ifndef ELFMAG0
38 #include "elf/common.h"
39 #endif
40
41 #include "gdbsupport/agent.h"
42 #include "tdesc.h"
43 #include "tracepoint.h"
44 #include "ax.h"
45 #include "nat/linux-nat.h"
46 #include "nat/x86-linux.h"
47 #include "nat/x86-linux-dregs.h"
48 #include "linux-x86-tdesc.h"
49
50 #ifdef __x86_64__
51 static target_desc_up tdesc_amd64_linux_no_xml;
52 #endif
53 static target_desc_up tdesc_i386_linux_no_xml;
54
55
56 static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
57 static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
58
59 /* Backward compatibility for gdb without XML support. */
60
61 static const char xmltarget_i386_linux_no_xml[] = "@<target>\
62 <architecture>i386</architecture>\
63 <osabi>GNU/Linux</osabi>\
64 </target>";
65
66 #ifdef __x86_64__
67 static const char xmltarget_amd64_linux_no_xml[] = "@<target>\
68 <architecture>i386:x86-64</architecture>\
69 <osabi>GNU/Linux</osabi>\
70 </target>";
71 #endif
72
73 #include <sys/reg.h>
74 #include <sys/procfs.h>
75 #include <sys/uio.h>
76
77 #ifndef PTRACE_GET_THREAD_AREA
78 #define PTRACE_GET_THREAD_AREA 25
79 #endif
80
81 /* This definition comes from prctl.h, but some kernels may not have it. */
82 #ifndef PTRACE_ARCH_PRCTL
83 #define PTRACE_ARCH_PRCTL 30
84 #endif
85
86 /* The following definitions come from prctl.h, but may be absent
87 for certain configurations. */
88 #ifndef ARCH_GET_FS
89 #define ARCH_SET_GS 0x1001
90 #define ARCH_SET_FS 0x1002
91 #define ARCH_GET_FS 0x1003
92 #define ARCH_GET_GS 0x1004
93 #endif
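/* Illustrative sketch only (not part of the build): further below these
   codes are passed as the last ptrace argument of a PTRACE_ARCH_PRCTL
   request, roughly

       void *base;
       ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS);

   which asks the kernel to store the tracee's FS base at &base; see
   ps_get_thread_area and low_get_thread_area below for the real calls.  */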
94
95 /* Linux target op definitions for the x86 architecture.
96 This is initialized assuming an amd64 target.
97 'low_arch_setup' will correct it for i386 or amd64 targets. */
98
99 class x86_target : public linux_process_target
100 {
101 public:
102
103 const regs_info *get_regs_info () override;
104
105 const gdb_byte *sw_breakpoint_from_kind (int kind, int *size) override;
106
107 bool supports_z_point_type (char z_type) override;
108
109 void process_qsupported (gdb::array_view<const char * const> features) override;
110
111 bool supports_tracepoints () override;
112
113 bool supports_fast_tracepoints () override;
114
115 int install_fast_tracepoint_jump_pad
116 (CORE_ADDR tpoint, CORE_ADDR tpaddr, CORE_ADDR collector,
117 CORE_ADDR lockaddr, ULONGEST orig_size, CORE_ADDR *jump_entry,
118 CORE_ADDR *trampoline, ULONGEST *trampoline_size,
119 unsigned char *jjump_pad_insn, ULONGEST *jjump_pad_insn_size,
120 CORE_ADDR *adjusted_insn_addr, CORE_ADDR *adjusted_insn_addr_end,
121 char *err) override;
122
123 int get_min_fast_tracepoint_insn_len () override;
124
125 struct emit_ops *emit_ops () override;
126
127 int get_ipa_tdesc_idx () override;
128
129 protected:
130
131 void low_arch_setup () override;
132
133 bool low_cannot_fetch_register (int regno) override;
134
135 bool low_cannot_store_register (int regno) override;
136
137 bool low_supports_breakpoints () override;
138
139 CORE_ADDR low_get_pc (regcache *regcache) override;
140
141 void low_set_pc (regcache *regcache, CORE_ADDR newpc) override;
142
143 int low_decr_pc_after_break () override;
144
145 bool low_breakpoint_at (CORE_ADDR pc) override;
146
147 int low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
148 int size, raw_breakpoint *bp) override;
149
150 int low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
151 int size, raw_breakpoint *bp) override;
152
153 bool low_stopped_by_watchpoint () override;
154
155 CORE_ADDR low_stopped_data_address () override;
156
157 /* collect_ptrace_register/supply_ptrace_register are not needed in the
158 native i386 case (no registers smaller than an xfer unit), and are not
159 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
160
161 /* Need to fix up i386 siginfo if host is amd64. */
162 bool low_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
163 int direction) override;
164
165 arch_process_info *low_new_process () override;
166
167 void low_delete_process (arch_process_info *info) override;
168
169 void low_new_thread (lwp_info *) override;
170
171 void low_delete_thread (arch_lwp_info *) override;
172
173 void low_new_fork (process_info *parent, process_info *child) override;
174
175 void low_prepare_to_resume (lwp_info *lwp) override;
176
177 int low_get_thread_area (int lwpid, CORE_ADDR *addrp) override;
178
179 bool low_supports_range_stepping () override;
180
181 bool low_supports_catch_syscall () override;
182
183 void low_get_syscall_trapinfo (regcache *regcache, int *sysno) override;
184
185 private:
186
187 /* Update the target description of all processes; a new GDB has
188 connected, and it may or may not support xml target descriptions. */
189 void update_xmltarget ();
190 };
191
192 /* The singleton target ops object. */
193
194 static x86_target the_x86_target;
195
196 /* Per-process arch-specific data we want to keep. */
197
198 struct arch_process_info
199 {
200 struct x86_debug_reg_state debug_reg_state;
201 };
202
203 #ifdef __x86_64__
204
205 /* Mapping between the general-purpose registers in `struct user'
206 format and GDB's register array layout.
207 Note that the transfer layout uses 64-bit regs. */
208 static /*const*/ int i386_regmap[] =
209 {
210 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
211 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
212 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
213 DS * 8, ES * 8, FS * 8, GS * 8
214 };
215
216 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
217
218 /* So the code below doesn't have to care whether it's i386 or amd64. */
219 #define ORIG_EAX ORIG_RAX
220 #define REGSIZE 8
221
222 static const int x86_64_regmap[] =
223 {
224 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
225 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
226 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
227 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
228 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
229 DS * 8, ES * 8, FS * 8, GS * 8,
230 -1, -1, -1, -1, -1, -1, -1, -1,
231 -1, -1, -1, -1, -1, -1, -1, -1,
232 -1, -1, -1, -1, -1, -1, -1, -1,
233 -1,
234 -1, -1, -1, -1, -1, -1, -1, -1,
235 ORIG_RAX * 8,
236 21 * 8, 22 * 8,
237 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
238 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
239 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
240 -1, -1, -1, -1, -1, -1, -1, -1,
241 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
242 -1, -1, -1, -1, -1, -1, -1, -1,
243 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
244 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
245 -1, -1, -1, -1, -1, -1, -1, -1,
246 -1, -1, -1, -1, -1, -1, -1, -1,
247 -1, -1, -1, -1, -1, -1, -1, -1,
248 -1 /* pkru */
249 };
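/* Note on the table above: entries of -1 are registers that have no slot
   in the ptrace GETREGS buffer (they are transferred via
   PTRACE_GETREGSET/XSAVE instead), so x86_fill_gregset/x86_store_gregset
   below simply skip them.  The "21 * 8, 22 * 8" entries are, assuming the
   usual x86-64 struct user_regs_struct layout, the fs_base and gs_base
   slots.  */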
250
251 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
252 #define X86_64_USER_REGS (GS + 1)
253
254 #else /* ! __x86_64__ */
255
256 /* Mapping between the general-purpose registers in `struct user'
257 format and GDB's register array layout. */
258 static /*const*/ int i386_regmap[] =
259 {
260 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
261 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
262 EIP * 4, EFL * 4, CS * 4, SS * 4,
263 DS * 4, ES * 4, FS * 4, GS * 4
264 };
265
266 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
267
268 #define REGSIZE 4
269
270 #endif
271
272 #ifdef __x86_64__
273
274 /* Returns true if the current inferior belongs to an x86-64 process,
275 per the tdesc. */
276
277 static int
278 is_64bit_tdesc (void)
279 {
280 struct regcache *regcache = get_thread_regcache (current_thread, 0);
281
282 return register_size (regcache->tdesc, 0) == 8;
283 }
284
285 #endif
286
287 \f
288 /* Called by libthread_db. */
289
290 ps_err_e
291 ps_get_thread_area (struct ps_prochandle *ph,
292 lwpid_t lwpid, int idx, void **base)
293 {
294 #ifdef __x86_64__
295 int use_64bit = is_64bit_tdesc ();
296
297 if (use_64bit)
298 {
299 switch (idx)
300 {
301 case FS:
302 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
303 return PS_OK;
304 break;
305 case GS:
306 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
307 return PS_OK;
308 break;
309 default:
310 return PS_BADADDR;
311 }
312 return PS_ERR;
313 }
314 #endif
315
316 {
317 unsigned int desc[4];
318
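    /* PTRACE_GET_THREAD_AREA fills DESC with a struct user_desc (see
       asm/ldt.h); desc[1] below is its base_addr member, the only field
       this function cares about.  */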
319 if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
320 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
321 return PS_ERR;
322
323 /* Ensure we properly extend the value to 64-bits for x86_64. */
324 *base = (void *) (uintptr_t) desc[1];
325 return PS_OK;
326 }
327 }
328
329 /* Get the thread area address. This is used to recognize which
330 thread is which when tracing with the in-process agent library. We
331 don't read anything from the address, and treat it as opaque; it's
332 the address itself that we assume is unique per-thread. */
333
334 int
335 x86_target::low_get_thread_area (int lwpid, CORE_ADDR *addr)
336 {
337 #ifdef __x86_64__
338 int use_64bit = is_64bit_tdesc ();
339
340 if (use_64bit)
341 {
342 void *base;
343 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
344 {
345 *addr = (CORE_ADDR) (uintptr_t) base;
346 return 0;
347 }
348
349 return -1;
350 }
351 #endif
352
353 {
354 struct lwp_info *lwp = find_lwp_pid (ptid_t (lwpid));
355 struct thread_info *thr = get_lwp_thread (lwp);
356 struct regcache *regcache = get_thread_regcache (thr, 1);
357 unsigned int desc[4];
358 ULONGEST gs = 0;
359 const int reg_thread_area = 3; /* bits to scale down register value. */
360 int idx;
361
362 collect_register_by_name (regcache, "gs", &gs);
363
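    /* Why the shift works: an x86 segment selector keeps the descriptor
       index in bits 3..15, with TI in bit 2 and RPL in bits 0..1, so
       shifting right by 3 recovers the GDT index.  For example, a %gs
       value of 0x33 (index 6, TI 0, RPL 3) yields idx == 6.  */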
364 idx = gs >> reg_thread_area;
365
366 if (ptrace (PTRACE_GET_THREAD_AREA,
367 lwpid_of (thr),
368 (void *) (long) idx, (unsigned long) &desc) < 0)
369 return -1;
370
371 *addr = desc[1];
372 return 0;
373 }
374 }
375
376
377 \f
378 bool
379 x86_target::low_cannot_store_register (int regno)
380 {
381 #ifdef __x86_64__
382 if (is_64bit_tdesc ())
383 return false;
384 #endif
385
386 return regno >= I386_NUM_REGS;
387 }
388
389 bool
390 x86_target::low_cannot_fetch_register (int regno)
391 {
392 #ifdef __x86_64__
393 if (is_64bit_tdesc ())
394 return false;
395 #endif
396
397 return regno >= I386_NUM_REGS;
398 }
399
400 static void
401 collect_register_i386 (struct regcache *regcache, int regno, void *buf)
402 {
403 collect_register (regcache, regno, buf);
404
405 #ifdef __x86_64__
406 /* In case of x86_64 -m32, collect_register only writes 4 bytes, but the
407 space reserved in buf for the register is 8 bytes. Make sure the entire
408 reserved space is initialized. */
409
410 gdb_assert (register_size (regcache->tdesc, regno) == 4);
411
412 if (regno == RAX)
413 {
414 /* Sign extend EAX value to avoid potential syscall restart
415 problems.
416
417 See amd64_linux_collect_native_gregset() in
418 gdb/amd64-linux-nat.c for a detailed explanation. */
419 *(int64_t *) buf = *(int32_t *) buf;
420 }
421 else
422 {
423 /* Zero-extend. */
424 *(uint64_t *) buf = *(uint32_t *) buf;
425 }
426 #endif
427 }
428
429 static void
430 x86_fill_gregset (struct regcache *regcache, void *buf)
431 {
432 int i;
433
434 #ifdef __x86_64__
435 if (register_size (regcache->tdesc, 0) == 8)
436 {
437 for (i = 0; i < X86_64_NUM_REGS; i++)
438 if (x86_64_regmap[i] != -1)
439 collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
440
441 return;
442 }
443 #endif
444
445 for (i = 0; i < I386_NUM_REGS; i++)
446 collect_register_i386 (regcache, i, ((char *) buf) + i386_regmap[i]);
447
448 /* Handle ORIG_EAX, which is not in i386_regmap. */
449 collect_register_i386 (regcache, find_regno (regcache->tdesc, "orig_eax"),
450 ((char *) buf) + ORIG_EAX * REGSIZE);
451 }
452
453 static void
454 x86_store_gregset (struct regcache *regcache, const void *buf)
455 {
456 int i;
457
458 #ifdef __x86_64__
459 if (register_size (regcache->tdesc, 0) == 8)
460 {
461 for (i = 0; i < X86_64_NUM_REGS; i++)
462 if (x86_64_regmap[i] != -1)
463 supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
464
465 return;
466 }
467 #endif
468
469 for (i = 0; i < I386_NUM_REGS; i++)
470 supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
471
472 supply_register_by_name (regcache, "orig_eax",
473 ((char *) buf) + ORIG_EAX * REGSIZE);
474 }
475
476 static void
477 x86_fill_fpregset (struct regcache *regcache, void *buf)
478 {
479 #ifdef __x86_64__
480 i387_cache_to_fxsave (regcache, buf);
481 #else
482 i387_cache_to_fsave (regcache, buf);
483 #endif
484 }
485
486 static void
487 x86_store_fpregset (struct regcache *regcache, const void *buf)
488 {
489 #ifdef __x86_64__
490 i387_fxsave_to_cache (regcache, buf);
491 #else
492 i387_fsave_to_cache (regcache, buf);
493 #endif
494 }
495
496 #ifndef __x86_64__
497
498 static void
499 x86_fill_fpxregset (struct regcache *regcache, void *buf)
500 {
501 i387_cache_to_fxsave (regcache, buf);
502 }
503
504 static void
505 x86_store_fpxregset (struct regcache *regcache, const void *buf)
506 {
507 i387_fxsave_to_cache (regcache, buf);
508 }
509
510 #endif
511
512 static void
513 x86_fill_xstateregset (struct regcache *regcache, void *buf)
514 {
515 i387_cache_to_xsave (regcache, buf);
516 }
517
518 static void
519 x86_store_xstateregset (struct regcache *regcache, const void *buf)
520 {
521 i387_xsave_to_cache (regcache, buf);
522 }
523
524 /* ??? The non-biarch i386 case stores all the i387 regs twice.
525 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
526 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
527 doesn't work. It would be nice to avoid the duplication in the case
528 where it does work. Maybe the arch_setup routine could check whether
529 it works and update the supported regsets accordingly. */
530
531 static struct regset_info x86_regsets[] =
532 {
533 #ifdef HAVE_PTRACE_GETREGS
534 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
535 GENERAL_REGS,
536 x86_fill_gregset, x86_store_gregset },
537 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
538 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
539 # ifndef __x86_64__
540 # ifdef HAVE_PTRACE_GETFPXREGS
541 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
542 EXTENDED_REGS,
543 x86_fill_fpxregset, x86_store_fpxregset },
544 # endif
545 # endif
546 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
547 FP_REGS,
548 x86_fill_fpregset, x86_store_fpregset },
549 #endif /* HAVE_PTRACE_GETREGS */
550 NULL_REGSET
551 };
552
553 bool
554 x86_target::low_supports_breakpoints ()
555 {
556 return true;
557 }
558
559 CORE_ADDR
560 x86_target::low_get_pc (regcache *regcache)
561 {
562 int use_64bit = register_size (regcache->tdesc, 0) == 8;
563
564 if (use_64bit)
565 {
566 uint64_t pc;
567
568 collect_register_by_name (regcache, "rip", &pc);
569 return (CORE_ADDR) pc;
570 }
571 else
572 {
573 uint32_t pc;
574
575 collect_register_by_name (regcache, "eip", &pc);
576 return (CORE_ADDR) pc;
577 }
578 }
579
580 void
581 x86_target::low_set_pc (regcache *regcache, CORE_ADDR pc)
582 {
583 int use_64bit = register_size (regcache->tdesc, 0) == 8;
584
585 if (use_64bit)
586 {
587 uint64_t newpc = pc;
588
589 supply_register_by_name (regcache, "rip", &newpc);
590 }
591 else
592 {
593 uint32_t newpc = pc;
594
595 supply_register_by_name (regcache, "eip", &newpc);
596 }
597 }
598
599 int
600 x86_target::low_decr_pc_after_break ()
601 {
602 return 1;
603 }
604
605 \f
606 static const gdb_byte x86_breakpoint[] = { 0xCC };
607 #define x86_breakpoint_len 1
608
609 bool
610 x86_target::low_breakpoint_at (CORE_ADDR pc)
611 {
612 unsigned char c;
613
614 read_memory (pc, &c, 1);
615 if (c == 0xCC)
616 return true;
617
618 return false;
619 }
620 \f
621 /* Low-level function vector. */
622 struct x86_dr_low_type x86_dr_low =
623 {
624 x86_linux_dr_set_control,
625 x86_linux_dr_set_addr,
626 x86_linux_dr_get_addr,
627 x86_linux_dr_get_status,
628 x86_linux_dr_get_control,
629 sizeof (void *),
630 };
631 \f
632 /* Breakpoint/Watchpoint support. */
633
634 bool
635 x86_target::supports_z_point_type (char z_type)
636 {
637 switch (z_type)
638 {
639 case Z_PACKET_SW_BP:
640 case Z_PACKET_HW_BP:
641 case Z_PACKET_WRITE_WP:
642 case Z_PACKET_ACCESS_WP:
643 return true;
644 default:
645 return false;
646 }
647 }
648
649 int
650 x86_target::low_insert_point (raw_bkpt_type type, CORE_ADDR addr,
651 int size, raw_breakpoint *bp)
652 {
653 struct process_info *proc = current_process ();
654
655 switch (type)
656 {
657 case raw_bkpt_type_hw:
658 case raw_bkpt_type_write_wp:
659 case raw_bkpt_type_access_wp:
660 {
661 enum target_hw_bp_type hw_type
662 = raw_bkpt_type_to_target_hw_bp_type (type);
663 struct x86_debug_reg_state *state
664 = &proc->priv->arch_private->debug_reg_state;
665
666 return x86_dr_insert_watchpoint (state, hw_type, addr, size);
667 }
668
669 default:
670 /* Unsupported. */
671 return 1;
672 }
673 }
674
675 int
676 x86_target::low_remove_point (raw_bkpt_type type, CORE_ADDR addr,
677 int size, raw_breakpoint *bp)
678 {
679 struct process_info *proc = current_process ();
680
681 switch (type)
682 {
683 case raw_bkpt_type_hw:
684 case raw_bkpt_type_write_wp:
685 case raw_bkpt_type_access_wp:
686 {
687 enum target_hw_bp_type hw_type
688 = raw_bkpt_type_to_target_hw_bp_type (type);
689 struct x86_debug_reg_state *state
690 = &proc->priv->arch_private->debug_reg_state;
691
692 return x86_dr_remove_watchpoint (state, hw_type, addr, size);
693 }
694 default:
695 /* Unsupported. */
696 return 1;
697 }
698 }
699
700 bool
701 x86_target::low_stopped_by_watchpoint ()
702 {
703 struct process_info *proc = current_process ();
704 return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
705 }
706
707 CORE_ADDR
708 x86_target::low_stopped_data_address ()
709 {
710 struct process_info *proc = current_process ();
711 CORE_ADDR addr;
712 if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
713 &addr))
714 return addr;
715 return 0;
716 }
717 \f
718 /* Called when a new process is created. */
719
720 arch_process_info *
721 x86_target::low_new_process ()
722 {
723 struct arch_process_info *info = XCNEW (struct arch_process_info);
724
725 x86_low_init_dregs (&info->debug_reg_state);
726
727 return info;
728 }
729
730 /* Called when a process is being deleted. */
731
732 void
733 x86_target::low_delete_process (arch_process_info *info)
734 {
735 xfree (info);
736 }
737
738 void
739 x86_target::low_new_thread (lwp_info *lwp)
740 {
741 /* This comes from nat/. */
742 x86_linux_new_thread (lwp);
743 }
744
745 void
746 x86_target::low_delete_thread (arch_lwp_info *alwp)
747 {
748 /* This comes from nat/. */
749 x86_linux_delete_thread (alwp);
750 }
751
752 /* Target routine for new_fork. */
753
754 void
755 x86_target::low_new_fork (process_info *parent, process_info *child)
756 {
757 /* These are allocated by linux_add_process. */
758 gdb_assert (parent->priv != NULL
759 && parent->priv->arch_private != NULL);
760 gdb_assert (child->priv != NULL
761 && child->priv->arch_private != NULL);
762
763 /* Linux kernel before 2.6.33 commit
764 72f674d203cd230426437cdcf7dd6f681dad8b0d
765 will inherit hardware debug registers from parent
766 on fork/vfork/clone. Newer Linux kernels create such tasks with
767 zeroed debug registers.
768
769 GDB core assumes the child inherits the watchpoints/hw
770 breakpoints of the parent, and will remove them all from the
771 forked off process. Copy the debug registers mirrors into the
772 new process so that all breakpoints and watchpoints can be
773 removed together. The debug registers mirror will become zeroed
774 in the end before detaching the forked off process, thus making
775 this compatible with older Linux kernels too. */
776
777 *child->priv->arch_private = *parent->priv->arch_private;
778 }
779
780 void
781 x86_target::low_prepare_to_resume (lwp_info *lwp)
782 {
783 /* This comes from nat/. */
784 x86_linux_prepare_to_resume (lwp);
785 }
786
787 /* See nat/x86-dregs.h. */
788
789 struct x86_debug_reg_state *
790 x86_debug_reg_state (pid_t pid)
791 {
792 struct process_info *proc = find_process_pid (pid);
793
794 return &proc->priv->arch_private->debug_reg_state;
795 }
796 \f
797 /* When GDBSERVER is built as a 64-bit application on linux, the
798 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
799 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
800 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
801 conversion in-place ourselves. */
802
803 /* Convert a ptrace/host siginfo object into/from the siginfo in the
804 layout of the inferior's architecture. Returns true if any
805 conversion was done; false otherwise. If DIRECTION is 1, then copy
806 from INF to PTRACE. If DIRECTION is 0, copy from PTRACE to
807 INF. */
808
809 bool
810 x86_target::low_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
811 {
812 #ifdef __x86_64__
813 unsigned int machine;
814 int tid = lwpid_of (current_thread);
815 int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
816
817 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
818 if (!is_64bit_tdesc ())
819 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
820 FIXUP_32);
821 /* No fixup for native x32 GDB. */
822 else if (!is_elf64 && sizeof (void *) == 8)
823 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
824 FIXUP_X32);
825 #endif
826
827 return false;
828 }
829 \f
830 static int use_xml;
831
832 /* Format of XSAVE extended state is:
833 struct
834 {
835 fxsave_bytes[0..463]
836 sw_usable_bytes[464..511]
837 xstate_hdr_bytes[512..575]
838 avx_bytes[576..831]
839 future_state etc
840 };
841
842 Same memory layout will be used for the coredump NT_X86_XSTATE
843 representing the XSAVE extended state registers.
844
845 The first 8 bytes of the sw_usable_bytes[464..511] are the OS enabled
846 extended state mask, which is the same as the extended control register
847 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
848 together with the mask saved in the xstate_hdr_bytes to determine what
849 states the processor/OS supports and what state, used or initialized,
850 the process/thread is in. */
851 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
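/* For illustration: x86_linux_read_description below reads the XSAVE area
   into an array of uint64_t, so XCR0 is fetched from element
   I386_LINUX_XSAVE_XCR0_OFFSET / sizeof (uint64_t), i.e. 464 / 8 == 58,
   which is the start of the sw_usable_bytes region described above.  */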
852
853 /* Does the current host support the GETFPXREGS request? The header
854 file may or may not define it, and even if it is defined, the
855 kernel will return EIO if it's running on a pre-SSE processor. */
856 int have_ptrace_getfpxregs =
857 #ifdef HAVE_PTRACE_GETFPXREGS
858 -1
859 #else
860 0
861 #endif
862 ;
863
864 /* Get Linux/x86 target description from running target. */
865
866 static const struct target_desc *
867 x86_linux_read_description (void)
868 {
869 unsigned int machine;
870 int is_elf64;
871 int xcr0_features;
872 int tid;
873 static uint64_t xcr0;
874 struct regset_info *regset;
875
876 tid = lwpid_of (current_thread);
877
878 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
879
880 if (sizeof (void *) == 4)
881 {
882 if (is_elf64 > 0)
883 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
884 #ifndef __x86_64__
885 else if (machine == EM_X86_64)
886 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
887 #endif
888 }
889
890 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
891 if (machine == EM_386 && have_ptrace_getfpxregs == -1)
892 {
893 elf_fpxregset_t fpxregs;
894
895 if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
896 {
897 have_ptrace_getfpxregs = 0;
898 have_ptrace_getregset = 0;
899 return i386_linux_read_description (X86_XSTATE_X87);
900 }
901 else
902 have_ptrace_getfpxregs = 1;
903 }
904 #endif
905
906 if (!use_xml)
907 {
908 x86_xcr0 = X86_XSTATE_SSE_MASK;
909
910 /* Don't use XML. */
911 #ifdef __x86_64__
912 if (machine == EM_X86_64)
913 return tdesc_amd64_linux_no_xml.get ();
914 else
915 #endif
916 return tdesc_i386_linux_no_xml.get ();
917 }
918
919 if (have_ptrace_getregset == -1)
920 {
921 uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
922 struct iovec iov;
923
924 iov.iov_base = xstateregs;
925 iov.iov_len = sizeof (xstateregs);
926
927 /* Check if PTRACE_GETREGSET works. */
928 if (ptrace (PTRACE_GETREGSET, tid,
929 (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
930 have_ptrace_getregset = 0;
931 else
932 {
933 have_ptrace_getregset = 1;
934
935 /* Get XCR0 from XSAVE extended state. */
936 xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
937 / sizeof (uint64_t))];
938
939 /* Use PTRACE_GETREGSET if it is available. */
940 for (regset = x86_regsets;
941 regset->fill_function != NULL; regset++)
942 if (regset->get_request == PTRACE_GETREGSET)
943 regset->size = X86_XSTATE_SIZE (xcr0);
944 else if (regset->type != GENERAL_REGS)
945 regset->size = 0;
946 }
947 }
948
949 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
950 xcr0_features = (have_ptrace_getregset
951 && (xcr0 & X86_XSTATE_ALL_MASK));
952
953 if (xcr0_features)
954 x86_xcr0 = xcr0;
955
956 if (machine == EM_X86_64)
957 {
958 #ifdef __x86_64__
959 const target_desc *tdesc = NULL;
960
961 if (xcr0_features)
962 {
963 tdesc = amd64_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK,
964 !is_elf64);
965 }
966
967 if (tdesc == NULL)
968 tdesc = amd64_linux_read_description (X86_XSTATE_SSE_MASK, !is_elf64);
969 return tdesc;
970 #endif
971 }
972 else
973 {
974 const target_desc *tdesc = NULL;
975
976 if (xcr0_features)
977 tdesc = i386_linux_read_description (xcr0 & X86_XSTATE_ALL_MASK);
978
979 if (tdesc == NULL)
980 tdesc = i386_linux_read_description (X86_XSTATE_SSE);
981
982 return tdesc;
983 }
984
985 gdb_assert_not_reached ("failed to return tdesc");
986 }
987
988 /* Update the target description of all processes; a new GDB has
989 connected, and it may or may not support xml target descriptions. */
990
991 void
992 x86_target::update_xmltarget ()
993 {
994 scoped_restore_current_thread restore_thread;
995
996 /* Before changing the register cache's internal layout, flush the
997 contents of the current valid caches back to the threads, and
998 release the current regcache objects. */
999 regcache_release ();
1000
1001 for_each_process ([this] (process_info *proc) {
1002 int pid = proc->pid;
1003
1004 /* Look up any thread of this process. */
1005 switch_to_thread (find_any_thread_of_pid (pid));
1006
1007 low_arch_setup ();
1008 });
1009 }
1010
1011 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1012 PTRACE_GETREGSET. */
1013
1014 void
1015 x86_target::process_qsupported (gdb::array_view<const char * const> features)
1016 {
1017 /* Assume gdb doesn't support XML unless it sends "xmlRegisters="
1018 with "i386" in the qSupported query, in which case it supports x86
1019 XML target descriptions. */
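  /* For example, a GDB built with x86 XML support typically sends a
     feature such as "xmlRegisters=i386"; the value may name several
     architectures ("xmlRegisters=i386,arm,..."), and the loop below only
     looks for the "i386" entry.  */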
1020 use_xml = 0;
1021
1022 for (const char *feature : features)
1023 {
1024 if (startswith (feature, "xmlRegisters="))
1025 {
1026 char *copy = xstrdup (feature + 13);
1027
1028 char *saveptr;
1029 for (char *p = strtok_r (copy, ",", &saveptr);
1030 p != NULL;
1031 p = strtok_r (NULL, ",", &saveptr))
1032 {
1033 if (strcmp (p, "i386") == 0)
1034 {
1035 use_xml = 1;
1036 break;
1037 }
1038 }
1039
1040 free (copy);
1041 }
1042 }
1043
1044 update_xmltarget ();
1045 }
1046
1047 /* Common for x86/x86-64. */
1048
1049 static struct regsets_info x86_regsets_info =
1050 {
1051 x86_regsets, /* regsets */
1052 0, /* num_regsets */
1053 NULL, /* disabled_regsets */
1054 };
1055
1056 #ifdef __x86_64__
1057 static struct regs_info amd64_linux_regs_info =
1058 {
1059 NULL, /* regset_bitmap */
1060 NULL, /* usrregs_info */
1061 &x86_regsets_info
1062 };
1063 #endif
1064 static struct usrregs_info i386_linux_usrregs_info =
1065 {
1066 I386_NUM_REGS,
1067 i386_regmap,
1068 };
1069
1070 static struct regs_info i386_linux_regs_info =
1071 {
1072 NULL, /* regset_bitmap */
1073 &i386_linux_usrregs_info,
1074 &x86_regsets_info
1075 };
1076
1077 const regs_info *
1078 x86_target::get_regs_info ()
1079 {
1080 #ifdef __x86_64__
1081 if (is_64bit_tdesc ())
1082 return &amd64_linux_regs_info;
1083 else
1084 #endif
1085 return &i386_linux_regs_info;
1086 }
1087
1088 /* Initialize the target description for the architecture of the
1089 inferior. */
1090
1091 void
1092 x86_target::low_arch_setup ()
1093 {
1094 current_process ()->tdesc = x86_linux_read_description ();
1095 }
1096
1097 bool
1098 x86_target::low_supports_catch_syscall ()
1099 {
1100 return true;
1101 }
1102
1103 /* Fill *SYSNO with the number of the syscall that was trapped. This
1104 should only be called if LWP got a SYSCALL_SIGTRAP. */
1105
1106 void
1107 x86_target::low_get_syscall_trapinfo (regcache *regcache, int *sysno)
1108 {
1109 int use_64bit = register_size (regcache->tdesc, 0) == 8;
1110
1111 if (use_64bit)
1112 {
1113 long l_sysno;
1114
1115 collect_register_by_name (regcache, "orig_rax", &l_sysno);
1116 *sysno = (int) l_sysno;
1117 }
1118 else
1119 collect_register_by_name (regcache, "orig_eax", sysno);
1120 }
1121
1122 bool
1123 x86_target::supports_tracepoints ()
1124 {
1125 return true;
1126 }
1127
1128 static void
1129 append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
1130 {
1131 target_write_memory (*to, buf, len);
1132 *to += len;
1133 }
1134
1135 static int
1136 push_opcode (unsigned char *buf, const char *op)
1137 {
1138 unsigned char *buf_org = buf;
1139
1140 while (1)
1141 {
1142 char *endptr;
1143 unsigned long ul = strtoul (op, &endptr, 16);
1144
1145 if (endptr == op)
1146 break;
1147
1148 *buf++ = ul;
1149 op = endptr;
1150 }
1151
1152 return buf - buf_org;
1153 }
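/* For instance, push_opcode (buf, "48 89 04 24") stores the bytes
   0x48 0x89 0x04 0x24 (mov %rax,(%rsp)) into BUF and returns 4; the
   jump-pad builders below use it to assemble instruction sequences from
   readable hex strings.  */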
1154
1155 #ifdef __x86_64__
1156
1157 /* Build a jump pad that saves registers and calls a collection
1158 function. Writes the jump instruction that jumps to the jump pad
1159 into JJUMPAD_INSN. The caller is responsible for writing it in at
1160 the tracepoint address. */
1161
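/* As a reading aid only, the pad built below is laid out roughly as:

     push all GPRs, eflags and TPADDR          (the saved-register block)
     sub $0x18,%rsp; build the collecting_t object; lock cmpxchg spin-lock
     set up %rdi/%rsi and call the collector function through %rax
     clear the lock; add $0x18,%rsp; pop the registers back
     <relocated copy of the original instruction>
     jmp back to TPADDR + ORIG_SIZE

   The authoritative sequence is the code that follows.  */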
1162 static int
1163 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1164 CORE_ADDR collector,
1165 CORE_ADDR lockaddr,
1166 ULONGEST orig_size,
1167 CORE_ADDR *jump_entry,
1168 CORE_ADDR *trampoline,
1169 ULONGEST *trampoline_size,
1170 unsigned char *jjump_pad_insn,
1171 ULONGEST *jjump_pad_insn_size,
1172 CORE_ADDR *adjusted_insn_addr,
1173 CORE_ADDR *adjusted_insn_addr_end,
1174 char *err)
1175 {
1176 unsigned char buf[40];
1177 int i, offset;
1178 int64_t loffset;
1179
1180 CORE_ADDR buildaddr = *jump_entry;
1181
1182 /* Build the jump pad. */
1183
1184 /* First, do tracepoint data collection. Save registers. */
1185 i = 0;
1186 /* Need to ensure stack pointer saved first. */
1187 buf[i++] = 0x54; /* push %rsp */
1188 buf[i++] = 0x55; /* push %rbp */
1189 buf[i++] = 0x57; /* push %rdi */
1190 buf[i++] = 0x56; /* push %rsi */
1191 buf[i++] = 0x52; /* push %rdx */
1192 buf[i++] = 0x51; /* push %rcx */
1193 buf[i++] = 0x53; /* push %rbx */
1194 buf[i++] = 0x50; /* push %rax */
1195 buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
1196 buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
1197 buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
1198 buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
1199 buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
1200 buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
1201 buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
1202 buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
1203 buf[i++] = 0x9c; /* pushfq */
1204 buf[i++] = 0x48; /* movabs <addr>,%rdi */
1205 buf[i++] = 0xbf;
1206 memcpy (buf + i, &tpaddr, 8);
1207 i += 8;
1208 buf[i++] = 0x57; /* push %rdi */
1209 append_insns (&buildaddr, i, buf);
1210
1211 /* Stack space for the collecting_t object. */
1212 i = 0;
1213 i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
1214 i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
1215 memcpy (buf + i, &tpoint, 8);
1216 i += 8;
1217 i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
1218 i += push_opcode (&buf[i],
1219 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1220 i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1221 append_insns (&buildaddr, i, buf);
1222
1223 /* spin-lock. */
1224 i = 0;
1225 i += push_opcode (&buf[i], "48 be"); /* movabs <lockaddr>,%rsi */
1226 memcpy (&buf[i], (void *) &lockaddr, 8);
1227 i += 8;
1228 i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
1229 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1230 i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1231 i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
1232 i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
1233 append_insns (&buildaddr, i, buf);
1234
1235 /* Set up the gdb_collect call. */
1236 /* At this point, (stack pointer + 0x18) is the base of our saved
1237 register block. */
1238
1239 i = 0;
1240 i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
1241 i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */
1242
1243 /* tpoint address may be 64-bit wide. */
1244 i += push_opcode (&buf[i], "48 bf"); /* movabs <addr>,%rdi */
1245 memcpy (buf + i, &tpoint, 8);
1246 i += 8;
1247 append_insns (&buildaddr, i, buf);
1248
1249 /* The collector function, being in the shared library, may be more
1250 than 31 bits away from the jump pad. */
1251 i = 0;
1252 i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
1253 memcpy (buf + i, &collector, 8);
1254 i += 8;
1255 i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
1256 append_insns (&buildaddr, i, buf);
1257
1258 /* Clear the spin-lock. */
1259 i = 0;
1260 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1261 i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
1262 memcpy (buf + i, &lockaddr, 8);
1263 i += 8;
1264 append_insns (&buildaddr, i, buf);
1265
1266 /* Remove stack that had been used for the collect_t object. */
1267 i = 0;
1268 i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
1269 append_insns (&buildaddr, i, buf);
1270
1271 /* Restore register state. */
1272 i = 0;
1273 buf[i++] = 0x48; /* add $0x8,%rsp */
1274 buf[i++] = 0x83;
1275 buf[i++] = 0xc4;
1276 buf[i++] = 0x08;
1277 buf[i++] = 0x9d; /* popfq */
1278 buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
1279 buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
1280 buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
1281 buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
1282 buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
1283 buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
1284 buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
1285 buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
1286 buf[i++] = 0x58; /* pop %rax */
1287 buf[i++] = 0x5b; /* pop %rbx */
1288 buf[i++] = 0x59; /* pop %rcx */
1289 buf[i++] = 0x5a; /* pop %rdx */
1290 buf[i++] = 0x5e; /* pop %rsi */
1291 buf[i++] = 0x5f; /* pop %rdi */
1292 buf[i++] = 0x5d; /* pop %rbp */
1293 buf[i++] = 0x5c; /* pop %rsp */
1294 append_insns (&buildaddr, i, buf);
1295
1296 /* Now, adjust the original instruction to execute in the jump
1297 pad. */
1298 *adjusted_insn_addr = buildaddr;
1299 relocate_instruction (&buildaddr, tpaddr);
1300 *adjusted_insn_addr_end = buildaddr;
1301
1302 /* Finally, write a jump back to the program. */
1303
1304 loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1305 if (loffset > INT_MAX || loffset < INT_MIN)
1306 {
1307 sprintf (err,
1308 "E.Jump back from jump pad too far from tracepoint "
1309 "(offset 0x%" PRIx64 " > int32).", loffset);
1310 return 1;
1311 }
1312
1313 offset = (int) loffset;
1314 memcpy (buf, jump_insn, sizeof (jump_insn));
1315 memcpy (buf + 1, &offset, 4);
1316 append_insns (&buildaddr, sizeof (jump_insn), buf);
1317
1318 /* The jump pad is now built. Wire in a jump to our jump pad. This
1319 is always done last (by our caller actually), so that we can
1320 install fast tracepoints with threads running. This relies on
1321 the agent's atomic write support. */
1322 loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
1323 if (loffset > INT_MAX || loffset < INT_MIN)
1324 {
1325 sprintf (err,
1326 "E.Jump pad too far from tracepoint "
1327 "(offset 0x%" PRIx64 " > int32).", loffset);
1328 return 1;
1329 }
1330
1331 offset = (int) loffset;
1332
1333 memcpy (buf, jump_insn, sizeof (jump_insn));
1334 memcpy (buf + 1, &offset, 4);
1335 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1336 *jjump_pad_insn_size = sizeof (jump_insn);
1337
1338 /* Return the end address of our pad. */
1339 *jump_entry = buildaddr;
1340
1341 return 0;
1342 }
1343
1344 #endif /* __x86_64__ */
1345
1346 /* Build a jump pad that saves registers and calls a collection
1347 function. Writes the jump instruction that jumps to the jump pad
1348 into JJUMPAD_INSN. The caller is responsible for writing it in at
1349 the tracepoint address. */
1350
1351 static int
1352 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1353 CORE_ADDR collector,
1354 CORE_ADDR lockaddr,
1355 ULONGEST orig_size,
1356 CORE_ADDR *jump_entry,
1357 CORE_ADDR *trampoline,
1358 ULONGEST *trampoline_size,
1359 unsigned char *jjump_pad_insn,
1360 ULONGEST *jjump_pad_insn_size,
1361 CORE_ADDR *adjusted_insn_addr,
1362 CORE_ADDR *adjusted_insn_addr_end,
1363 char *err)
1364 {
1365 unsigned char buf[0x100];
1366 int i, offset;
1367 CORE_ADDR buildaddr = *jump_entry;
1368
1369 /* Build the jump pad. */
1370
1371 /* First, do tracepoint data collection. Save registers. */
1372 i = 0;
1373 buf[i++] = 0x60; /* pushad */
1374 buf[i++] = 0x68; /* push tpaddr aka $pc */
1375 *((int *)(buf + i)) = (int) tpaddr;
1376 i += 4;
1377 buf[i++] = 0x9c; /* pushf */
1378 buf[i++] = 0x1e; /* push %ds */
1379 buf[i++] = 0x06; /* push %es */
1380 buf[i++] = 0x0f; /* push %fs */
1381 buf[i++] = 0xa0;
1382 buf[i++] = 0x0f; /* push %gs */
1383 buf[i++] = 0xa8;
1384 buf[i++] = 0x16; /* push %ss */
1385 buf[i++] = 0x0e; /* push %cs */
1386 append_insns (&buildaddr, i, buf);
1387
1388 /* Stack space for the collecting_t object. */
1389 i = 0;
1390 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1391
1392 /* Build the object. */
1393 i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
1394 memcpy (buf + i, &tpoint, 4);
1395 i += 4;
1396 i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */
1397
1398 i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1399 i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1400 append_insns (&buildaddr, i, buf);
1401
1402 /* spin-lock. Note this uses cmpxchg, which is not available on the
1403 original i386. If we cared about that, this could use xchg instead. */
1404
1405 i = 0;
1406 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1407 i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
1408 %esp,<lockaddr> */
1409 memcpy (&buf[i], (void *) &lockaddr, 4);
1410 i += 4;
1411 i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
1412 i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
1413 append_insns (&buildaddr, i, buf);
1414
1415
1416 /* Set up arguments to the gdb_collect call. */
1417 i = 0;
1418 i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
1419 i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
1420 i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1421 append_insns (&buildaddr, i, buf);
1422
1423 i = 0;
1424 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1425 append_insns (&buildaddr, i, buf);
1426
1427 i = 0;
1428 i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
1429 memcpy (&buf[i], (void *) &tpoint, 4);
1430 i += 4;
1431 append_insns (&buildaddr, i, buf);
1432
1433 buf[0] = 0xe8; /* call <reladdr> */
1434 offset = collector - (buildaddr + sizeof (jump_insn));
1435 memcpy (buf + 1, &offset, 4);
1436 append_insns (&buildaddr, 5, buf);
1437 /* Clean up after the call. */
1438 buf[0] = 0x83; /* add $0x8,%esp */
1439 buf[1] = 0xc4;
1440 buf[2] = 0x08;
1441 append_insns (&buildaddr, 3, buf);
1442
1443
1444 /* Clear the spin-lock. This would need the LOCK prefix on older
1445 broken archs. */
1446 i = 0;
1447 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1448 i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
1449 memcpy (buf + i, &lockaddr, 4);
1450 i += 4;
1451 append_insns (&buildaddr, i, buf);
1452
1453
1454 /* Remove stack that had been used for the collect_t object. */
1455 i = 0;
1456 i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
1457 append_insns (&buildaddr, i, buf);
1458
1459 i = 0;
1460 buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1461 buf[i++] = 0xc4;
1462 buf[i++] = 0x04;
1463 buf[i++] = 0x17; /* pop %ss */
1464 buf[i++] = 0x0f; /* pop %gs */
1465 buf[i++] = 0xa9;
1466 buf[i++] = 0x0f; /* pop %fs */
1467 buf[i++] = 0xa1;
1468 buf[i++] = 0x07; /* pop %es */
1469 buf[i++] = 0x1f; /* pop %ds */
1470 buf[i++] = 0x9d; /* popf */
1471 buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1472 buf[i++] = 0xc4;
1473 buf[i++] = 0x04;
1474 buf[i++] = 0x61; /* popad */
1475 append_insns (&buildaddr, i, buf);
1476
1477 /* Now, adjust the original instruction to execute in the jump
1478 pad. */
1479 *adjusted_insn_addr = buildaddr;
1480 relocate_instruction (&buildaddr, tpaddr);
1481 *adjusted_insn_addr_end = buildaddr;
1482
1483 /* Write the jump back to the program. */
1484 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1485 memcpy (buf, jump_insn, sizeof (jump_insn));
1486 memcpy (buf + 1, &offset, 4);
1487 append_insns (&buildaddr, sizeof (jump_insn), buf);
1488
1489 /* The jump pad is now built. Wire in a jump to our jump pad. This
1490 is always done last (by our caller actually), so that we can
1491 install fast tracepoints with threads running. This relies on
1492 the agent's atomic write support. */
1493 if (orig_size == 4)
1494 {
1495 /* Create a trampoline. */
1496 *trampoline_size = sizeof (jump_insn);
1497 if (!claim_trampoline_space (*trampoline_size, trampoline))
1498 {
1499 /* No trampoline space available. */
1500 strcpy (err,
1501 "E.Cannot allocate trampoline space needed for fast "
1502 "tracepoints on 4-byte instructions.");
1503 return 1;
1504 }
1505
1506 offset = *jump_entry - (*trampoline + sizeof (jump_insn));
1507 memcpy (buf, jump_insn, sizeof (jump_insn));
1508 memcpy (buf + 1, &offset, 4);
1509 target_write_memory (*trampoline, buf, sizeof (jump_insn));
1510
1511 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1512 offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
1513 memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
1514 memcpy (buf + 2, &offset, 2);
1515 memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
1516 *jjump_pad_insn_size = sizeof (small_jump_insn);
1517 }
1518 else
1519 {
1520 /* Else use a 32-bit relative jump instruction. */
1521 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1522 memcpy (buf, jump_insn, sizeof (jump_insn));
1523 memcpy (buf + 1, &offset, 4);
1524 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1525 *jjump_pad_insn_size = sizeof (jump_insn);
1526 }
1527
1528 /* Return the end address of our pad. */
1529 *jump_entry = buildaddr;
1530
1531 return 0;
1532 }
1533
1534 bool
1535 x86_target::supports_fast_tracepoints ()
1536 {
1537 return true;
1538 }
1539
1540 int
1541 x86_target::install_fast_tracepoint_jump_pad (CORE_ADDR tpoint,
1542 CORE_ADDR tpaddr,
1543 CORE_ADDR collector,
1544 CORE_ADDR lockaddr,
1545 ULONGEST orig_size,
1546 CORE_ADDR *jump_entry,
1547 CORE_ADDR *trampoline,
1548 ULONGEST *trampoline_size,
1549 unsigned char *jjump_pad_insn,
1550 ULONGEST *jjump_pad_insn_size,
1551 CORE_ADDR *adjusted_insn_addr,
1552 CORE_ADDR *adjusted_insn_addr_end,
1553 char *err)
1554 {
1555 #ifdef __x86_64__
1556 if (is_64bit_tdesc ())
1557 return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1558 collector, lockaddr,
1559 orig_size, jump_entry,
1560 trampoline, trampoline_size,
1561 jjump_pad_insn,
1562 jjump_pad_insn_size,
1563 adjusted_insn_addr,
1564 adjusted_insn_addr_end,
1565 err);
1566 #endif
1567
1568 return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1569 collector, lockaddr,
1570 orig_size, jump_entry,
1571 trampoline, trampoline_size,
1572 jjump_pad_insn,
1573 jjump_pad_insn_size,
1574 adjusted_insn_addr,
1575 adjusted_insn_addr_end,
1576 err);
1577 }
1578
1579 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1580 architectures. */
1581
1582 int
1583 x86_target::get_min_fast_tracepoint_insn_len ()
1584 {
1585 static int warned_about_fast_tracepoints = 0;
1586
1587 #ifdef __x86_64__
1588 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1589 used for fast tracepoints. */
1590 if (is_64bit_tdesc ())
1591 return 5;
1592 #endif
1593
1594 if (agent_loaded_p ())
1595 {
1596 char errbuf[IPA_BUFSIZ];
1597
1598 errbuf[0] = '\0';
1599
1600 /* On x86, if trampolines are available, then 4-byte jump instructions
1601 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1602 with a 4-byte offset are used instead. */
1603 if (have_fast_tracepoint_trampoline_buffer (errbuf))
1604 return 4;
1605 else
1606 {
1607 /* GDB has no channel to explain to the user why a shorter fast
1608 tracepoint is not possible, but at least make GDBserver
1609 mention that something has gone awry. */
1610 if (!warned_about_fast_tracepoints)
1611 {
1612 warning ("4-byte fast tracepoints not available; %s", errbuf);
1613 warned_about_fast_tracepoints = 1;
1614 }
1615 return 5;
1616 }
1617 }
1618 else
1619 {
1620 /* Indicate that the minimum length is currently unknown since the IPA
1621 has not loaded yet. */
1622 return 0;
1623 }
1624 }
1625
1626 static void
1627 add_insns (unsigned char *start, int len)
1628 {
1629 CORE_ADDR buildaddr = current_insn_ptr;
1630
1631 if (debug_threads)
1632 debug_printf ("Adding %d bytes of insn at %s\n",
1633 len, paddress (buildaddr));
1634
1635 append_insns (&buildaddr, len, start);
1636 current_insn_ptr = buildaddr;
1637 }
1638
1639 /* Our general strategy for emitting code is to avoid specifying raw
1640 bytes whenever possible, and instead copy a block of inline asm
1641 that is embedded in the function. This is a little messy, because
1642 we need to keep the compiler from discarding what looks like dead
1643 code, plus suppress various warnings. */
1644
1645 #define EMIT_ASM(NAME, INSNS) \
1646 do \
1647 { \
1648 extern unsigned char start_ ## NAME, end_ ## NAME; \
1649 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1650 __asm__ ("jmp end_" #NAME "\n" \
1651 "\t" "start_" #NAME ":" \
1652 "\t" INSNS "\n" \
1653 "\t" "end_" #NAME ":"); \
1654 } while (0)
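/* In rough terms, EMIT_ASM (foo, "pop %rax") assembles the template between
   the local labels start_foo and end_foo right inside the enclosing
   function, and add_insns then copies those end_foo - start_foo bytes into
   the jump pad at current_insn_ptr; the leading "jmp end_foo" keeps the
   host from ever executing the template bytes itself.  */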
1655
1656 #ifdef __x86_64__
1657
1658 #define EMIT_ASM32(NAME,INSNS) \
1659 do \
1660 { \
1661 extern unsigned char start_ ## NAME, end_ ## NAME; \
1662 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1663 __asm__ (".code32\n" \
1664 "\t" "jmp end_" #NAME "\n" \
1665 "\t" "start_" #NAME ":\n" \
1666 "\t" INSNS "\n" \
1667 "\t" "end_" #NAME ":\n" \
1668 ".code64\n"); \
1669 } while (0)
1670
1671 #else
1672
1673 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
1674
1675 #endif
1676
1677 #ifdef __x86_64__
1678
1679 static void
1680 amd64_emit_prologue (void)
1681 {
1682 EMIT_ASM (amd64_prologue,
1683 "pushq %rbp\n\t"
1684 "movq %rsp,%rbp\n\t"
1685 "sub $0x20,%rsp\n\t"
1686 "movq %rdi,-8(%rbp)\n\t"
1687 "movq %rsi,-16(%rbp)");
1688 }
1689
1690
1691 static void
1692 amd64_emit_epilogue (void)
1693 {
1694 EMIT_ASM (amd64_epilogue,
1695 "movq -16(%rbp),%rdi\n\t"
1696 "movq %rax,(%rdi)\n\t"
1697 "xor %rax,%rax\n\t"
1698 "leave\n\t"
1699 "ret");
1700 }
1701
1702 static void
1703 amd64_emit_add (void)
1704 {
1705 EMIT_ASM (amd64_add,
1706 "add (%rsp),%rax\n\t"
1707 "lea 0x8(%rsp),%rsp");
1708 }
1709
1710 static void
1711 amd64_emit_sub (void)
1712 {
1713 EMIT_ASM (amd64_sub,
1714 "sub %rax,(%rsp)\n\t"
1715 "pop %rax");
1716 }
1717
1718 static void
1719 amd64_emit_mul (void)
1720 {
1721 emit_error = 1;
1722 }
1723
1724 static void
1725 amd64_emit_lsh (void)
1726 {
1727 emit_error = 1;
1728 }
1729
1730 static void
1731 amd64_emit_rsh_signed (void)
1732 {
1733 emit_error = 1;
1734 }
1735
1736 static void
1737 amd64_emit_rsh_unsigned (void)
1738 {
1739 emit_error = 1;
1740 }
1741
1742 static void
1743 amd64_emit_ext (int arg)
1744 {
1745 switch (arg)
1746 {
1747 case 8:
1748 EMIT_ASM (amd64_ext_8,
1749 "cbtw\n\t"
1750 "cwtl\n\t"
1751 "cltq");
1752 break;
1753 case 16:
1754 EMIT_ASM (amd64_ext_16,
1755 "cwtl\n\t"
1756 "cltq");
1757 break;
1758 case 32:
1759 EMIT_ASM (amd64_ext_32,
1760 "cltq");
1761 break;
1762 default:
1763 emit_error = 1;
1764 }
1765 }
1766
1767 static void
1768 amd64_emit_log_not (void)
1769 {
1770 EMIT_ASM (amd64_log_not,
1771 "test %rax,%rax\n\t"
1772 "sete %cl\n\t"
1773 "movzbq %cl,%rax");
1774 }
1775
1776 static void
1777 amd64_emit_bit_and (void)
1778 {
1779 EMIT_ASM (amd64_and,
1780 "and (%rsp),%rax\n\t"
1781 "lea 0x8(%rsp),%rsp");
1782 }
1783
1784 static void
1785 amd64_emit_bit_or (void)
1786 {
1787 EMIT_ASM (amd64_or,
1788 "or (%rsp),%rax\n\t"
1789 "lea 0x8(%rsp),%rsp");
1790 }
1791
1792 static void
1793 amd64_emit_bit_xor (void)
1794 {
1795 EMIT_ASM (amd64_xor,
1796 "xor (%rsp),%rax\n\t"
1797 "lea 0x8(%rsp),%rsp");
1798 }
1799
1800 static void
1801 amd64_emit_bit_not (void)
1802 {
1803 EMIT_ASM (amd64_bit_not,
1804 "xorq $0xffffffffffffffff,%rax");
1805 }
1806
1807 static void
1808 amd64_emit_equal (void)
1809 {
1810 EMIT_ASM (amd64_equal,
1811 "cmp %rax,(%rsp)\n\t"
1812 "je .Lamd64_equal_true\n\t"
1813 "xor %rax,%rax\n\t"
1814 "jmp .Lamd64_equal_end\n\t"
1815 ".Lamd64_equal_true:\n\t"
1816 "mov $0x1,%rax\n\t"
1817 ".Lamd64_equal_end:\n\t"
1818 "lea 0x8(%rsp),%rsp");
1819 }
1820
1821 static void
1822 amd64_emit_less_signed (void)
1823 {
1824 EMIT_ASM (amd64_less_signed,
1825 "cmp %rax,(%rsp)\n\t"
1826 "jl .Lamd64_less_signed_true\n\t"
1827 "xor %rax,%rax\n\t"
1828 "jmp .Lamd64_less_signed_end\n\t"
1829 ".Lamd64_less_signed_true:\n\t"
1830 "mov $1,%rax\n\t"
1831 ".Lamd64_less_signed_end:\n\t"
1832 "lea 0x8(%rsp),%rsp");
1833 }
1834
1835 static void
1836 amd64_emit_less_unsigned (void)
1837 {
1838 EMIT_ASM (amd64_less_unsigned,
1839 "cmp %rax,(%rsp)\n\t"
1840 "jb .Lamd64_less_unsigned_true\n\t"
1841 "xor %rax,%rax\n\t"
1842 "jmp .Lamd64_less_unsigned_end\n\t"
1843 ".Lamd64_less_unsigned_true:\n\t"
1844 "mov $1,%rax\n\t"
1845 ".Lamd64_less_unsigned_end:\n\t"
1846 "lea 0x8(%rsp),%rsp");
1847 }
1848
1849 static void
1850 amd64_emit_ref (int size)
1851 {
1852 switch (size)
1853 {
1854 case 1:
1855 EMIT_ASM (amd64_ref1,
1856 "movb (%rax),%al");
1857 break;
1858 case 2:
1859 EMIT_ASM (amd64_ref2,
1860 "movw (%rax),%ax");
1861 break;
1862 case 4:
1863 EMIT_ASM (amd64_ref4,
1864 "movl (%rax),%eax");
1865 break;
1866 case 8:
1867 EMIT_ASM (amd64_ref8,
1868 "movq (%rax),%rax");
1869 break;
1870 }
1871 }
1872
1873 static void
1874 amd64_emit_if_goto (int *offset_p, int *size_p)
1875 {
1876 EMIT_ASM (amd64_if_goto,
1877 "mov %rax,%rcx\n\t"
1878 "pop %rax\n\t"
1879 "cmp $0,%rcx\n\t"
1880 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1881 if (offset_p)
1882 *offset_p = 10;
1883 if (size_p)
1884 *size_p = 4;
1885 }
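/* The ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0" above hand-encodes a
   "jne rel32" with a zero placeholder displacement.  *OFFSET_P (10) is the
   offset of that displacement within the emitted bytes -- 3 bytes for
   "mov %rax,%rcx", 1 for "pop %rax", 4 for "cmp $0,%rcx", then the 2-byte
   0x0f 0x85 opcode -- and *SIZE_P (4) is its width, so
   amd64_write_goto_address can patch in the real branch target later.  */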
1886
1887 static void
1888 amd64_emit_goto (int *offset_p, int *size_p)
1889 {
1890 EMIT_ASM (amd64_goto,
1891 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1892 if (offset_p)
1893 *offset_p = 1;
1894 if (size_p)
1895 *size_p = 4;
1896 }
1897
1898 static void
1899 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
1900 {
1901 int diff = (to - (from + size));
1902 unsigned char buf[sizeof (int)];
1903
1904 if (size != 4)
1905 {
1906 emit_error = 1;
1907 return;
1908 }
1909
1910 memcpy (buf, &diff, sizeof (int));
1911 target_write_memory (from, buf, sizeof (int));
1912 }
1913
1914 static void
1915 amd64_emit_const (LONGEST num)
1916 {
1917 unsigned char buf[16];
1918 int i;
1919 CORE_ADDR buildaddr = current_insn_ptr;
1920
1921 i = 0;
1922 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
1923 memcpy (&buf[i], &num, sizeof (num));
1924 i += 8;
1925 append_insns (&buildaddr, i, buf);
1926 current_insn_ptr = buildaddr;
1927 }
1928
1929 static void
1930 amd64_emit_call (CORE_ADDR fn)
1931 {
1932 unsigned char buf[16];
1933 int i;
1934 CORE_ADDR buildaddr;
1935 LONGEST offset64;
1936
1937 /* The destination function, being in the shared library, may be more
1938 than 31 bits away from the compiled code pad. */
1939
1940 buildaddr = current_insn_ptr;
1941
1942 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
1943
1944 i = 0;
1945
1946 if (offset64 > INT_MAX || offset64 < INT_MIN)
1947 {
1948 /* Offset is too large for a direct call; fall back to an indirect
1949 callq, which needs a register. %rdx is call-clobbered, so we
1950 don't have to push/pop it around the call. */
1951 buf[i++] = 0x48; /* movabs $fn,%rdx */
1952 buf[i++] = 0xba;
1953 memcpy (buf + i, &fn, 8);
1954 i += 8;
1955 buf[i++] = 0xff; /* callq *%rdx */
1956 buf[i++] = 0xd2;
1957 }
1958 else
1959 {
1960 int offset32 = offset64; /* we know we can't overflow here. */
1961
1962 buf[i++] = 0xe8; /* call <reladdr> */
1963 memcpy (buf + i, &offset32, 4);
1964 i += 4;
1965 }
1966
1967 append_insns (&buildaddr, i, buf);
1968 current_insn_ptr = buildaddr;
1969 }
1970
1971 static void
1972 amd64_emit_reg (int reg)
1973 {
1974 unsigned char buf[16];
1975 int i;
1976 CORE_ADDR buildaddr;
1977
1978 /* Assume raw_regs is still in %rdi. */
1979 buildaddr = current_insn_ptr;
1980 i = 0;
1981 buf[i++] = 0xbe; /* mov $<n>,%esi */
1982 memcpy (&buf[i], &reg, sizeof (reg));
1983 i += 4;
1984 append_insns (&buildaddr, i, buf);
1985 current_insn_ptr = buildaddr;
1986 amd64_emit_call (get_raw_reg_func_addr ());
1987 }
1988
1989 static void
1990 amd64_emit_pop (void)
1991 {
1992 EMIT_ASM (amd64_pop,
1993 "pop %rax");
1994 }
1995
1996 static void
1997 amd64_emit_stack_flush (void)
1998 {
1999 EMIT_ASM (amd64_stack_flush,
2000 "push %rax");
2001 }
2002
2003 static void
2004 amd64_emit_zero_ext (int arg)
2005 {
2006 switch (arg)
2007 {
2008 case 8:
2009 EMIT_ASM (amd64_zero_ext_8,
2010 "and $0xff,%rax");
2011 break;
2012 case 16:
2013 EMIT_ASM (amd64_zero_ext_16,
2014 "and $0xffff,%rax");
2015 break;
2016 case 32:
2017 EMIT_ASM (amd64_zero_ext_32,
2018 "mov $0xffffffff,%rcx\n\t"
2019 "and %rcx,%rax");
2020 break;
2021 default:
2022 emit_error = 1;
2023 }
2024 }
2025
2026 static void
2027 amd64_emit_swap (void)
2028 {
2029 EMIT_ASM (amd64_swap,
2030 "mov %rax,%rcx\n\t"
2031 "pop %rax\n\t"
2032 "push %rcx");
2033 }
2034
2035 static void
2036 amd64_emit_stack_adjust (int n)
2037 {
2038 unsigned char buf[16];
2039 int i;
2040 CORE_ADDR buildaddr = current_insn_ptr;
2041
2042 i = 0;
2043 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
2044 buf[i++] = 0x8d;
2045 buf[i++] = 0x64;
2046 buf[i++] = 0x24;
2047 /* This only handles adjustments up to 16, but we don't expect any more. */
2048 buf[i++] = n * 8;
2049 append_insns (&buildaddr, i, buf);
2050 current_insn_ptr = buildaddr;
2051 }
2052
2053 /* FN's prototype is `LONGEST(*fn)(int)'. */
2054
2055 static void
2056 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2057 {
2058 unsigned char buf[16];
2059 int i;
2060 CORE_ADDR buildaddr;
2061
2062 buildaddr = current_insn_ptr;
2063 i = 0;
2064 buf[i++] = 0xbf; /* movl $<n>,%edi */
2065 memcpy (&buf[i], &arg1, sizeof (arg1));
2066 i += 4;
2067 append_insns (&buildaddr, i, buf);
2068 current_insn_ptr = buildaddr;
2069 amd64_emit_call (fn);
2070 }
2071
2072 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2073
2074 static void
2075 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2076 {
2077 unsigned char buf[16];
2078 int i;
2079 CORE_ADDR buildaddr;
2080
2081 buildaddr = current_insn_ptr;
2082 i = 0;
2083 buf[i++] = 0xbf; /* movl $<n>,%edi */
2084 memcpy (&buf[i], &arg1, sizeof (arg1));
2085 i += 4;
2086 append_insns (&buildaddr, i, buf);
2087 current_insn_ptr = buildaddr;
2088 EMIT_ASM (amd64_void_call_2_a,
2089 /* Save away a copy of the stack top. */
2090 "push %rax\n\t"
2091 /* Also pass top as the second argument. */
2092 "mov %rax,%rsi");
2093 amd64_emit_call (fn);
2094 EMIT_ASM (amd64_void_call_2_b,
2095 /* Restore the stack top, %rax may have been trashed. */
2096 "pop %rax");
2097 }
2098
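/* Taken together, the two call helpers above behave roughly like

	top = fn (arg1);	(amd64_emit_int_call_1)
	fn (arg1, top);		(amd64_emit_void_call_2, top preserved)

   where "top" is the value cached in %rax: ARG1 is passed in %edi, the
   cached top goes in %rsi for the two-argument form, and %rax is saved
   around that call since the callee may clobber it.  */
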
2099 static void
2100 amd64_emit_eq_goto (int *offset_p, int *size_p)
2101 {
2102 EMIT_ASM (amd64_eq,
2103 "cmp %rax,(%rsp)\n\t"
2104 "jne .Lamd64_eq_fallthru\n\t"
2105 "lea 0x8(%rsp),%rsp\n\t"
2106 "pop %rax\n\t"
2107 /* jmp, but don't trust the assembler to choose the right jump */
2108 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2109 ".Lamd64_eq_fallthru:\n\t"
2110 "lea 0x8(%rsp),%rsp\n\t"
2111 "pop %rax");
2112
2113 if (offset_p)
2114 *offset_p = 13;
2115 if (size_p)
2116 *size_p = 4;
2117 }
2118
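/* A note on the conditional-goto emitters: each "taken" path ends in a
   raw "e9 00 00 00 00" jump whose 32-bit displacement is filled in
   later through the write_goto_address hook.  *OFFSET_P is the byte
   offset of that displacement within the emitted sequence (here 13:
   4-byte cmp + 2-byte short jcc + 5-byte lea + 1-byte pop + the e9
   opcode byte) and *SIZE_P is the displacement size.  The same layout
   applies to the ne/lt/le/gt/ge variants below.  */
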
2119 static void
2120 amd64_emit_ne_goto (int *offset_p, int *size_p)
2121 {
2122 EMIT_ASM (amd64_ne,
2123 "cmp %rax,(%rsp)\n\t"
2124 "je .Lamd64_ne_fallthru\n\t"
2125 "lea 0x8(%rsp),%rsp\n\t"
2126 "pop %rax\n\t"
2127 /* jmp, but don't trust the assembler to choose the right jump */
2128 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2129 ".Lamd64_ne_fallthru:\n\t"
2130 "lea 0x8(%rsp),%rsp\n\t"
2131 "pop %rax");
2132
2133 if (offset_p)
2134 *offset_p = 13;
2135 if (size_p)
2136 *size_p = 4;
2137 }
2138
2139 static void
2140 amd64_emit_lt_goto (int *offset_p, int *size_p)
2141 {
2142 EMIT_ASM (amd64_lt,
2143 "cmp %rax,(%rsp)\n\t"
2144 "jnl .Lamd64_lt_fallthru\n\t"
2145 "lea 0x8(%rsp),%rsp\n\t"
2146 "pop %rax\n\t"
2147 /* jmp, but don't trust the assembler to choose the right jump */
2148 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2149 ".Lamd64_lt_fallthru:\n\t"
2150 "lea 0x8(%rsp),%rsp\n\t"
2151 "pop %rax");
2152
2153 if (offset_p)
2154 *offset_p = 13;
2155 if (size_p)
2156 *size_p = 4;
2157 }
2158
2159 static void
2160 amd64_emit_le_goto (int *offset_p, int *size_p)
2161 {
2162 EMIT_ASM (amd64_le,
2163 "cmp %rax,(%rsp)\n\t"
2164 "jnle .Lamd64_le_fallthru\n\t"
2165 "lea 0x8(%rsp),%rsp\n\t"
2166 "pop %rax\n\t"
2167 /* jmp, but don't trust the assembler to choose the right jump */
2168 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2169 ".Lamd64_le_fallthru:\n\t"
2170 "lea 0x8(%rsp),%rsp\n\t"
2171 "pop %rax");
2172
2173 if (offset_p)
2174 *offset_p = 13;
2175 if (size_p)
2176 *size_p = 4;
2177 }
2178
2179 static void
2180 amd64_emit_gt_goto (int *offset_p, int *size_p)
2181 {
2182 EMIT_ASM (amd64_gt,
2183 "cmp %rax,(%rsp)\n\t"
2184 "jng .Lamd64_gt_fallthru\n\t"
2185 "lea 0x8(%rsp),%rsp\n\t"
2186 "pop %rax\n\t"
2187 /* jmp, but don't trust the assembler to choose the right jump */
2188 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2189 ".Lamd64_gt_fallthru:\n\t"
2190 "lea 0x8(%rsp),%rsp\n\t"
2191 "pop %rax");
2192
2193 if (offset_p)
2194 *offset_p = 13;
2195 if (size_p)
2196 *size_p = 4;
2197 }
2198
2199 static void
2200 amd64_emit_ge_goto (int *offset_p, int *size_p)
2201 {
2202 EMIT_ASM (amd64_ge,
2203 "cmp %rax,(%rsp)\n\t"
2204 "jnge .Lamd64_ge_fallthru\n\t"
2205 ".Lamd64_ge_jump:\n\t"
2206 "lea 0x8(%rsp),%rsp\n\t"
2207 "pop %rax\n\t"
2208 /* jmp, but don't trust the assembler to choose the right jump */
2209 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2210 ".Lamd64_ge_fallthru:\n\t"
2211 "lea 0x8(%rsp),%rsp\n\t"
2212 "pop %rax");
2213
2214 if (offset_p)
2215 *offset_p = 13;
2216 if (size_p)
2217 *size_p = 4;
2218 }
2219
2220 static emit_ops amd64_emit_ops =
2221 {
2222 amd64_emit_prologue,
2223 amd64_emit_epilogue,
2224 amd64_emit_add,
2225 amd64_emit_sub,
2226 amd64_emit_mul,
2227 amd64_emit_lsh,
2228 amd64_emit_rsh_signed,
2229 amd64_emit_rsh_unsigned,
2230 amd64_emit_ext,
2231 amd64_emit_log_not,
2232 amd64_emit_bit_and,
2233 amd64_emit_bit_or,
2234 amd64_emit_bit_xor,
2235 amd64_emit_bit_not,
2236 amd64_emit_equal,
2237 amd64_emit_less_signed,
2238 amd64_emit_less_unsigned,
2239 amd64_emit_ref,
2240 amd64_emit_if_goto,
2241 amd64_emit_goto,
2242 amd64_write_goto_address,
2243 amd64_emit_const,
2244 amd64_emit_call,
2245 amd64_emit_reg,
2246 amd64_emit_pop,
2247 amd64_emit_stack_flush,
2248 amd64_emit_zero_ext,
2249 amd64_emit_swap,
2250 amd64_emit_stack_adjust,
2251 amd64_emit_int_call_1,
2252 amd64_emit_void_call_2,
2253 amd64_emit_eq_goto,
2254 amd64_emit_ne_goto,
2255 amd64_emit_lt_goto,
2256 amd64_emit_le_goto,
2257 amd64_emit_gt_goto,
2258 amd64_emit_ge_goto
2259 };
2260
2261 #endif /* __x86_64__ */
2262
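/* The 32-bit emitters below follow the same scheme as the amd64 ones
   above, except that a 64-bit expression-stack entry is split across a
   register pair: %eax holds the low word and %ebx the high word of the
   top of stack, and deeper entries occupy eight bytes each on the
   hardware stack with the low word at the lower address.  That is why,
   for instance, i386_emit_add pairs an "add" with an "adc" and
   i386_emit_pop pops two registers.  */
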
2263 static void
2264 i386_emit_prologue (void)
2265 {
2266 EMIT_ASM32 (i386_prologue,
2267 "push %ebp\n\t"
2268 "mov %esp,%ebp\n\t"
2269 "push %ebx");
2270 /* At this point, the raw regs base address is at 8(%ebp), and the
2271 value pointer is at 12(%ebp). */
2272 }
2273
2274 static void
2275 i386_emit_epilogue (void)
2276 {
2277 EMIT_ASM32 (i386_epilogue,
2278 "mov 12(%ebp),%ecx\n\t"
2279 "mov %eax,(%ecx)\n\t"
2280 "mov %ebx,0x4(%ecx)\n\t"
2281 "xor %eax,%eax\n\t"
2282 "pop %ebx\n\t"
2283 "pop %ebp\n\t"
2284 "ret");
2285 }
2286
2287 static void
2288 i386_emit_add (void)
2289 {
2290 EMIT_ASM32 (i386_add,
2291 "add (%esp),%eax\n\t"
2292 "adc 0x4(%esp),%ebx\n\t"
2293 "lea 0x8(%esp),%esp");
2294 }
2295
2296 static void
2297 i386_emit_sub (void)
2298 {
2299 EMIT_ASM32 (i386_sub,
2300 "subl %eax,(%esp)\n\t"
2301 "sbbl %ebx,4(%esp)\n\t"
2302 "pop %eax\n\t"
2303 "pop %ebx\n\t");
2304 }
2305
2306 static void
2307 i386_emit_mul (void)
2308 {
2309 emit_error = 1;
2310 }
2311
2312 static void
2313 i386_emit_lsh (void)
2314 {
2315 emit_error = 1;
2316 }
2317
2318 static void
2319 i386_emit_rsh_signed (void)
2320 {
2321 emit_error = 1;
2322 }
2323
2324 static void
2325 i386_emit_rsh_unsigned (void)
2326 {
2327 emit_error = 1;
2328 }
2329
2330 static void
2331 i386_emit_ext (int arg)
2332 {
2333 switch (arg)
2334 {
2335 case 8:
2336 EMIT_ASM32 (i386_ext_8,
2337 "cbtw\n\t"
2338 "cwtl\n\t"
2339 "movl %eax,%ebx\n\t"
2340 "sarl $31,%ebx");
2341 break;
2342 case 16:
2343 EMIT_ASM32 (i386_ext_16,
2344 "cwtl\n\t"
2345 "movl %eax,%ebx\n\t"
2346 "sarl $31,%ebx");
2347 break;
2348 case 32:
2349 EMIT_ASM32 (i386_ext_32,
2350 "movl %eax,%ebx\n\t"
2351 "sarl $31,%ebx");
2352 break;
2353 default:
2354 emit_error = 1;
2355 }
2356 }
2357
2358 static void
2359 i386_emit_log_not (void)
2360 {
2361 EMIT_ASM32 (i386_log_not,
2362 "or %ebx,%eax\n\t"
2363 "test %eax,%eax\n\t"
2364 "sete %cl\n\t"
2365 "xor %ebx,%ebx\n\t"
2366 "movzbl %cl,%eax");
2367 }
2368
2369 static void
2370 i386_emit_bit_and (void)
2371 {
2372 EMIT_ASM32 (i386_and,
2373 "and (%esp),%eax\n\t"
2374 "and 0x4(%esp),%ebx\n\t"
2375 "lea 0x8(%esp),%esp");
2376 }
2377
2378 static void
2379 i386_emit_bit_or (void)
2380 {
2381 EMIT_ASM32 (i386_or,
2382 "or (%esp),%eax\n\t"
2383 "or 0x4(%esp),%ebx\n\t"
2384 "lea 0x8(%esp),%esp");
2385 }
2386
2387 static void
2388 i386_emit_bit_xor (void)
2389 {
2390 EMIT_ASM32 (i386_xor,
2391 "xor (%esp),%eax\n\t"
2392 "xor 0x4(%esp),%ebx\n\t"
2393 "lea 0x8(%esp),%esp");
2394 }
2395
2396 static void
2397 i386_emit_bit_not (void)
2398 {
2399 EMIT_ASM32 (i386_bit_not,
2400 "xor $0xffffffff,%eax\n\t"
2401 "xor $0xffffffff,%ebx\n\t");
2402 }
2403
2404 static void
2405 i386_emit_equal (void)
2406 {
2407 EMIT_ASM32 (i386_equal,
2408 "cmpl %ebx,4(%esp)\n\t"
2409 "jne .Li386_equal_false\n\t"
2410 "cmpl %eax,(%esp)\n\t"
2411 "je .Li386_equal_true\n\t"
2412 ".Li386_equal_false:\n\t"
2413 "xor %eax,%eax\n\t"
2414 "jmp .Li386_equal_end\n\t"
2415 ".Li386_equal_true:\n\t"
2416 "mov $1,%eax\n\t"
2417 ".Li386_equal_end:\n\t"
2418 "xor %ebx,%ebx\n\t"
2419 "lea 0x8(%esp),%esp");
2420 }
2421
2422 static void
2423 i386_emit_less_signed (void)
2424 {
2425 EMIT_ASM32 (i386_less_signed,
2426 "cmpl %ebx,4(%esp)\n\t"
2427 "jl .Li386_less_signed_true\n\t"
2428 "jne .Li386_less_signed_false\n\t"
2429 "cmpl %eax,(%esp)\n\t"
2430 "jl .Li386_less_signed_true\n\t"
2431 ".Li386_less_signed_false:\n\t"
2432 "xor %eax,%eax\n\t"
2433 "jmp .Li386_less_signed_end\n\t"
2434 ".Li386_less_signed_true:\n\t"
2435 "mov $1,%eax\n\t"
2436 ".Li386_less_signed_end:\n\t"
2437 "xor %ebx,%ebx\n\t"
2438 "lea 0x8(%esp),%esp");
2439 }
2440
2441 static void
2442 i386_emit_less_unsigned (void)
2443 {
2444 EMIT_ASM32 (i386_less_unsigned,
2445 "cmpl %ebx,4(%esp)\n\t"
2446 "jb .Li386_less_unsigned_true\n\t"
2447 "jne .Li386_less_unsigned_false\n\t"
2448 "cmpl %eax,(%esp)\n\t"
2449 "jb .Li386_less_unsigned_true\n\t"
2450 ".Li386_less_unsigned_false:\n\t"
2451 "xor %eax,%eax\n\t"
2452 "jmp .Li386_less_unsigned_end\n\t"
2453 ".Li386_less_unsigned_true:\n\t"
2454 "mov $1,%eax\n\t"
2455 ".Li386_less_unsigned_end:\n\t"
2456 "xor %ebx,%ebx\n\t"
2457 "lea 0x8(%esp),%esp");
2458 }
2459
2460 static void
2461 i386_emit_ref (int size)
2462 {
2463 switch (size)
2464 {
2465 case 1:
2466 EMIT_ASM32 (i386_ref1,
2467 "movb (%eax),%al");
2468 break;
2469 case 2:
2470 EMIT_ASM32 (i386_ref2,
2471 "movw (%eax),%ax");
2472 break;
2473 case 4:
2474 EMIT_ASM32 (i386_ref4,
2475 "movl (%eax),%eax");
2476 break;
2477 case 8:
2478 EMIT_ASM32 (i386_ref8,
2479 "movl 4(%eax),%ebx\n\t"
2480 "movl (%eax),%eax");
2481 break;
2482 }
2483 }
2484
2485 static void
2486 i386_emit_if_goto (int *offset_p, int *size_p)
2487 {
2488 EMIT_ASM32 (i386_if_goto,
2489 "mov %eax,%ecx\n\t"
2490 "or %ebx,%ecx\n\t"
2491 "pop %eax\n\t"
2492 "pop %ebx\n\t"
2493 "cmpl $0,%ecx\n\t"
2494 /* Don't trust the assembler to choose the right jump */
2495 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2496
2497 if (offset_p)
2498 *offset_p = 11; /* Be sure that this matches the sequence above. */
2499 if (size_p)
2500 *size_p = 4;
2501 }
2502
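/* As in the amd64 case, the 11 above is the byte offset of the
   displacement to be patched: 2-byte mov + 2-byte or + two 1-byte pops
   + 3-byte cmpl, followed by the two-byte 0f 85 (jne rel32) opcode.  */
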
2503 static void
2504 i386_emit_goto (int *offset_p, int *size_p)
2505 {
2506 EMIT_ASM32 (i386_goto,
2507 /* Don't trust the assembler to choose the right jump */
2508 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2509 if (offset_p)
2510 *offset_p = 1;
2511 if (size_p)
2512 *size_p = 4;
2513 }
2514
2515 static void
2516 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2517 {
2518 int diff = (to - (from + size));
2519 unsigned char buf[sizeof (int)];
2520
2521 /* We're only doing 4-byte sizes at the moment. */
2522 if (size != 4)
2523 {
2524 emit_error = 1;
2525 return;
2526 }
2527
2528 memcpy (buf, &diff, sizeof (int));
2529 target_write_memory (from, buf, sizeof (int));
2530 }
2531
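/* Illustrative sketch only, with hypothetical local variables: the goto
   emitters and i386_write_goto_address are meant to be used together
   along the lines of

	int offset, size;
	CORE_ADDR start = current_insn_ptr;
	i386_emit_goto (&offset, &size);
	...  later, once the destination "dest" is known ...
	i386_write_goto_address (start + offset, dest, size);

   FROM is thus the address of the displacement field itself, and the
   value written is TO - (FROM + SIZE), i.e. relative to the end of the
   displacement.  */
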
2532 static void
2533 i386_emit_const (LONGEST num)
2534 {
2535 unsigned char buf[16];
2536 int i, hi, lo;
2537 CORE_ADDR buildaddr = current_insn_ptr;
2538
2539 i = 0;
2540 buf[i++] = 0xb8; /* mov $<n>,%eax */
2541 lo = num & 0xffffffff;
2542 memcpy (&buf[i], &lo, sizeof (lo));
2543 i += 4;
2544 hi = ((num >> 32) & 0xffffffff);
2545 if (hi)
2546 {
2547 buf[i++] = 0xbb; /* mov $<n>,%ebx */
2548 memcpy (&buf[i], &hi, sizeof (hi));
2549 i += 4;
2550 }
2551 else
2552 {
2553 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2554 }
2555 append_insns (&buildaddr, i, buf);
2556 current_insn_ptr = buildaddr;
2557 }
2558
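/* For example, i386_emit_const (5) produces "mov $0x5,%eax" followed by
   "xor %ebx,%ebx", while a constant with a non-zero high word gets a
   "mov $<hi>,%ebx" for the upper half instead of the xor.  */
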
2559 static void
2560 i386_emit_call (CORE_ADDR fn)
2561 {
2562 unsigned char buf[16];
2563 int i, offset;
2564 CORE_ADDR buildaddr;
2565
2566 buildaddr = current_insn_ptr;
2567 i = 0;
2568 buf[i++] = 0xe8; /* call <reladdr> */
2569 offset = ((int) fn) - (buildaddr + 5);
2570 memcpy (buf + 1, &offset, 4);
2571 append_insns (&buildaddr, 5, buf);
2572 current_insn_ptr = buildaddr;
2573 }
2574
2575 static void
2576 i386_emit_reg (int reg)
2577 {
2578 unsigned char buf[16];
2579 int i;
2580 CORE_ADDR buildaddr;
2581
2582 EMIT_ASM32 (i386_reg_a,
2583 "sub $0x8,%esp");
2584 buildaddr = current_insn_ptr;
2585 i = 0;
2586 buf[i++] = 0xb8; /* mov $<n>,%eax */
2587 memcpy (&buf[i], &reg, sizeof (reg));
2588 i += 4;
2589 append_insns (&buildaddr, i, buf);
2590 current_insn_ptr = buildaddr;
2591 EMIT_ASM32 (i386_reg_b,
2592 "mov %eax,4(%esp)\n\t"
2593 "mov 8(%ebp),%eax\n\t"
2594 "mov %eax,(%esp)");
2595 i386_emit_call (get_raw_reg_func_addr ());
2596 EMIT_ASM32 (i386_reg_c,
2597 "xor %ebx,%ebx\n\t"
2598 "lea 0x8(%esp),%esp");
2599 }
2600
2601 static void
2602 i386_emit_pop (void)
2603 {
2604 EMIT_ASM32 (i386_pop,
2605 "pop %eax\n\t"
2606 "pop %ebx");
2607 }
2608
2609 static void
2610 i386_emit_stack_flush (void)
2611 {
2612 EMIT_ASM32 (i386_stack_flush,
2613 "push %ebx\n\t"
2614 "push %eax");
2615 }
2616
2617 static void
2618 i386_emit_zero_ext (int arg)
2619 {
2620 switch (arg)
2621 {
2622 case 8:
2623 EMIT_ASM32 (i386_zero_ext_8,
2624 "and $0xff,%eax\n\t"
2625 "xor %ebx,%ebx");
2626 break;
2627 case 16:
2628 EMIT_ASM32 (i386_zero_ext_16,
2629 "and $0xffff,%eax\n\t"
2630 "xor %ebx,%ebx");
2631 break;
2632 case 32:
2633 EMIT_ASM32 (i386_zero_ext_32,
2634 "xor %ebx,%ebx");
2635 break;
2636 default:
2637 emit_error = 1;
2638 }
2639 }
2640
2641 static void
2642 i386_emit_swap (void)
2643 {
2644 EMIT_ASM32 (i386_swap,
2645 "mov %eax,%ecx\n\t"
2646 "mov %ebx,%edx\n\t"
2647 "pop %eax\n\t"
2648 "pop %ebx\n\t"
2649 "push %edx\n\t"
2650 "push %ecx");
2651 }
2652
2653 static void
2654 i386_emit_stack_adjust (int n)
2655 {
2656 unsigned char buf[16];
2657 int i;
2658 CORE_ADDR buildaddr = current_insn_ptr;
2659
2660 i = 0;
2661 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2662 buf[i++] = 0x64;
2663 buf[i++] = 0x24;
2664 buf[i++] = n * 8;
2665 append_insns (&buildaddr, i, buf);
2666 current_insn_ptr = buildaddr;
2667 }
2668
2669 /* FN's prototype is `LONGEST(*fn)(int)'. */
2670
2671 static void
2672 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
2673 {
2674 unsigned char buf[16];
2675 int i;
2676 CORE_ADDR buildaddr;
2677
2678 EMIT_ASM32 (i386_int_call_1_a,
2679 /* Reserve a bit of stack space. */
2680 "sub $0x8,%esp");
2681 /* Put the one argument on the stack. */
2682 buildaddr = current_insn_ptr;
2683 i = 0;
2684 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2685 buf[i++] = 0x04;
2686 buf[i++] = 0x24;
2687 memcpy (&buf[i], &arg1, sizeof (arg1));
2688 i += 4;
2689 append_insns (&buildaddr, i, buf);
2690 current_insn_ptr = buildaddr;
2691 i386_emit_call (fn);
2692 EMIT_ASM32 (i386_int_call_1_c,
2693 "mov %edx,%ebx\n\t"
2694 "lea 0x8(%esp),%esp");
2695 }
2696
2697 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2698
2699 static void
2700 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
2701 {
2702 unsigned char buf[16];
2703 int i;
2704 CORE_ADDR buildaddr;
2705
2706 EMIT_ASM32 (i386_void_call_2_a,
2707 /* Preserve %eax only; we don't have to worry about %ebx. */
2708 "push %eax\n\t"
2709 /* Reserve a bit of stack space for arguments. */
2710 "sub $0x10,%esp\n\t"
2711 /* Copy "top" to the second argument position. (Note that
2712 we can't assume the function won't scribble on its
2713 arguments, so don't try to restore from this.) */
2714 "mov %eax,4(%esp)\n\t"
2715 "mov %ebx,8(%esp)");
2716 /* Put the first argument on the stack. */
2717 buildaddr = current_insn_ptr;
2718 i = 0;
2719 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2720 buf[i++] = 0x04;
2721 buf[i++] = 0x24;
2722 memcpy (&buf[i], &arg1, sizeof (arg1));
2723 i += 4;
2724 append_insns (&buildaddr, i, buf);
2725 current_insn_ptr = buildaddr;
2726 i386_emit_call (fn);
2727 EMIT_ASM32 (i386_void_call_2_b,
2728 "lea 0x10(%esp),%esp\n\t"
2729 /* Restore original stack top. */
2730 "pop %eax");
2731 }
2732
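/* Taken together, the two 32-bit call helpers above behave roughly like

	top = fn (arg1);	(i386_emit_int_call_1)
	fn (arg1, top);		(i386_emit_void_call_2, top preserved)

   with arguments passed on the stack: ARG1 is stored at (%esp) and, for
   the two-argument form, the cached %eax:%ebx top of stack is copied to
   4(%esp) and 8(%esp) as the 64-bit second argument.  The LONGEST
   result of the first form comes back in %eax:%edx and is moved into
   the %eax:%ebx pair.  */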
2733
2734 static void
2735 i386_emit_eq_goto (int *offset_p, int *size_p)
2736 {
2737 EMIT_ASM32 (eq,
2738 /* Check the low half first; it is more likely to be the decider. */
2739 "cmpl %eax,(%esp)\n\t"
2740 "jne .Leq_fallthru\n\t"
2741 "cmpl %ebx,4(%esp)\n\t"
2742 "jne .Leq_fallthru\n\t"
2743 "lea 0x8(%esp),%esp\n\t"
2744 "pop %eax\n\t"
2745 "pop %ebx\n\t"
2746 /* jmp, but don't trust the assembler to choose the right jump */
2747 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2748 ".Leq_fallthru:\n\t"
2749 "lea 0x8(%esp),%esp\n\t"
2750 "pop %eax\n\t"
2751 "pop %ebx");
2752
2753 if (offset_p)
2754 *offset_p = 18;
2755 if (size_p)
2756 *size_p = 4;
2757 }
2758
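/* As with the amd64 variants, *OFFSET_P locates the jump displacement
   inside the sequence just emitted: 18 for the eq/ne forms (3 + 2 + 4 +
   2 + 4 + 1 + 1 bytes of compare, branch and cleanup code plus the e9
   opcode byte) and 20 for the lt/le/gt/ge forms, which carry one more
   conditional branch.  The hard-coded values assume the assembler picks
   the short two-byte encodings for the internal jumps.  */
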
2759 static void
2760 i386_emit_ne_goto (int *offset_p, int *size_p)
2761 {
2762 EMIT_ASM32 (ne,
2763 /* Check the low half first; it is more likely to be the decider. */
2764 "cmpl %eax,(%esp)\n\t"
2765 "jne .Lne_jump\n\t"
2766 "cmpl %ebx,4(%esp)\n\t"
2767 "je .Lne_fallthru\n\t"
2768 ".Lne_jump:\n\t"
2769 "lea 0x8(%esp),%esp\n\t"
2770 "pop %eax\n\t"
2771 "pop %ebx\n\t"
2772 /* jmp, but don't trust the assembler to choose the right jump */
2773 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2774 ".Lne_fallthru:\n\t"
2775 "lea 0x8(%esp),%esp\n\t"
2776 "pop %eax\n\t"
2777 "pop %ebx");
2778
2779 if (offset_p)
2780 *offset_p = 18;
2781 if (size_p)
2782 *size_p = 4;
2783 }
2784
2785 static void
2786 i386_emit_lt_goto (int *offset_p, int *size_p)
2787 {
2788 EMIT_ASM32 (lt,
2789 "cmpl %ebx,4(%esp)\n\t"
2790 "jl .Llt_jump\n\t"
2791 "jne .Llt_fallthru\n\t"
2792 "cmpl %eax,(%esp)\n\t"
2793 "jnl .Llt_fallthru\n\t"
2794 ".Llt_jump:\n\t"
2795 "lea 0x8(%esp),%esp\n\t"
2796 "pop %eax\n\t"
2797 "pop %ebx\n\t"
2798 /* jmp, but don't trust the assembler to choose the right jump */
2799 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2800 ".Llt_fallthru:\n\t"
2801 "lea 0x8(%esp),%esp\n\t"
2802 "pop %eax\n\t"
2803 "pop %ebx");
2804
2805 if (offset_p)
2806 *offset_p = 20;
2807 if (size_p)
2808 *size_p = 4;
2809 }
2810
2811 static void
2812 i386_emit_le_goto (int *offset_p, int *size_p)
2813 {
2814 EMIT_ASM32 (le,
2815 "cmpl %ebx,4(%esp)\n\t"
2816 "jle .Lle_jump\n\t"
2817 "jne .Lle_fallthru\n\t"
2818 "cmpl %eax,(%esp)\n\t"
2819 "jnle .Lle_fallthru\n\t"
2820 ".Lle_jump:\n\t"
2821 "lea 0x8(%esp),%esp\n\t"
2822 "pop %eax\n\t"
2823 "pop %ebx\n\t"
2824 /* jmp, but don't trust the assembler to choose the right jump */
2825 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2826 ".Lle_fallthru:\n\t"
2827 "lea 0x8(%esp),%esp\n\t"
2828 "pop %eax\n\t"
2829 "pop %ebx");
2830
2831 if (offset_p)
2832 *offset_p = 20;
2833 if (size_p)
2834 *size_p = 4;
2835 }
2836
2837 static void
2838 i386_emit_gt_goto (int *offset_p, int *size_p)
2839 {
2840 EMIT_ASM32 (gt,
2841 "cmpl %ebx,4(%esp)\n\t"
2842 "jg .Lgt_jump\n\t"
2843 "jne .Lgt_fallthru\n\t"
2844 "cmpl %eax,(%esp)\n\t"
2845 "jng .Lgt_fallthru\n\t"
2846 ".Lgt_jump:\n\t"
2847 "lea 0x8(%esp),%esp\n\t"
2848 "pop %eax\n\t"
2849 "pop %ebx\n\t"
2850 /* jmp, but don't trust the assembler to choose the right jump */
2851 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2852 ".Lgt_fallthru:\n\t"
2853 "lea 0x8(%esp),%esp\n\t"
2854 "pop %eax\n\t"
2855 "pop %ebx");
2856
2857 if (offset_p)
2858 *offset_p = 20;
2859 if (size_p)
2860 *size_p = 4;
2861 }
2862
2863 static void
2864 i386_emit_ge_goto (int *offset_p, int *size_p)
2865 {
2866 EMIT_ASM32 (ge,
2867 "cmpl %ebx,4(%esp)\n\t"
2868 "jge .Lge_jump\n\t"
2869 "jne .Lge_fallthru\n\t"
2870 "cmpl %eax,(%esp)\n\t"
2871 "jnge .Lge_fallthru\n\t"
2872 ".Lge_jump:\n\t"
2873 "lea 0x8(%esp),%esp\n\t"
2874 "pop %eax\n\t"
2875 "pop %ebx\n\t"
2876 /* jmp, but don't trust the assembler to choose the right jump */
2877 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2878 ".Lge_fallthru:\n\t"
2879 "lea 0x8(%esp),%esp\n\t"
2880 "pop %eax\n\t"
2881 "pop %ebx");
2882
2883 if (offset_p)
2884 *offset_p = 20;
2885 if (size_p)
2886 *size_p = 4;
2887 }
2888
2889 static emit_ops i386_emit_ops =
2890 {
2891 i386_emit_prologue,
2892 i386_emit_epilogue,
2893 i386_emit_add,
2894 i386_emit_sub,
2895 i386_emit_mul,
2896 i386_emit_lsh,
2897 i386_emit_rsh_signed,
2898 i386_emit_rsh_unsigned,
2899 i386_emit_ext,
2900 i386_emit_log_not,
2901 i386_emit_bit_and,
2902 i386_emit_bit_or,
2903 i386_emit_bit_xor,
2904 i386_emit_bit_not,
2905 i386_emit_equal,
2906 i386_emit_less_signed,
2907 i386_emit_less_unsigned,
2908 i386_emit_ref,
2909 i386_emit_if_goto,
2910 i386_emit_goto,
2911 i386_write_goto_address,
2912 i386_emit_const,
2913 i386_emit_call,
2914 i386_emit_reg,
2915 i386_emit_pop,
2916 i386_emit_stack_flush,
2917 i386_emit_zero_ext,
2918 i386_emit_swap,
2919 i386_emit_stack_adjust,
2920 i386_emit_int_call_1,
2921 i386_emit_void_call_2,
2922 i386_emit_eq_goto,
2923 i386_emit_ne_goto,
2924 i386_emit_lt_goto,
2925 i386_emit_le_goto,
2926 i386_emit_gt_goto,
2927 i386_emit_ge_goto
2928 };
2929
2930
2931 emit_ops *
2932 x86_target::emit_ops ()
2933 {
2934 #ifdef __x86_64__
2935 if (is_64bit_tdesc ())
2936 return &amd64_emit_ops;
2937 else
2938 #endif
2939 return &i386_emit_ops;
2940 }
2941
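/* Illustrative sketch only, and the field names are assumed to mirror
   the emitter names used in the initializers above: a consumer of this
   vector compiles an agent-expression bytecode program by calling one
   hook per opcode, roughly

	emit_ops *ops = the_x86_target.emit_ops ();
	ops->emit_prologue ();
	ops->emit_const (42);		// push a constant
	ops->emit_epilogue ();

   with the goto hooks and write_goto_address wiring up any branches.  */
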
2942 /* Implementation of target ops method "sw_breakpoint_from_kind". */
2943
2944 const gdb_byte *
2945 x86_target::sw_breakpoint_from_kind (int kind, int *size)
2946 {
2947 *size = x86_breakpoint_len;
2948 return x86_breakpoint;
2949 }
2950
2951 bool
2952 x86_target::low_supports_range_stepping ()
2953 {
2954 return true;
2955 }
2956
2957 int
2958 x86_target::get_ipa_tdesc_idx ()
2959 {
2960 struct regcache *regcache = get_thread_regcache (current_thread, 0);
2961 const struct target_desc *tdesc = regcache->tdesc;
2962
2963 #ifdef __x86_64__
2964 return amd64_get_ipa_tdesc_idx (tdesc);
2965 #endif
2966
2967 if (tdesc == tdesc_i386_linux_no_xml.get ())
2968 return X86_TDESC_SSE;
2969
2970 return i386_get_ipa_tdesc_idx (tdesc);
2971 }
2972
2973 /* The linux target ops object. */
2974
2975 linux_process_target *the_linux_target = &the_x86_target;
2976
2977 void
2978 initialize_low_arch (void)
2979 {
2980 /* Initialize the Linux target descriptions. */
2981 #ifdef __x86_64__
2982 tdesc_amd64_linux_no_xml = allocate_target_description ();
2983 copy_target_description (tdesc_amd64_linux_no_xml.get (),
2984 amd64_linux_read_description (X86_XSTATE_SSE_MASK,
2985 false));
2986 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
2987 #endif
2988
2989 tdesc_i386_linux_no_xml = allocate_target_description ();
2990 copy_target_description (tdesc_i386_linux_no_xml.get (),
2991 i386_linux_read_description (X86_XSTATE_SSE_MASK));
2992 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
2993
2994 initialize_regsets_info (&x86_regsets_info);
2995 }