1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
3 Copyright (C) 2002-2016 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include <signal.h>
22 #include <limits.h>
23 #include <inttypes.h>
24 #include "linux-low.h"
25 #include "i387-fp.h"
26 #include "x86-low.h"
27 #include "x86-xstate.h"
28 #include "nat/gdb_ptrace.h"
29
30 #ifdef __x86_64__
31 #include "nat/amd64-linux-siginfo.h"
32 #endif
33
34 #include "gdb_proc_service.h"
35 /* Don't include elf/common.h if linux/elf.h got included by
36 gdb_proc_service.h. */
37 #ifndef ELFMAG0
38 #include "elf/common.h"
39 #endif
40
41 #include "agent.h"
42 #include "tdesc.h"
43 #include "tracepoint.h"
44 #include "ax.h"
45 #include "nat/linux-nat.h"
46 #include "nat/x86-linux.h"
47 #include "nat/x86-linux-dregs.h"
48 #include "linux-x86-tdesc.h"
49
50 #ifdef __x86_64__
51 static struct target_desc *tdesc_amd64_linux_no_xml;
52 #endif
53 static struct target_desc *tdesc_i386_linux_no_xml;
54
55
56 static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
57 static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
58
59 /* Backward compatibility for gdb without XML support. */
60
61 static const char *xmltarget_i386_linux_no_xml = "@<target>\
62 <architecture>i386</architecture>\
63 <osabi>GNU/Linux</osabi>\
64 </target>";
65
66 #ifdef __x86_64__
67 static const char *xmltarget_amd64_linux_no_xml = "@<target>\
68 <architecture>i386:x86-64</architecture>\
69 <osabi>GNU/Linux</osabi>\
70 </target>";
71 #endif
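/* Note (not part of the upstream comments): the leading '@' in these
   strings appears to mark them as literal XML documents rather than
   file names, which is how gdbserver's qXfer:features:read handling
   serves a "target.xml" back to GDB when no on-disk description is
   used.  */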
72
73 #include <sys/reg.h>
74 #include <sys/procfs.h>
75 #include "nat/gdb_ptrace.h"
76 #include <sys/uio.h>
77
78 #ifndef PTRACE_GET_THREAD_AREA
79 #define PTRACE_GET_THREAD_AREA 25
80 #endif
81
82 /* This definition comes from prctl.h, but some kernels may not have it. */
83 #ifndef PTRACE_ARCH_PRCTL
84 #define PTRACE_ARCH_PRCTL 30
85 #endif
86
87 /* The following definitions come from prctl.h, but may be absent
88 for certain configurations. */
89 #ifndef ARCH_GET_FS
90 #define ARCH_SET_GS 0x1001
91 #define ARCH_SET_FS 0x1002
92 #define ARCH_GET_FS 0x1003
93 #define ARCH_GET_GS 0x1004
94 #endif
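/* These are the arch_prctl(2) sub-function codes.  With
   PTRACE_ARCH_PRCTL the argument order differs from a plain arch_prctl
   call; roughly (a sketch for a stopped tracee TID, not taken from the
   upstream sources):

       unsigned long fsbase;
       ptrace (PTRACE_ARCH_PRCTL, tid, &fsbase, ARCH_GET_FS);

   reads the tracee's FS base into FSBASE, which is exactly how the
   code below uses ARCH_GET_FS and ARCH_GET_GS.  */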
95
96 /* Per-process arch-specific data we want to keep. */
97
98 struct arch_process_info
99 {
100 struct x86_debug_reg_state debug_reg_state;
101 };
102
103 #ifdef __x86_64__
104
105 /* Mapping between the general-purpose registers in `struct user'
106 format and GDB's register array layout.
107 Note that the transfer layout uses 64-bit regs. */
108 static /*const*/ int i386_regmap[] =
109 {
110 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
111 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
112 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
113 DS * 8, ES * 8, FS * 8, GS * 8
114 };
115
116 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
117
118 /* So that the code below doesn't have to care whether it is i386 or amd64. */
119 #define ORIG_EAX ORIG_RAX
120 #define REGSIZE 8
121
122 static const int x86_64_regmap[] =
123 {
124 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
125 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
126 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
127 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
128 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
129 DS * 8, ES * 8, FS * 8, GS * 8,
130 -1, -1, -1, -1, -1, -1, -1, -1,
131 -1, -1, -1, -1, -1, -1, -1, -1,
132 -1, -1, -1, -1, -1, -1, -1, -1,
133 -1,
134 -1, -1, -1, -1, -1, -1, -1, -1,
135 ORIG_RAX * 8,
136 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
137 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
138 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
139 -1, -1, -1, -1, -1, -1, -1, -1,
140 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
141 -1, -1, -1, -1, -1, -1, -1, -1,
142 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
143 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
144 -1, -1, -1, -1, -1, -1, -1, -1,
145 -1, -1, -1, -1, -1, -1, -1, -1,
146 -1, -1, -1, -1, -1, -1, -1, -1
147 };
148
149 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
150 #define X86_64_USER_REGS (GS + 1)
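/* In x86_64_regmap above, a -1 entry means the register has no slot in
   the `struct user' area and is transferred via the regset/xsave code
   instead; X86_64_USER_REGS counts only the registers that do live in
   user_regs_struct (up to and including GS).  */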
151
152 #else /* ! __x86_64__ */
153
154 /* Mapping between the general-purpose registers in `struct user'
155 format and GDB's register array layout. */
156 static /*const*/ int i386_regmap[] =
157 {
158 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
159 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
160 EIP * 4, EFL * 4, CS * 4, SS * 4,
161 DS * 4, ES * 4, FS * 4, GS * 4
162 };
163
164 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
165
166 #define REGSIZE 4
167
168 #endif
169
170 #ifdef __x86_64__
171
172 /* Returns true if the current inferior belongs to an x86-64 process,
173 per the tdesc. */
174
175 static int
176 is_64bit_tdesc (void)
177 {
178 struct regcache *regcache = get_thread_regcache (current_thread, 0);
179
180 return register_size (regcache->tdesc, 0) == 8;
181 }
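/* Note that this also returns true for x32 inferiors, whose target
   descriptions use 64-bit general-purpose registers even though
   pointers are 32 bits wide.  */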
182
183 #endif
184
185 \f
186 /* Called by libthread_db. */
187
188 ps_err_e
189 ps_get_thread_area (const struct ps_prochandle *ph,
190 lwpid_t lwpid, int idx, void **base)
191 {
192 #ifdef __x86_64__
193 int use_64bit = is_64bit_tdesc ();
194
195 if (use_64bit)
196 {
197 switch (idx)
198 {
199 case FS:
200 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
201 return PS_OK;
202 break;
203 case GS:
204 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
205 return PS_OK;
206 break;
207 default:
208 return PS_BADADDR;
209 }
210 return PS_ERR;
211 }
212 #endif
213
214 {
215 unsigned int desc[4];
216
217 if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
218 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
219 return PS_ERR;
220
221 /* Ensure we properly extend the value to 64-bits for x86_64. */
222 *base = (void *) (uintptr_t) desc[1];
223 return PS_OK;
224 }
225 }
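/* For the 32-bit path above: PTRACE_GET_THREAD_AREA fills DESC with a
   GDT entry in the `struct user_desc' layout, whose second 32-bit word
   is the segment base address -- hence the desc[1] above.  */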
226
227 /* Get the thread area address. This is used to recognize which
228 thread is which when tracing with the in-process agent library. We
229 don't read anything from the address, and treat it as opaque; it's
230 the address itself that we assume is unique per-thread. */
231
232 static int
233 x86_get_thread_area (int lwpid, CORE_ADDR *addr)
234 {
235 #ifdef __x86_64__
236 int use_64bit = is_64bit_tdesc ();
237
238 if (use_64bit)
239 {
240 void *base;
241 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
242 {
243 *addr = (CORE_ADDR) (uintptr_t) base;
244 return 0;
245 }
246
247 return -1;
248 }
249 #endif
250
251 {
252 struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
253 struct thread_info *thr = get_lwp_thread (lwp);
254 struct regcache *regcache = get_thread_regcache (thr, 1);
255 unsigned int desc[4];
256 ULONGEST gs = 0;
257 const int reg_thread_area = 3; /* bits to scale down register value. */
258 int idx;
259
260 collect_register_by_name (regcache, "gs", &gs);
261
262 idx = gs >> reg_thread_area;
263
264 if (ptrace (PTRACE_GET_THREAD_AREA,
265 lwpid_of (thr),
266 (void *) (long) idx, (unsigned long) &desc) < 0)
267 return -1;
268
269 *addr = desc[1];
270 return 0;
271 }
272 }
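/* The gs >> 3 above strips the selector's RPL and table-indicator
   bits; what remains is the GDT entry number that
   PTRACE_GET_THREAD_AREA expects as its address argument.  */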
273
274
275 \f
276 static int
277 x86_cannot_store_register (int regno)
278 {
279 #ifdef __x86_64__
280 if (is_64bit_tdesc ())
281 return 0;
282 #endif
283
284 return regno >= I386_NUM_REGS;
285 }
286
287 static int
288 x86_cannot_fetch_register (int regno)
289 {
290 #ifdef __x86_64__
291 if (is_64bit_tdesc ())
292 return 0;
293 #endif
294
295 return regno >= I386_NUM_REGS;
296 }
297
298 static void
299 x86_fill_gregset (struct regcache *regcache, void *buf)
300 {
301 int i;
302
303 #ifdef __x86_64__
304 if (register_size (regcache->tdesc, 0) == 8)
305 {
306 for (i = 0; i < X86_64_NUM_REGS; i++)
307 if (x86_64_regmap[i] != -1)
308 collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
309 return;
310 }
311
312 /* 32-bit inferior registers need to be zero-extended.
313 Callers would read uninitialized memory otherwise. */
314 memset (buf, 0x00, X86_64_USER_REGS * 8);
315 #endif
316
317 for (i = 0; i < I386_NUM_REGS; i++)
318 collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);
319
320 collect_register_by_name (regcache, "orig_eax",
321 ((char *) buf) + ORIG_EAX * REGSIZE);
322 }
323
324 static void
325 x86_store_gregset (struct regcache *regcache, const void *buf)
326 {
327 int i;
328
329 #ifdef __x86_64__
330 if (register_size (regcache->tdesc, 0) == 8)
331 {
332 for (i = 0; i < X86_64_NUM_REGS; i++)
333 if (x86_64_regmap[i] != -1)
334 supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
335 return;
336 }
337 #endif
338
339 for (i = 0; i < I386_NUM_REGS; i++)
340 supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
341
342 supply_register_by_name (regcache, "orig_eax",
343 ((char *) buf) + ORIG_EAX * REGSIZE);
344 }
345
346 static void
347 x86_fill_fpregset (struct regcache *regcache, void *buf)
348 {
349 #ifdef __x86_64__
350 i387_cache_to_fxsave (regcache, buf);
351 #else
352 i387_cache_to_fsave (regcache, buf);
353 #endif
354 }
355
356 static void
357 x86_store_fpregset (struct regcache *regcache, const void *buf)
358 {
359 #ifdef __x86_64__
360 i387_fxsave_to_cache (regcache, buf);
361 #else
362 i387_fsave_to_cache (regcache, buf);
363 #endif
364 }
365
366 #ifndef __x86_64__
367
368 static void
369 x86_fill_fpxregset (struct regcache *regcache, void *buf)
370 {
371 i387_cache_to_fxsave (regcache, buf);
372 }
373
374 static void
375 x86_store_fpxregset (struct regcache *regcache, const void *buf)
376 {
377 i387_fxsave_to_cache (regcache, buf);
378 }
379
380 #endif
381
382 static void
383 x86_fill_xstateregset (struct regcache *regcache, void *buf)
384 {
385 i387_cache_to_xsave (regcache, buf);
386 }
387
388 static void
389 x86_store_xstateregset (struct regcache *regcache, const void *buf)
390 {
391 i387_xsave_to_cache (regcache, buf);
392 }
393
394 /* ??? The non-biarch i386 case stores all the i387 regs twice.
395 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
396 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
397 doesn't work. IWBN to avoid the duplication in the case where it
398 does work. Maybe the arch_setup routine could check whether it works
399 and update the supported regsets accordingly. */
400
401 static struct regset_info x86_regsets[] =
402 {
403 #ifdef HAVE_PTRACE_GETREGS
404 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
405 GENERAL_REGS,
406 x86_fill_gregset, x86_store_gregset },
407 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
408 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
409 # ifndef __x86_64__
410 # ifdef HAVE_PTRACE_GETFPXREGS
411 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
412 EXTENDED_REGS,
413 x86_fill_fpxregset, x86_store_fpxregset },
414 # endif
415 # endif
416 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
417 FP_REGS,
418 x86_fill_fpregset, x86_store_fpregset },
419 #endif /* HAVE_PTRACE_GETREGS */
420 NULL_REGSET
421 };
422
423 static CORE_ADDR
424 x86_get_pc (struct regcache *regcache)
425 {
426 int use_64bit = register_size (regcache->tdesc, 0) == 8;
427
428 if (use_64bit)
429 {
430 unsigned long pc;
431 collect_register_by_name (regcache, "rip", &pc);
432 return (CORE_ADDR) pc;
433 }
434 else
435 {
436 unsigned int pc;
437 collect_register_by_name (regcache, "eip", &pc);
438 return (CORE_ADDR) pc;
439 }
440 }
441
442 static void
443 x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
444 {
445 int use_64bit = register_size (regcache->tdesc, 0) == 8;
446
447 if (use_64bit)
448 {
449 unsigned long newpc = pc;
450 supply_register_by_name (regcache, "rip", &newpc);
451 }
452 else
453 {
454 unsigned int newpc = pc;
455 supply_register_by_name (regcache, "eip", &newpc);
456 }
457 }
458 \f
459 static const gdb_byte x86_breakpoint[] = { 0xCC };
460 #define x86_breakpoint_len 1
461
462 static int
463 x86_breakpoint_at (CORE_ADDR pc)
464 {
465 unsigned char c;
466
467 (*the_target->read_memory) (pc, &c, 1);
468 if (c == 0xCC)
469 return 1;
470
471 return 0;
472 }
473 \f
474 /* Low-level function vector. */
475 struct x86_dr_low_type x86_dr_low =
476 {
477 x86_linux_dr_set_control,
478 x86_linux_dr_set_addr,
479 x86_linux_dr_get_addr,
480 x86_linux_dr_get_status,
481 x86_linux_dr_get_control,
482 sizeof (void *),
483 };
484 \f
485 /* Breakpoint/Watchpoint support. */
486
487 static int
488 x86_supports_z_point_type (char z_type)
489 {
490 switch (z_type)
491 {
492 case Z_PACKET_SW_BP:
493 case Z_PACKET_HW_BP:
494 case Z_PACKET_WRITE_WP:
495 case Z_PACKET_ACCESS_WP:
496 return 1;
497 default:
498 return 0;
499 }
500 }
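/* Read-only watchpoints are deliberately not listed here: x86 debug
   registers cannot trap on reads alone, only on writes or on
   read/write accesses, which is presumably why only write and access
   watchpoints are offered.  */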
501
502 static int
503 x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
504 int size, struct raw_breakpoint *bp)
505 {
506 struct process_info *proc = current_process ();
507
508 switch (type)
509 {
510 case raw_bkpt_type_hw:
511 case raw_bkpt_type_write_wp:
512 case raw_bkpt_type_access_wp:
513 {
514 enum target_hw_bp_type hw_type
515 = raw_bkpt_type_to_target_hw_bp_type (type);
516 struct x86_debug_reg_state *state
517 = &proc->priv->arch_private->debug_reg_state;
518
519 return x86_dr_insert_watchpoint (state, hw_type, addr, size);
520 }
521
522 default:
523 /* Unsupported. */
524 return 1;
525 }
526 }
527
528 static int
529 x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
530 int size, struct raw_breakpoint *bp)
531 {
532 struct process_info *proc = current_process ();
533
534 switch (type)
535 {
536 case raw_bkpt_type_hw:
537 case raw_bkpt_type_write_wp:
538 case raw_bkpt_type_access_wp:
539 {
540 enum target_hw_bp_type hw_type
541 = raw_bkpt_type_to_target_hw_bp_type (type);
542 struct x86_debug_reg_state *state
543 = &proc->priv->arch_private->debug_reg_state;
544
545 return x86_dr_remove_watchpoint (state, hw_type, addr, size);
546 }
547 default:
548 /* Unsupported. */
549 return 1;
550 }
551 }
552
553 static int
554 x86_stopped_by_watchpoint (void)
555 {
556 struct process_info *proc = current_process ();
557 return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
558 }
559
560 static CORE_ADDR
561 x86_stopped_data_address (void)
562 {
563 struct process_info *proc = current_process ();
564 CORE_ADDR addr;
565 if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
566 &addr))
567 return addr;
568 return 0;
569 }
570 \f
571 /* Called when a new process is created. */
572
573 static struct arch_process_info *
574 x86_linux_new_process (void)
575 {
576 struct arch_process_info *info = XCNEW (struct arch_process_info);
577
578 x86_low_init_dregs (&info->debug_reg_state);
579
580 return info;
581 }
582
583 /* Target routine for linux_new_fork. */
584
585 static void
586 x86_linux_new_fork (struct process_info *parent, struct process_info *child)
587 {
588 /* These are allocated by linux_add_process. */
589 gdb_assert (parent->priv != NULL
590 && parent->priv->arch_private != NULL);
591 gdb_assert (child->priv != NULL
592 && child->priv->arch_private != NULL);
593
594 /* Linux kernel before 2.6.33 commit
595 72f674d203cd230426437cdcf7dd6f681dad8b0d
596 will inherit hardware debug registers from parent
597 on fork/vfork/clone. Newer Linux kernels create such tasks with
598 zeroed debug registers.
599
600 GDB core assumes the child inherits the watchpoints/hw
601 breakpoints of the parent, and will remove them all from the
602      forked-off process.  Copy the debug register mirrors into the
603      new process so that all breakpoints and watchpoints can be
604      removed together.  The debug register mirror will become zeroed
605      in the end, before detaching the forked-off process, thus making
606 this compatible with older Linux kernels too. */
607
608 *child->priv->arch_private = *parent->priv->arch_private;
609 }
610
611 /* See nat/x86-dregs.h. */
612
613 struct x86_debug_reg_state *
614 x86_debug_reg_state (pid_t pid)
615 {
616 struct process_info *proc = find_process_pid (pid);
617
618 return &proc->priv->arch_private->debug_reg_state;
619 }
620 \f
621 /* When GDBSERVER is built as a 64-bit application on linux, the
622 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
623 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
624 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
625 conversion in-place ourselves. */
626
627 /* Convert a ptrace/host siginfo object, into/from the siginfo in the
628    layout of the inferior's architecture.  Returns true if any
629 conversion was done; false otherwise. If DIRECTION is 1, then copy
630 from INF to PTRACE. If DIRECTION is 0, copy from PTRACE to
631 INF. */
632
633 static int
634 x86_siginfo_fixup (siginfo_t *ptrace, gdb_byte *inf, int direction)
635 {
636 #ifdef __x86_64__
637 unsigned int machine;
638 int tid = lwpid_of (current_thread);
639 int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
640
641 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
642 if (!is_64bit_tdesc ())
643 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
644 FIXUP_32);
645 /* No fixup for native x32 GDB. */
646 else if (!is_elf64 && sizeof (void *) == 8)
647 return amd64_linux_siginfo_fixup_common (ptrace, inf, direction,
648 FIXUP_X32);
649 #endif
650
651 return 0;
652 }
653 \f
654 static int use_xml;
655
656 /* Format of XSAVE extended state is:
657 struct
658 {
659 fxsave_bytes[0..463]
660 sw_usable_bytes[464..511]
661 xstate_hdr_bytes[512..575]
662 avx_bytes[576..831]
663 future_state etc
664 };
665
666 Same memory layout will be used for the coredump NT_X86_XSTATE
667 representing the XSAVE extended state registers.
668
669   The first 8 bytes of the sw_usable_bytes[464..471] are the OS-enabled
670 extended state mask, which is the same as the extended control register
671 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
672 together with the mask saved in the xstate_hdr_bytes to determine what
673 states the processor/OS supports and what state, used or initialized,
674 the process/thread is in. */
675 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
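/* As a concrete example, x86_linux_read_description below fetches XCR0
   as xstateregs[I386_LINUX_XSAVE_XCR0_OFFSET / sizeof (uint64_t)], i.e.
   the 64-bit word at index 58 of the XSAVE buffer.  */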
676
677 /* Does the current host support the GETFPXREGS request? The header
678 file may or may not define it, and even if it is defined, the
679 kernel will return EIO if it's running on a pre-SSE processor. */
680 int have_ptrace_getfpxregs =
681 #ifdef HAVE_PTRACE_GETFPXREGS
682 -1
683 #else
684 0
685 #endif
686 ;
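/* -1 means "unknown, probe on first use"; x86_linux_read_description
   below sets this to 1 or 0 once a PTRACE_GETFPXREGS request has
   succeeded or failed.  have_ptrace_getregset, defined elsewhere in
   gdbserver, follows the same tri-state convention.  */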
687
688 /* Get Linux/x86 target description from running target. */
689
690 static const struct target_desc *
691 x86_linux_read_description (void)
692 {
693 unsigned int machine;
694 int is_elf64;
695 int xcr0_features;
696 int tid;
697 static uint64_t xcr0;
698 struct regset_info *regset;
699
700 tid = lwpid_of (current_thread);
701
702 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
703
704 if (sizeof (void *) == 4)
705 {
706 if (is_elf64 > 0)
707 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
708 #ifndef __x86_64__
709 else if (machine == EM_X86_64)
710 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
711 #endif
712 }
713
714 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
715 if (machine == EM_386 && have_ptrace_getfpxregs == -1)
716 {
717 elf_fpxregset_t fpxregs;
718
719 if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
720 {
721 have_ptrace_getfpxregs = 0;
722 have_ptrace_getregset = 0;
723 return tdesc_i386_mmx_linux;
724 }
725 else
726 have_ptrace_getfpxregs = 1;
727 }
728 #endif
729
730 if (!use_xml)
731 {
732 x86_xcr0 = X86_XSTATE_SSE_MASK;
733
734 /* Don't use XML. */
735 #ifdef __x86_64__
736 if (machine == EM_X86_64)
737 return tdesc_amd64_linux_no_xml;
738 else
739 #endif
740 return tdesc_i386_linux_no_xml;
741 }
742
743 if (have_ptrace_getregset == -1)
744 {
745 uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
746 struct iovec iov;
747
748 iov.iov_base = xstateregs;
749 iov.iov_len = sizeof (xstateregs);
750
751 /* Check if PTRACE_GETREGSET works. */
752 if (ptrace (PTRACE_GETREGSET, tid,
753 (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
754 have_ptrace_getregset = 0;
755 else
756 {
757 have_ptrace_getregset = 1;
758
759 /* Get XCR0 from XSAVE extended state. */
760 xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
761 / sizeof (uint64_t))];
762
763 /* Use PTRACE_GETREGSET if it is available. */
764 for (regset = x86_regsets;
765 regset->fill_function != NULL; regset++)
766 if (regset->get_request == PTRACE_GETREGSET)
767 regset->size = X86_XSTATE_SIZE (xcr0);
768 else if (regset->type != GENERAL_REGS)
769 regset->size = 0;
770 }
771 }
772
773 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
774 xcr0_features = (have_ptrace_getregset
775 && (xcr0 & X86_XSTATE_ALL_MASK));
776
777 if (xcr0_features)
778 x86_xcr0 = xcr0;
779
780 if (machine == EM_X86_64)
781 {
782 #ifdef __x86_64__
783 if (is_elf64)
784 {
785 if (xcr0_features)
786 {
787 switch (xcr0 & X86_XSTATE_ALL_MASK)
788 {
789 case X86_XSTATE_AVX512_MASK:
790 return tdesc_amd64_avx512_linux;
791
792 case X86_XSTATE_AVX_MPX_MASK:
793 return tdesc_amd64_avx_mpx_linux;
794
795 case X86_XSTATE_MPX_MASK:
796 return tdesc_amd64_mpx_linux;
797
798 case X86_XSTATE_AVX_MASK:
799 return tdesc_amd64_avx_linux;
800
801 default:
802 return tdesc_amd64_linux;
803 }
804 }
805 else
806 return tdesc_amd64_linux;
807 }
808 else
809 {
810 if (xcr0_features)
811 {
812 switch (xcr0 & X86_XSTATE_ALL_MASK)
813 {
814 case X86_XSTATE_AVX512_MASK:
815 return tdesc_x32_avx512_linux;
816
817 case X86_XSTATE_MPX_MASK: /* No MPX on x32. */
818 case X86_XSTATE_AVX_MASK:
819 return tdesc_x32_avx_linux;
820
821 default:
822 return tdesc_x32_linux;
823 }
824 }
825 else
826 return tdesc_x32_linux;
827 }
828 #endif
829 }
830 else
831 {
832 if (xcr0_features)
833 {
834 switch (xcr0 & X86_XSTATE_ALL_MASK)
835 {
836 case (X86_XSTATE_AVX512_MASK):
837 return tdesc_i386_avx512_linux;
838
839 case (X86_XSTATE_MPX_MASK):
840 return tdesc_i386_mpx_linux;
841
842 case (X86_XSTATE_AVX_MPX_MASK):
843 return tdesc_i386_avx_mpx_linux;
844
845 case (X86_XSTATE_AVX_MASK):
846 return tdesc_i386_avx_linux;
847
848 default:
849 return tdesc_i386_linux;
850 }
851 }
852 else
853 return tdesc_i386_linux;
854 }
855
856 gdb_assert_not_reached ("failed to return tdesc");
857 }
858
859 /* Callback for find_inferior. Stops iteration when a thread with a
860 given PID is found. */
861
862 static int
863 same_process_callback (struct inferior_list_entry *entry, void *data)
864 {
865 int pid = *(int *) data;
866
867 return (ptid_get_pid (entry->id) == pid);
868 }
869
870 /* Callback for for_each_inferior. Calls the arch_setup routine for
871 each process. */
872
873 static void
874 x86_arch_setup_process_callback (struct inferior_list_entry *entry)
875 {
876 int pid = ptid_get_pid (entry->id);
877
878   /* Look up any thread of this process. */
879 current_thread
880 = (struct thread_info *) find_inferior (&all_threads,
881 same_process_callback, &pid);
882
883 the_low_target.arch_setup ();
884 }
885
886 /* Update the target descriptions of all processes; a new GDB has
887    connected, and it may or may not support XML target descriptions. */
888
889 static void
890 x86_linux_update_xmltarget (void)
891 {
892 struct thread_info *saved_thread = current_thread;
893
894 /* Before changing the register cache's internal layout, flush the
895 contents of the current valid caches back to the threads, and
896 release the current regcache objects. */
897 regcache_release ();
898
899 for_each_inferior (&all_processes, x86_arch_setup_process_callback);
900
901 current_thread = saved_thread;
902 }
903
904 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
905 PTRACE_GETREGSET. */
906
907 static void
908 x86_linux_process_qsupported (char **features, int count)
909 {
910 int i;
911
912 /* Return if gdb doesn't support XML. If gdb sends "xmlRegisters="
913 with "i386" in qSupported query, it supports x86 XML target
914 descriptions. */
915 use_xml = 0;
916 for (i = 0; i < count; i++)
917 {
918 const char *feature = features[i];
919
920 if (startswith (feature, "xmlRegisters="))
921 {
922 char *copy = xstrdup (feature + 13);
923 char *p;
924
925 for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
926 {
927 if (strcmp (p, "i386") == 0)
928 {
929 use_xml = 1;
930 break;
931 }
932 }
933
934 free (copy);
935 }
936 }
937 x86_linux_update_xmltarget ();
938 }
939
940 /* Common for x86/x86-64. */
941
942 static struct regsets_info x86_regsets_info =
943 {
944 x86_regsets, /* regsets */
945 0, /* num_regsets */
946 NULL, /* disabled_regsets */
947 };
948
949 #ifdef __x86_64__
950 static struct regs_info amd64_linux_regs_info =
951 {
952 NULL, /* regset_bitmap */
953 NULL, /* usrregs_info */
954 &x86_regsets_info
955 };
956 #endif
957 static struct usrregs_info i386_linux_usrregs_info =
958 {
959 I386_NUM_REGS,
960 i386_regmap,
961 };
962
963 static struct regs_info i386_linux_regs_info =
964 {
965 NULL, /* regset_bitmap */
966 &i386_linux_usrregs_info,
967 &x86_regsets_info
968 };
969
970 const struct regs_info *
971 x86_linux_regs_info (void)
972 {
973 #ifdef __x86_64__
974 if (is_64bit_tdesc ())
975 return &amd64_linux_regs_info;
976 else
977 #endif
978 return &i386_linux_regs_info;
979 }
980
981 /* Initialize the target description for the architecture of the
982 inferior. */
983
984 static void
985 x86_arch_setup (void)
986 {
987 current_process ()->tdesc = x86_linux_read_description ();
988 }
989
990 /* Fill *SYSNO with the number of the syscall that was trapped.
991    This should only be called if LWP got a SYSCALL_SIGTRAP. */
992
993 static void
994 x86_get_syscall_trapinfo (struct regcache *regcache, int *sysno)
995 {
996 int use_64bit = register_size (regcache->tdesc, 0) == 8;
997
998 if (use_64bit)
999 {
1000 long l_sysno;
1001
1002 collect_register_by_name (regcache, "orig_rax", &l_sysno);
1003 *sysno = (int) l_sysno;
1004 }
1005 else
1006 collect_register_by_name (regcache, "orig_eax", sysno);
1007 }
1008
1009 static int
1010 x86_supports_tracepoints (void)
1011 {
1012 return 1;
1013 }
1014
1015 static void
1016 append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
1017 {
1018 write_inferior_memory (*to, buf, len);
1019 *to += len;
1020 }
1021
1022 static int
1023 push_opcode (unsigned char *buf, char *op)
1024 {
1025 unsigned char *buf_org = buf;
1026
1027 while (1)
1028 {
1029 char *endptr;
1030 unsigned long ul = strtoul (op, &endptr, 16);
1031
1032 if (endptr == op)
1033 break;
1034
1035 *buf++ = ul;
1036 op = endptr;
1037 }
1038
1039 return buf - buf_org;
1040 }
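/* For example, push_opcode (buf, "48 89 e6") stores the three bytes
   0x48 0x89 0xe6 ("mov %rsp,%rsi") into BUF and returns 3; the
   jump-pad builders below use it to assemble instructions from the hex
   strings shown in their comments.  */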
1041
1042 #ifdef __x86_64__
1043
1044 /* Build a jump pad that saves registers and calls a collection
1045 function. Writes a jump instruction to the jump pad to
1046    JJUMPAD_INSN.  The caller is responsible for writing it in at the
1047 tracepoint address. */
1048
1049 static int
1050 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1051 CORE_ADDR collector,
1052 CORE_ADDR lockaddr,
1053 ULONGEST orig_size,
1054 CORE_ADDR *jump_entry,
1055 CORE_ADDR *trampoline,
1056 ULONGEST *trampoline_size,
1057 unsigned char *jjump_pad_insn,
1058 ULONGEST *jjump_pad_insn_size,
1059 CORE_ADDR *adjusted_insn_addr,
1060 CORE_ADDR *adjusted_insn_addr_end,
1061 char *err)
1062 {
1063 unsigned char buf[40];
1064 int i, offset;
1065 int64_t loffset;
1066
1067 CORE_ADDR buildaddr = *jump_entry;
1068
1069 /* Build the jump pad. */
1070
1071 /* First, do tracepoint data collection. Save registers. */
1072 i = 0;
1073 /* Need to ensure stack pointer saved first. */
1074 buf[i++] = 0x54; /* push %rsp */
1075 buf[i++] = 0x55; /* push %rbp */
1076 buf[i++] = 0x57; /* push %rdi */
1077 buf[i++] = 0x56; /* push %rsi */
1078 buf[i++] = 0x52; /* push %rdx */
1079 buf[i++] = 0x51; /* push %rcx */
1080 buf[i++] = 0x53; /* push %rbx */
1081 buf[i++] = 0x50; /* push %rax */
1082 buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
1083 buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
1084 buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
1085 buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
1086 buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
1087 buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
1088 buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
1089 buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
1090 buf[i++] = 0x9c; /* pushfq */
1091 buf[i++] = 0x48; /* movl <addr>,%rdi */
1092 buf[i++] = 0xbf;
1093 *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
1094 i += sizeof (unsigned long);
1095 buf[i++] = 0x57; /* push %rdi */
1096 append_insns (&buildaddr, i, buf);
1097
1098 /* Stack space for the collecting_t object. */
1099 i = 0;
1100 i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
1101 i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
1102 memcpy (buf + i, &tpoint, 8);
1103 i += 8;
1104 i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
1105 i += push_opcode (&buf[i],
1106 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1107 i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1108 append_insns (&buildaddr, i, buf);
1109
1110 /* spin-lock. */
1111 i = 0;
1112 i += push_opcode (&buf[i], "48 be"); /* movl <lockaddr>,%rsi */
1113 memcpy (&buf[i], (void *) &lockaddr, 8);
1114 i += 8;
1115 i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
1116 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1117 i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1118 i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
1119 i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
1120 append_insns (&buildaddr, i, buf);
1121
1122 /* Set up the gdb_collect call. */
1123 /* At this point, (stack pointer + 0x18) is the base of our saved
1124 register block. */
1125
1126 i = 0;
1127 i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
1128 i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */
1129
1130 /* tpoint address may be 64-bit wide. */
1131 i += push_opcode (&buf[i], "48 bf"); /* movl <addr>,%rdi */
1132 memcpy (buf + i, &tpoint, 8);
1133 i += 8;
1134 append_insns (&buildaddr, i, buf);
1135
1136   /* The collector function, being in a shared library, may be more
1137      than 31 bits away from the jump pad. */
1138 i = 0;
1139 i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
1140 memcpy (buf + i, &collector, 8);
1141 i += 8;
1142 i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
1143 append_insns (&buildaddr, i, buf);
1144
1145 /* Clear the spin-lock. */
1146 i = 0;
1147 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1148 i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
1149 memcpy (buf + i, &lockaddr, 8);
1150 i += 8;
1151 append_insns (&buildaddr, i, buf);
1152
1153 /* Remove stack that had been used for the collect_t object. */
1154 i = 0;
1155 i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
1156 append_insns (&buildaddr, i, buf);
1157
1158 /* Restore register state. */
1159 i = 0;
1160 buf[i++] = 0x48; /* add $0x8,%rsp */
1161 buf[i++] = 0x83;
1162 buf[i++] = 0xc4;
1163 buf[i++] = 0x08;
1164 buf[i++] = 0x9d; /* popfq */
1165 buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
1166 buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
1167 buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
1168 buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
1169 buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
1170 buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
1171 buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
1172 buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
1173 buf[i++] = 0x58; /* pop %rax */
1174 buf[i++] = 0x5b; /* pop %rbx */
1175 buf[i++] = 0x59; /* pop %rcx */
1176 buf[i++] = 0x5a; /* pop %rdx */
1177 buf[i++] = 0x5e; /* pop %rsi */
1178 buf[i++] = 0x5f; /* pop %rdi */
1179 buf[i++] = 0x5d; /* pop %rbp */
1180 buf[i++] = 0x5c; /* pop %rsp */
1181 append_insns (&buildaddr, i, buf);
1182
1183 /* Now, adjust the original instruction to execute in the jump
1184 pad. */
1185 *adjusted_insn_addr = buildaddr;
1186 relocate_instruction (&buildaddr, tpaddr);
1187 *adjusted_insn_addr_end = buildaddr;
1188
1189 /* Finally, write a jump back to the program. */
1190
1191 loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1192 if (loffset > INT_MAX || loffset < INT_MIN)
1193 {
1194 sprintf (err,
1195 "E.Jump back from jump pad too far from tracepoint "
1196 "(offset 0x%" PRIx64 " > int32).", loffset);
1197 return 1;
1198 }
1199
1200 offset = (int) loffset;
1201 memcpy (buf, jump_insn, sizeof (jump_insn));
1202 memcpy (buf + 1, &offset, 4);
1203 append_insns (&buildaddr, sizeof (jump_insn), buf);
1204
1205 /* The jump pad is now built. Wire in a jump to our jump pad. This
1206 is always done last (by our caller actually), so that we can
1207 install fast tracepoints with threads running. This relies on
1208 the agent's atomic write support. */
1209 loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
1210 if (loffset > INT_MAX || loffset < INT_MIN)
1211 {
1212 sprintf (err,
1213 "E.Jump pad too far from tracepoint "
1214 "(offset 0x%" PRIx64 " > int32).", loffset);
1215 return 1;
1216 }
1217
1218 offset = (int) loffset;
1219
1220 memcpy (buf, jump_insn, sizeof (jump_insn));
1221 memcpy (buf + 1, &offset, 4);
1222 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1223 *jjump_pad_insn_size = sizeof (jump_insn);
1224
1225 /* Return the end address of our pad. */
1226 *jump_entry = buildaddr;
1227
1228 return 0;
1229 }
1230
1231 #endif /* __x86_64__ */
1232
1233 /* Build a jump pad that saves registers and calls a collection
1234 function. Writes a jump instruction to the jump pad to
1235    JJUMPAD_INSN.  The caller is responsible for writing it in at the
1236 tracepoint address. */
1237
1238 static int
1239 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1240 CORE_ADDR collector,
1241 CORE_ADDR lockaddr,
1242 ULONGEST orig_size,
1243 CORE_ADDR *jump_entry,
1244 CORE_ADDR *trampoline,
1245 ULONGEST *trampoline_size,
1246 unsigned char *jjump_pad_insn,
1247 ULONGEST *jjump_pad_insn_size,
1248 CORE_ADDR *adjusted_insn_addr,
1249 CORE_ADDR *adjusted_insn_addr_end,
1250 char *err)
1251 {
1252 unsigned char buf[0x100];
1253 int i, offset;
1254 CORE_ADDR buildaddr = *jump_entry;
1255
1256 /* Build the jump pad. */
1257
1258 /* First, do tracepoint data collection. Save registers. */
1259 i = 0;
1260 buf[i++] = 0x60; /* pushad */
1261 buf[i++] = 0x68; /* push tpaddr aka $pc */
1262 *((int *)(buf + i)) = (int) tpaddr;
1263 i += 4;
1264 buf[i++] = 0x9c; /* pushf */
1265 buf[i++] = 0x1e; /* push %ds */
1266 buf[i++] = 0x06; /* push %es */
1267 buf[i++] = 0x0f; /* push %fs */
1268 buf[i++] = 0xa0;
1269 buf[i++] = 0x0f; /* push %gs */
1270 buf[i++] = 0xa8;
1271 buf[i++] = 0x16; /* push %ss */
1272 buf[i++] = 0x0e; /* push %cs */
1273 append_insns (&buildaddr, i, buf);
1274
1275 /* Stack space for the collecting_t object. */
1276 i = 0;
1277 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1278
1279 /* Build the object. */
1280 i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
1281 memcpy (buf + i, &tpoint, 4);
1282 i += 4;
1283 i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */
1284
1285 i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1286 i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1287 append_insns (&buildaddr, i, buf);
1288
1289 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1290 If we cared for it, this could be using xchg alternatively. */
1291
1292 i = 0;
1293 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1294 i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
1295 %esp,<lockaddr> */
1296 memcpy (&buf[i], (void *) &lockaddr, 4);
1297 i += 4;
1298 i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
1299 i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
1300 append_insns (&buildaddr, i, buf);
1301
1302
1303 /* Set up arguments to the gdb_collect call. */
1304 i = 0;
1305 i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
1306 i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
1307 i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1308 append_insns (&buildaddr, i, buf);
1309
1310 i = 0;
1311 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1312 append_insns (&buildaddr, i, buf);
1313
1314 i = 0;
1315 i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
1316 memcpy (&buf[i], (void *) &tpoint, 4);
1317 i += 4;
1318 append_insns (&buildaddr, i, buf);
1319
1320 buf[0] = 0xe8; /* call <reladdr> */
1321 offset = collector - (buildaddr + sizeof (jump_insn));
1322 memcpy (buf + 1, &offset, 4);
1323 append_insns (&buildaddr, 5, buf);
1324 /* Clean up after the call. */
1325 buf[0] = 0x83; /* add $0x8,%esp */
1326 buf[1] = 0xc4;
1327 buf[2] = 0x08;
1328 append_insns (&buildaddr, 3, buf);
1329
1330
1331 /* Clear the spin-lock. This would need the LOCK prefix on older
1332 broken archs. */
1333 i = 0;
1334 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1335 i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
1336 memcpy (buf + i, &lockaddr, 4);
1337 i += 4;
1338 append_insns (&buildaddr, i, buf);
1339
1340
1341 /* Remove stack that had been used for the collect_t object. */
1342 i = 0;
1343 i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
1344 append_insns (&buildaddr, i, buf);
1345
1346 i = 0;
1347 buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1348 buf[i++] = 0xc4;
1349 buf[i++] = 0x04;
1350 buf[i++] = 0x17; /* pop %ss */
1351 buf[i++] = 0x0f; /* pop %gs */
1352 buf[i++] = 0xa9;
1353 buf[i++] = 0x0f; /* pop %fs */
1354 buf[i++] = 0xa1;
1355 buf[i++] = 0x07; /* pop %es */
1356 buf[i++] = 0x1f; /* pop %ds */
1357 buf[i++] = 0x9d; /* popf */
1358 buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1359 buf[i++] = 0xc4;
1360 buf[i++] = 0x04;
1361 buf[i++] = 0x61; /* popad */
1362 append_insns (&buildaddr, i, buf);
1363
1364 /* Now, adjust the original instruction to execute in the jump
1365 pad. */
1366 *adjusted_insn_addr = buildaddr;
1367 relocate_instruction (&buildaddr, tpaddr);
1368 *adjusted_insn_addr_end = buildaddr;
1369
1370 /* Write the jump back to the program. */
1371 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1372 memcpy (buf, jump_insn, sizeof (jump_insn));
1373 memcpy (buf + 1, &offset, 4);
1374 append_insns (&buildaddr, sizeof (jump_insn), buf);
1375
1376 /* The jump pad is now built. Wire in a jump to our jump pad. This
1377 is always done last (by our caller actually), so that we can
1378 install fast tracepoints with threads running. This relies on
1379 the agent's atomic write support. */
1380 if (orig_size == 4)
1381 {
1382 /* Create a trampoline. */
1383 *trampoline_size = sizeof (jump_insn);
1384 if (!claim_trampoline_space (*trampoline_size, trampoline))
1385 {
1386 /* No trampoline space available. */
1387 strcpy (err,
1388 "E.Cannot allocate trampoline space needed for fast "
1389 "tracepoints on 4-byte instructions.");
1390 return 1;
1391 }
1392
1393 offset = *jump_entry - (*trampoline + sizeof (jump_insn));
1394 memcpy (buf, jump_insn, sizeof (jump_insn));
1395 memcpy (buf + 1, &offset, 4);
1396 write_inferior_memory (*trampoline, buf, sizeof (jump_insn));
1397
1398 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1399 offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
1400 memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
1401 memcpy (buf + 2, &offset, 2);
1402 memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
1403 *jjump_pad_insn_size = sizeof (small_jump_insn);
1404 }
1405 else
1406 {
1407 /* Else use a 32-bit relative jump instruction. */
1408 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1409 memcpy (buf, jump_insn, sizeof (jump_insn));
1410 memcpy (buf + 1, &offset, 4);
1411 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1412 *jjump_pad_insn_size = sizeof (jump_insn);
1413 }
1414
1415 /* Return the end address of our pad. */
1416 *jump_entry = buildaddr;
1417
1418 return 0;
1419 }
1420
1421 static int
1422 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1423 CORE_ADDR collector,
1424 CORE_ADDR lockaddr,
1425 ULONGEST orig_size,
1426 CORE_ADDR *jump_entry,
1427 CORE_ADDR *trampoline,
1428 ULONGEST *trampoline_size,
1429 unsigned char *jjump_pad_insn,
1430 ULONGEST *jjump_pad_insn_size,
1431 CORE_ADDR *adjusted_insn_addr,
1432 CORE_ADDR *adjusted_insn_addr_end,
1433 char *err)
1434 {
1435 #ifdef __x86_64__
1436 if (is_64bit_tdesc ())
1437 return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1438 collector, lockaddr,
1439 orig_size, jump_entry,
1440 trampoline, trampoline_size,
1441 jjump_pad_insn,
1442 jjump_pad_insn_size,
1443 adjusted_insn_addr,
1444 adjusted_insn_addr_end,
1445 err);
1446 #endif
1447
1448 return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1449 collector, lockaddr,
1450 orig_size, jump_entry,
1451 trampoline, trampoline_size,
1452 jjump_pad_insn,
1453 jjump_pad_insn_size,
1454 adjusted_insn_addr,
1455 adjusted_insn_addr_end,
1456 err);
1457 }
1458
1459 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1460 architectures. */
1461
1462 static int
1463 x86_get_min_fast_tracepoint_insn_len (void)
1464 {
1465 static int warned_about_fast_tracepoints = 0;
1466
1467 #ifdef __x86_64__
1468 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1469 used for fast tracepoints. */
1470 if (is_64bit_tdesc ())
1471 return 5;
1472 #endif
1473
1474 if (agent_loaded_p ())
1475 {
1476 char errbuf[IPA_BUFSIZ];
1477
1478 errbuf[0] = '\0';
1479
1480 /* On x86, if trampolines are available, then 4-byte jump instructions
1481 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1482 with a 4-byte offset are used instead. */
1483 if (have_fast_tracepoint_trampoline_buffer (errbuf))
1484 return 4;
1485 else
1486 {
1487          /* GDB has no channel to explain to the user why a shorter fast
1488 tracepoint is not possible, but at least make GDBserver
1489 mention that something has gone awry. */
1490 if (!warned_about_fast_tracepoints)
1491 {
1492 warning ("4-byte fast tracepoints not available; %s\n", errbuf);
1493 warned_about_fast_tracepoints = 1;
1494 }
1495 return 5;
1496 }
1497 }
1498 else
1499 {
1500 /* Indicate that the minimum length is currently unknown since the IPA
1501 has not loaded yet. */
1502 return 0;
1503 }
1504 }
1505
1506 static void
1507 add_insns (unsigned char *start, int len)
1508 {
1509 CORE_ADDR buildaddr = current_insn_ptr;
1510
1511 if (debug_threads)
1512 debug_printf ("Adding %d bytes of insn at %s\n",
1513 len, paddress (buildaddr));
1514
1515 append_insns (&buildaddr, len, start);
1516 current_insn_ptr = buildaddr;
1517 }
1518
1519 /* Our general strategy for emitting code is to avoid specifying raw
1520 bytes whenever possible, and instead copy a block of inline asm
1521 that is embedded in the function. This is a little messy, because
1522 we need to keep the compiler from discarding what looks like dead
1523 code, plus suppress various warnings. */
1524
1525 #define EMIT_ASM(NAME, INSNS) \
1526 do \
1527 { \
1528 extern unsigned char start_ ## NAME, end_ ## NAME; \
1529 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1530 __asm__ ("jmp end_" #NAME "\n" \
1531 "\t" "start_" #NAME ":" \
1532 "\t" INSNS "\n" \
1533 "\t" "end_" #NAME ":"); \
1534 } while (0)
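/* In other words, the inline asm plants the instruction bytes between
   the start_NAME and end_NAME labels inside gdbserver's own text
   section (the leading jmp keeps them from being executed in place),
   and add_insns then copies that byte range into the inferior's jump
   pad.  */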
1535
1536 #ifdef __x86_64__
1537
1538 #define EMIT_ASM32(NAME,INSNS) \
1539 do \
1540 { \
1541 extern unsigned char start_ ## NAME, end_ ## NAME; \
1542 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1543 __asm__ (".code32\n" \
1544 "\t" "jmp end_" #NAME "\n" \
1545 "\t" "start_" #NAME ":\n" \
1546 "\t" INSNS "\n" \
1547 "\t" "end_" #NAME ":\n" \
1548 ".code64\n"); \
1549 } while (0)
1550
1551 #else
1552
1553 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
1554
1555 #endif
1556
1557 #ifdef __x86_64__
1558
1559 static void
1560 amd64_emit_prologue (void)
1561 {
1562 EMIT_ASM (amd64_prologue,
1563 "pushq %rbp\n\t"
1564 "movq %rsp,%rbp\n\t"
1565 "sub $0x20,%rsp\n\t"
1566 "movq %rdi,-8(%rbp)\n\t"
1567 "movq %rsi,-16(%rbp)");
1568 }
1569
1570
1571 static void
1572 amd64_emit_epilogue (void)
1573 {
1574 EMIT_ASM (amd64_epilogue,
1575 "movq -16(%rbp),%rdi\n\t"
1576 "movq %rax,(%rdi)\n\t"
1577 "xor %rax,%rax\n\t"
1578 "leave\n\t"
1579 "ret");
1580 }
1581
1582 static void
1583 amd64_emit_add (void)
1584 {
1585 EMIT_ASM (amd64_add,
1586 "add (%rsp),%rax\n\t"
1587 "lea 0x8(%rsp),%rsp");
1588 }
1589
1590 static void
1591 amd64_emit_sub (void)
1592 {
1593 EMIT_ASM (amd64_sub,
1594 "sub %rax,(%rsp)\n\t"
1595 "pop %rax");
1596 }
1597
1598 static void
1599 amd64_emit_mul (void)
1600 {
1601 emit_error = 1;
1602 }
1603
1604 static void
1605 amd64_emit_lsh (void)
1606 {
1607 emit_error = 1;
1608 }
1609
1610 static void
1611 amd64_emit_rsh_signed (void)
1612 {
1613 emit_error = 1;
1614 }
1615
1616 static void
1617 amd64_emit_rsh_unsigned (void)
1618 {
1619 emit_error = 1;
1620 }
1621
1622 static void
1623 amd64_emit_ext (int arg)
1624 {
1625 switch (arg)
1626 {
1627 case 8:
1628 EMIT_ASM (amd64_ext_8,
1629 "cbtw\n\t"
1630 "cwtl\n\t"
1631 "cltq");
1632 break;
1633 case 16:
1634 EMIT_ASM (amd64_ext_16,
1635 "cwtl\n\t"
1636 "cltq");
1637 break;
1638 case 32:
1639 EMIT_ASM (amd64_ext_32,
1640 "cltq");
1641 break;
1642 default:
1643 emit_error = 1;
1644 }
1645 }
1646
1647 static void
1648 amd64_emit_log_not (void)
1649 {
1650 EMIT_ASM (amd64_log_not,
1651 "test %rax,%rax\n\t"
1652 "sete %cl\n\t"
1653 "movzbq %cl,%rax");
1654 }
1655
1656 static void
1657 amd64_emit_bit_and (void)
1658 {
1659 EMIT_ASM (amd64_and,
1660 "and (%rsp),%rax\n\t"
1661 "lea 0x8(%rsp),%rsp");
1662 }
1663
1664 static void
1665 amd64_emit_bit_or (void)
1666 {
1667 EMIT_ASM (amd64_or,
1668 "or (%rsp),%rax\n\t"
1669 "lea 0x8(%rsp),%rsp");
1670 }
1671
1672 static void
1673 amd64_emit_bit_xor (void)
1674 {
1675 EMIT_ASM (amd64_xor,
1676 "xor (%rsp),%rax\n\t"
1677 "lea 0x8(%rsp),%rsp");
1678 }
1679
1680 static void
1681 amd64_emit_bit_not (void)
1682 {
1683 EMIT_ASM (amd64_bit_not,
1684 "xorq $0xffffffffffffffff,%rax");
1685 }
1686
1687 static void
1688 amd64_emit_equal (void)
1689 {
1690 EMIT_ASM (amd64_equal,
1691 "cmp %rax,(%rsp)\n\t"
1692 "je .Lamd64_equal_true\n\t"
1693 "xor %rax,%rax\n\t"
1694 "jmp .Lamd64_equal_end\n\t"
1695 ".Lamd64_equal_true:\n\t"
1696 "mov $0x1,%rax\n\t"
1697 ".Lamd64_equal_end:\n\t"
1698 "lea 0x8(%rsp),%rsp");
1699 }
1700
1701 static void
1702 amd64_emit_less_signed (void)
1703 {
1704 EMIT_ASM (amd64_less_signed,
1705 "cmp %rax,(%rsp)\n\t"
1706 "jl .Lamd64_less_signed_true\n\t"
1707 "xor %rax,%rax\n\t"
1708 "jmp .Lamd64_less_signed_end\n\t"
1709 ".Lamd64_less_signed_true:\n\t"
1710 "mov $1,%rax\n\t"
1711 ".Lamd64_less_signed_end:\n\t"
1712 "lea 0x8(%rsp),%rsp");
1713 }
1714
1715 static void
1716 amd64_emit_less_unsigned (void)
1717 {
1718 EMIT_ASM (amd64_less_unsigned,
1719 "cmp %rax,(%rsp)\n\t"
1720 "jb .Lamd64_less_unsigned_true\n\t"
1721 "xor %rax,%rax\n\t"
1722 "jmp .Lamd64_less_unsigned_end\n\t"
1723 ".Lamd64_less_unsigned_true:\n\t"
1724 "mov $1,%rax\n\t"
1725 ".Lamd64_less_unsigned_end:\n\t"
1726 "lea 0x8(%rsp),%rsp");
1727 }
1728
1729 static void
1730 amd64_emit_ref (int size)
1731 {
1732 switch (size)
1733 {
1734 case 1:
1735 EMIT_ASM (amd64_ref1,
1736 "movb (%rax),%al");
1737 break;
1738 case 2:
1739 EMIT_ASM (amd64_ref2,
1740 "movw (%rax),%ax");
1741 break;
1742 case 4:
1743 EMIT_ASM (amd64_ref4,
1744 "movl (%rax),%eax");
1745 break;
1746 case 8:
1747 EMIT_ASM (amd64_ref8,
1748 "movq (%rax),%rax");
1749 break;
1750 }
1751 }
1752
1753 static void
1754 amd64_emit_if_goto (int *offset_p, int *size_p)
1755 {
1756 EMIT_ASM (amd64_if_goto,
1757 "mov %rax,%rcx\n\t"
1758 "pop %rax\n\t"
1759 "cmp $0,%rcx\n\t"
1760 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
1761 if (offset_p)
1762 *offset_p = 10;
1763 if (size_p)
1764 *size_p = 4;
1765 }
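/* The offset/size pair describes where the to-be-patched displacement
   lives: the mov (3 bytes), pop (1), cmp (4) and the hand-written
   0x0f 0x85 jne opcode (2) add up to 10 bytes, so the 4-byte
   displacement that amd64_write_goto_address later fills in starts at
   offset 10.  */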
1766
1767 static void
1768 amd64_emit_goto (int *offset_p, int *size_p)
1769 {
1770 EMIT_ASM (amd64_goto,
1771 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
1772 if (offset_p)
1773 *offset_p = 1;
1774 if (size_p)
1775 *size_p = 4;
1776 }
1777
1778 static void
1779 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
1780 {
1781 int diff = (to - (from + size));
1782 unsigned char buf[sizeof (int)];
1783
1784 if (size != 4)
1785 {
1786 emit_error = 1;
1787 return;
1788 }
1789
1790 memcpy (buf, &diff, sizeof (int));
1791 write_inferior_memory (from, buf, sizeof (int));
1792 }
1793
1794 static void
1795 amd64_emit_const (LONGEST num)
1796 {
1797 unsigned char buf[16];
1798 int i;
1799 CORE_ADDR buildaddr = current_insn_ptr;
1800
1801 i = 0;
1802 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
1803 memcpy (&buf[i], &num, sizeof (num));
1804 i += 8;
1805 append_insns (&buildaddr, i, buf);
1806 current_insn_ptr = buildaddr;
1807 }
1808
1809 static void
1810 amd64_emit_call (CORE_ADDR fn)
1811 {
1812 unsigned char buf[16];
1813 int i;
1814 CORE_ADDR buildaddr;
1815 LONGEST offset64;
1816
1817   /* The destination function, being in a shared library, may be more
1818      than 31 bits away from the compiled code pad. */
1819
1820 buildaddr = current_insn_ptr;
1821
1822 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
1823
1824 i = 0;
1825
1826 if (offset64 > INT_MAX || offset64 < INT_MIN)
1827 {
1828 /* Offset is too large for a call. Use callq, but that requires
1829 a register, so avoid it if possible. Use r10, since it is
1830 call-clobbered, we don't have to push/pop it. */
1831 buf[i++] = 0x48; /* mov $fn,%r10 */
1832 buf[i++] = 0xba;
1833 memcpy (buf + i, &fn, 8);
1834 i += 8;
1835 buf[i++] = 0xff; /* callq *%r10 */
1836 buf[i++] = 0xd2;
1837 }
1838 else
1839 {
1840 int offset32 = offset64; /* we know we can't overflow here. */
1841 memcpy (buf + i, &offset32, 4);
1842 i += 4;
1843 }
1844
1845 append_insns (&buildaddr, i, buf);
1846 current_insn_ptr = buildaddr;
1847 }
1848
1849 static void
1850 amd64_emit_reg (int reg)
1851 {
1852 unsigned char buf[16];
1853 int i;
1854 CORE_ADDR buildaddr;
1855
1856 /* Assume raw_regs is still in %rdi. */
1857 buildaddr = current_insn_ptr;
1858 i = 0;
1859 buf[i++] = 0xbe; /* mov $<n>,%esi */
1860 memcpy (&buf[i], &reg, sizeof (reg));
1861 i += 4;
1862 append_insns (&buildaddr, i, buf);
1863 current_insn_ptr = buildaddr;
1864 amd64_emit_call (get_raw_reg_func_addr ());
1865 }
1866
1867 static void
1868 amd64_emit_pop (void)
1869 {
1870 EMIT_ASM (amd64_pop,
1871 "pop %rax");
1872 }
1873
1874 static void
1875 amd64_emit_stack_flush (void)
1876 {
1877 EMIT_ASM (amd64_stack_flush,
1878 "push %rax");
1879 }
1880
1881 static void
1882 amd64_emit_zero_ext (int arg)
1883 {
1884 switch (arg)
1885 {
1886 case 8:
1887 EMIT_ASM (amd64_zero_ext_8,
1888 "and $0xff,%rax");
1889 break;
1890 case 16:
1891 EMIT_ASM (amd64_zero_ext_16,
1892 "and $0xffff,%rax");
1893 break;
1894 case 32:
1895 EMIT_ASM (amd64_zero_ext_32,
1896 "mov $0xffffffff,%rcx\n\t"
1897 "and %rcx,%rax");
1898 break;
1899 default:
1900 emit_error = 1;
1901 }
1902 }
1903
1904 static void
1905 amd64_emit_swap (void)
1906 {
1907 EMIT_ASM (amd64_swap,
1908 "mov %rax,%rcx\n\t"
1909 "pop %rax\n\t"
1910 "push %rcx");
1911 }
1912
1913 static void
1914 amd64_emit_stack_adjust (int n)
1915 {
1916 unsigned char buf[16];
1917 int i;
1918 CORE_ADDR buildaddr = current_insn_ptr;
1919
1920 i = 0;
1921 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
1922 buf[i++] = 0x8d;
1923 buf[i++] = 0x64;
1924 buf[i++] = 0x24;
1925 /* This only handles adjustments up to 16, but we don't expect any more. */
1926 buf[i++] = n * 8;
1927 append_insns (&buildaddr, i, buf);
1928 current_insn_ptr = buildaddr;
1929 }
1930
1931 /* FN's prototype is `LONGEST(*fn)(int)'. */
1932
1933 static void
1934 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
1935 {
1936 unsigned char buf[16];
1937 int i;
1938 CORE_ADDR buildaddr;
1939
1940 buildaddr = current_insn_ptr;
1941 i = 0;
1942 buf[i++] = 0xbf; /* movl $<n>,%edi */
1943 memcpy (&buf[i], &arg1, sizeof (arg1));
1944 i += 4;
1945 append_insns (&buildaddr, i, buf);
1946 current_insn_ptr = buildaddr;
1947 amd64_emit_call (fn);
1948 }
1949
1950 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
1951
1952 static void
1953 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
1954 {
1955 unsigned char buf[16];
1956 int i;
1957 CORE_ADDR buildaddr;
1958
1959 buildaddr = current_insn_ptr;
1960 i = 0;
1961 buf[i++] = 0xbf; /* movl $<n>,%edi */
1962 memcpy (&buf[i], &arg1, sizeof (arg1));
1963 i += 4;
1964 append_insns (&buildaddr, i, buf);
1965 current_insn_ptr = buildaddr;
1966 EMIT_ASM (amd64_void_call_2_a,
1967 /* Save away a copy of the stack top. */
1968 "push %rax\n\t"
1969 /* Also pass top as the second argument. */
1970 "mov %rax,%rsi");
1971 amd64_emit_call (fn);
1972 EMIT_ASM (amd64_void_call_2_b,
1973 /* Restore the stack top, %rax may have been trashed. */
1974 "pop %rax");
1975 }
1976
1977 void
1978 amd64_emit_eq_goto (int *offset_p, int *size_p)
1979 {
1980 EMIT_ASM (amd64_eq,
1981 "cmp %rax,(%rsp)\n\t"
1982 "jne .Lamd64_eq_fallthru\n\t"
1983 "lea 0x8(%rsp),%rsp\n\t"
1984 "pop %rax\n\t"
1985 /* jmp, but don't trust the assembler to choose the right jump */
1986 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
1987 ".Lamd64_eq_fallthru:\n\t"
1988 "lea 0x8(%rsp),%rsp\n\t"
1989 "pop %rax");
1990
1991 if (offset_p)
1992 *offset_p = 13;
1993 if (size_p)
1994 *size_p = 4;
1995 }
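/* Here the cmp (4 bytes), short jne (2), lea (5) and pop (1) occupy 12
   bytes and the hand-written 0xe9 opcode comes next, so the patchable
   4-byte displacement begins at offset 13.  The ne/lt/le/gt/ge
   variants below share this layout.  */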
1996
1997 void
1998 amd64_emit_ne_goto (int *offset_p, int *size_p)
1999 {
2000 EMIT_ASM (amd64_ne,
2001 "cmp %rax,(%rsp)\n\t"
2002 "je .Lamd64_ne_fallthru\n\t"
2003 "lea 0x8(%rsp),%rsp\n\t"
2004 "pop %rax\n\t"
2005 /* jmp, but don't trust the assembler to choose the right jump */
2006 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2007 ".Lamd64_ne_fallthru:\n\t"
2008 "lea 0x8(%rsp),%rsp\n\t"
2009 "pop %rax");
2010
2011 if (offset_p)
2012 *offset_p = 13;
2013 if (size_p)
2014 *size_p = 4;
2015 }
2016
2017 void
2018 amd64_emit_lt_goto (int *offset_p, int *size_p)
2019 {
2020 EMIT_ASM (amd64_lt,
2021 "cmp %rax,(%rsp)\n\t"
2022 "jnl .Lamd64_lt_fallthru\n\t"
2023 "lea 0x8(%rsp),%rsp\n\t"
2024 "pop %rax\n\t"
2025 /* jmp, but don't trust the assembler to choose the right jump */
2026 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2027 ".Lamd64_lt_fallthru:\n\t"
2028 "lea 0x8(%rsp),%rsp\n\t"
2029 "pop %rax");
2030
2031 if (offset_p)
2032 *offset_p = 13;
2033 if (size_p)
2034 *size_p = 4;
2035 }
2036
2037 void
2038 amd64_emit_le_goto (int *offset_p, int *size_p)
2039 {
2040 EMIT_ASM (amd64_le,
2041 "cmp %rax,(%rsp)\n\t"
2042 "jnle .Lamd64_le_fallthru\n\t"
2043 "lea 0x8(%rsp),%rsp\n\t"
2044 "pop %rax\n\t"
2045 /* jmp, but don't trust the assembler to choose the right jump */
2046 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2047 ".Lamd64_le_fallthru:\n\t"
2048 "lea 0x8(%rsp),%rsp\n\t"
2049 "pop %rax");
2050
2051 if (offset_p)
2052 *offset_p = 13;
2053 if (size_p)
2054 *size_p = 4;
2055 }
2056
2057 void
2058 amd64_emit_gt_goto (int *offset_p, int *size_p)
2059 {
2060 EMIT_ASM (amd64_gt,
2061 "cmp %rax,(%rsp)\n\t"
2062 "jng .Lamd64_gt_fallthru\n\t"
2063 "lea 0x8(%rsp),%rsp\n\t"
2064 "pop %rax\n\t"
2065 /* jmp, but don't trust the assembler to choose the right jump */
2066 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2067 ".Lamd64_gt_fallthru:\n\t"
2068 "lea 0x8(%rsp),%rsp\n\t"
2069 "pop %rax");
2070
2071 if (offset_p)
2072 *offset_p = 13;
2073 if (size_p)
2074 *size_p = 4;
2075 }
2076
2077 void
2078 amd64_emit_ge_goto (int *offset_p, int *size_p)
2079 {
2080 EMIT_ASM (amd64_ge,
2081 "cmp %rax,(%rsp)\n\t"
2082 "jnge .Lamd64_ge_fallthru\n\t"
2083 ".Lamd64_ge_jump:\n\t"
2084 "lea 0x8(%rsp),%rsp\n\t"
2085 "pop %rax\n\t"
2086 /* jmp, but don't trust the assembler to choose the right jump */
2087 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2088 ".Lamd64_ge_fallthru:\n\t"
2089 "lea 0x8(%rsp),%rsp\n\t"
2090 "pop %rax");
2091
2092 if (offset_p)
2093 *offset_p = 13;
2094 if (size_p)
2095 *size_p = 4;
2096 }
2097
2098 struct emit_ops amd64_emit_ops =
2099 {
2100 amd64_emit_prologue,
2101 amd64_emit_epilogue,
2102 amd64_emit_add,
2103 amd64_emit_sub,
2104 amd64_emit_mul,
2105 amd64_emit_lsh,
2106 amd64_emit_rsh_signed,
2107 amd64_emit_rsh_unsigned,
2108 amd64_emit_ext,
2109 amd64_emit_log_not,
2110 amd64_emit_bit_and,
2111 amd64_emit_bit_or,
2112 amd64_emit_bit_xor,
2113 amd64_emit_bit_not,
2114 amd64_emit_equal,
2115 amd64_emit_less_signed,
2116 amd64_emit_less_unsigned,
2117 amd64_emit_ref,
2118 amd64_emit_if_goto,
2119 amd64_emit_goto,
2120 amd64_write_goto_address,
2121 amd64_emit_const,
2122 amd64_emit_call,
2123 amd64_emit_reg,
2124 amd64_emit_pop,
2125 amd64_emit_stack_flush,
2126 amd64_emit_zero_ext,
2127 amd64_emit_swap,
2128 amd64_emit_stack_adjust,
2129 amd64_emit_int_call_1,
2130 amd64_emit_void_call_2,
2131 amd64_emit_eq_goto,
2132 amd64_emit_ne_goto,
2133 amd64_emit_lt_goto,
2134 amd64_emit_le_goto,
2135 amd64_emit_gt_goto,
2136 amd64_emit_ge_goto
2137 };
2138
2139 #endif /* __x86_64__ */
2140
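/* The i386 flavour of the emitters below keeps each 64-bit value split
   across the %ebx:%eax pair (high:low), so the value stack uses 8-byte
   slots: i386_emit_stack_flush pushes %ebx then %eax, the arithmetic
   helpers operate on (%esp)/4(%esp) pairs, and i386_emit_stack_adjust
   scales its argument by 8.  */
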
2141 static void
2142 i386_emit_prologue (void)
2143 {
2144 EMIT_ASM32 (i386_prologue,
2145 "push %ebp\n\t"
2146 "mov %esp,%ebp\n\t"
2147 "push %ebx");
2148 /* At this point, the raw regs base address is at 8(%ebp), and the
2149 value pointer is at 12(%ebp). */
2150 }
2151
2152 static void
2153 i386_emit_epilogue (void)
2154 {
2155 EMIT_ASM32 (i386_epilogue,
2156 "mov 12(%ebp),%ecx\n\t"
2157 "mov %eax,(%ecx)\n\t"
2158 "mov %ebx,0x4(%ecx)\n\t"
2159 "xor %eax,%eax\n\t"
2160 "pop %ebx\n\t"
2161 "pop %ebp\n\t"
2162 "ret");
2163 }
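
/* Taken together, the prologue/epilogue pair make the emitted code behave
   like a cdecl function of two arguments (raw register block at 8(%ebp),
   result pointer at 12(%ebp)) that stores the 64-bit top-of-stack through
   the result pointer and returns zero in %eax.  This is a reading of the
   code above; the exact function type is declared with the bytecode
   compiler that calls it.  */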
2164
2165 static void
2166 i386_emit_add (void)
2167 {
2168 EMIT_ASM32 (i386_add,
2169 "add (%esp),%eax\n\t"
2170 "adc 0x4(%esp),%ebx\n\t"
2171 "lea 0x8(%esp),%esp");
2172 }
2173
2174 static void
2175 i386_emit_sub (void)
2176 {
2177 EMIT_ASM32 (i386_sub,
2178 "subl %eax,(%esp)\n\t"
2179 "sbbl %ebx,4(%esp)\n\t"
2180 "pop %eax\n\t"
2181 "pop %ebx");
2182 }
2183
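/* 64-bit multiply and shifts have no hand-written i386 sequence here.
   Setting emit_error makes the bytecode compiler abandon JIT compilation
   of the current agent expression, which is then presumably evaluated by
   the agent expression interpreter instead.  */
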
2184 static void
2185 i386_emit_mul (void)
2186 {
2187 emit_error = 1;
2188 }
2189
2190 static void
2191 i386_emit_lsh (void)
2192 {
2193 emit_error = 1;
2194 }
2195
2196 static void
2197 i386_emit_rsh_signed (void)
2198 {
2199 emit_error = 1;
2200 }
2201
2202 static void
2203 i386_emit_rsh_unsigned (void)
2204 {
2205 emit_error = 1;
2206 }
2207
2208 static void
2209 i386_emit_ext (int arg)
2210 {
2211 switch (arg)
2212 {
2213 case 8:
2214 EMIT_ASM32 (i386_ext_8,
2215 "cbtw\n\t"
2216 "cwtl\n\t"
2217 "movl %eax,%ebx\n\t"
2218 "sarl $31,%ebx");
2219 break;
2220 case 16:
2221 EMIT_ASM32 (i386_ext_16,
2222 "cwtl\n\t"
2223 "movl %eax,%ebx\n\t"
2224 "sarl $31,%ebx");
2225 break;
2226 case 32:
2227 EMIT_ASM32 (i386_ext_32,
2228 "movl %eax,%ebx\n\t"
2229 "sarl $31,%ebx");
2230 break;
2231 default:
2232 emit_error = 1;
2233 }
2234 }
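
/* Worked example for the 8-bit case above: with %al = 0x9c (-100), cbtw
   then cwtl widen it to %eax = 0xffffff9c, and the sar leaves
   %ebx = 0xffffffff, so the %ebx:%eax pair holds -100 as a 64-bit
   value.  */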
2235
2236 static void
2237 i386_emit_log_not (void)
2238 {
2239 EMIT_ASM32 (i386_log_not,
2240 "or %ebx,%eax\n\t"
2241 "test %eax,%eax\n\t"
2242 "sete %cl\n\t"
2243 "xor %ebx,%ebx\n\t"
2244 "movzbl %cl,%eax");
2245 }
2246
2247 static void
2248 i386_emit_bit_and (void)
2249 {
2250 EMIT_ASM32 (i386_and,
2251 "and (%esp),%eax\n\t"
2252 "and 0x4(%esp),%ebx\n\t"
2253 "lea 0x8(%esp),%esp");
2254 }
2255
2256 static void
2257 i386_emit_bit_or (void)
2258 {
2259 EMIT_ASM32 (i386_or,
2260 "or (%esp),%eax\n\t"
2261 "or 0x4(%esp),%ebx\n\t"
2262 "lea 0x8(%esp),%esp");
2263 }
2264
2265 static void
2266 i386_emit_bit_xor (void)
2267 {
2268 EMIT_ASM32 (i386_xor,
2269 "xor (%esp),%eax\n\t"
2270 "xor 0x4(%esp),%ebx\n\t"
2271 "lea 0x8(%esp),%esp");
2272 }
2273
2274 static void
2275 i386_emit_bit_not (void)
2276 {
2277 EMIT_ASM32 (i386_bit_not,
2278 "xor $0xffffffff,%eax\n\t"
2279 "xor $0xffffffff,%ebx");
2280 }
2281
2282 static void
2283 i386_emit_equal (void)
2284 {
2285 EMIT_ASM32 (i386_equal,
2286 "cmpl %ebx,4(%esp)\n\t"
2287 "jne .Li386_equal_false\n\t"
2288 "cmpl %eax,(%esp)\n\t"
2289 "je .Li386_equal_true\n\t"
2290 ".Li386_equal_false:\n\t"
2291 "xor %eax,%eax\n\t"
2292 "jmp .Li386_equal_end\n\t"
2293 ".Li386_equal_true:\n\t"
2294 "mov $1,%eax\n\t"
2295 ".Li386_equal_end:\n\t"
2296 "xor %ebx,%ebx\n\t"
2297 "lea 0x8(%esp),%esp");
2298 }
2299
2300 static void
2301 i386_emit_less_signed (void)
2302 {
2303 EMIT_ASM32 (i386_less_signed,
2304 "cmpl %ebx,4(%esp)\n\t"
2305 "jl .Li386_less_signed_true\n\t"
2306 "jne .Li386_less_signed_false\n\t"
2307 "cmpl %eax,(%esp)\n\t"
2308 "jl .Li386_less_signed_true\n\t"
2309 ".Li386_less_signed_false:\n\t"
2310 "xor %eax,%eax\n\t"
2311 "jmp .Li386_less_signed_end\n\t"
2312 ".Li386_less_signed_true:\n\t"
2313 "mov $1,%eax\n\t"
2314 ".Li386_less_signed_end:\n\t"
2315 "xor %ebx,%ebx\n\t"
2316 "lea 0x8(%esp),%esp");
2317 }
2318
2319 static void
2320 i386_emit_less_unsigned (void)
2321 {
2322 EMIT_ASM32 (i386_less_unsigned,
2323 "cmpl %ebx,4(%esp)\n\t"
2324 "jb .Li386_less_unsigned_true\n\t"
2325 "jne .Li386_less_unsigned_false\n\t"
2326 "cmpl %eax,(%esp)\n\t"
2327 "jb .Li386_less_unsigned_true\n\t"
2328 ".Li386_less_unsigned_false:\n\t"
2329 "xor %eax,%eax\n\t"
2330 "jmp .Li386_less_unsigned_end\n\t"
2331 ".Li386_less_unsigned_true:\n\t"
2332 "mov $1,%eax\n\t"
2333 ".Li386_less_unsigned_end:\n\t"
2334 "xor %ebx,%ebx\n\t"
2335 "lea 0x8(%esp),%esp");
2336 }
2337
2338 static void
2339 i386_emit_ref (int size)
2340 {
2341 switch (size)
2342 {
2343 case 1:
2344 EMIT_ASM32 (i386_ref1,
2345 "movb (%eax),%al");
2346 break;
2347 case 2:
2348 EMIT_ASM32 (i386_ref2,
2349 "movw (%eax),%ax");
2350 break;
2351 case 4:
2352 EMIT_ASM32 (i386_ref4,
2353 "movl (%eax),%eax");
2354 break;
2355 case 8:
2356 EMIT_ASM32 (i386_ref8,
2357 "movl 4(%eax),%ebx\n\t"
2358 "movl (%eax),%eax");
2359 break;
2360 }
2361 }
2362
2363 static void
2364 i386_emit_if_goto (int *offset_p, int *size_p)
2365 {
2366 EMIT_ASM32 (i386_if_goto,
2367 "mov %eax,%ecx\n\t"
2368 "or %ebx,%ecx\n\t"
2369 "pop %eax\n\t"
2370 "pop %ebx\n\t"
2371 "cmpl $0,%ecx\n\t"
2372 /* Don't trust the assembler to choose the right jump */
2373 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2374
2375 if (offset_p)
2376 *offset_p = 11; /* Offset of the jne's 32-bit displacement; keep in sync with the sequence above. */
2377 if (size_p)
2378 *size_p = 4;
2379 }
2380
2381 static void
2382 i386_emit_goto (int *offset_p, int *size_p)
2383 {
2384 EMIT_ASM32 (i386_goto,
2385 /* Don't trust the assembler to choose the right jump */
2386 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2387 if (offset_p)
2388 *offset_p = 1;
2389 if (size_p)
2390 *size_p = 4;
2391 }
2392
2393 static void
2394 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2395 {
2396 int diff = (to - (from + size));
2397 unsigned char buf[sizeof (int)];
2398
2399 /* We're only doing 4-byte sizes at the moment. */
2400 if (size != 4)
2401 {
2402 emit_error = 1;
2403 return;
2404 }
2405
2406 memcpy (buf, &diff, sizeof (int));
2407 write_inferior_memory (from, buf, sizeof (int));
2408 }
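
/* The displacement written above is relative to the first byte after the
   displacement field itself (FROM + SIZE), matching the 0xe9 / 0x0f 0x85
   forms emitted elsewhere in this file.  For example, patching a 4-byte
   displacement at 0x1000 to reach 0x1020 stores 0x1c.  */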
2409
2410 static void
2411 i386_emit_const (LONGEST num)
2412 {
2413 unsigned char buf[16];
2414 int i, hi, lo;
2415 CORE_ADDR buildaddr = current_insn_ptr;
2416
2417 i = 0;
2418 buf[i++] = 0xb8; /* mov $<n>,%eax */
2419 lo = num & 0xffffffff;
2420 memcpy (&buf[i], &lo, sizeof (lo));
2421 i += 4;
2422 hi = ((num >> 32) & 0xffffffff);
2423 if (hi)
2424 {
2425 buf[i++] = 0xbb; /* mov $<n>,%ebx */
2426 memcpy (&buf[i], &hi, sizeof (hi));
2427 i += 4;
2428 }
2429 else
2430 {
2431 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2432 }
2433 append_insns (&buildaddr, i, buf);
2434 current_insn_ptr = buildaddr;
2435 }
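
/* For illustration, i386_emit_const (5) appends
     b8 05 00 00 00        mov $0x5,%eax
     31 db                 xor %ebx,%ebx
   while a constant with a non-zero high half, e.g. 0x100000002, appends
     b8 02 00 00 00        mov $0x2,%eax
     bb 01 00 00 00        mov $0x1,%ebx  */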
2436
2437 static void
2438 i386_emit_call (CORE_ADDR fn)
2439 {
2440 unsigned char buf[16];
2441 int i, offset;
2442 CORE_ADDR buildaddr;
2443
2444 buildaddr = current_insn_ptr;
2445 i = 0;
2446 buf[i++] = 0xe8; /* call <reladdr> */
2447 offset = ((int) fn) - (buildaddr + 5);
2448 memcpy (buf + 1, &offset, 4);
2449 append_insns (&buildaddr, 5, buf);
2450 current_insn_ptr = buildaddr;
2451 }
2452
2453 static void
2454 i386_emit_reg (int reg)
2455 {
2456 unsigned char buf[16];
2457 int i;
2458 CORE_ADDR buildaddr;
2459
2460 EMIT_ASM32 (i386_reg_a,
2461 "sub $0x8,%esp");
2462 buildaddr = current_insn_ptr;
2463 i = 0;
2464 buf[i++] = 0xb8; /* mov $<n>,%eax */
2465 memcpy (&buf[i], &reg, sizeof (reg));
2466 i += 4;
2467 append_insns (&buildaddr, i, buf);
2468 current_insn_ptr = buildaddr;
2469 EMIT_ASM32 (i386_reg_b,
2470 "mov %eax,4(%esp)\n\t"
2471 "mov 8(%ebp),%eax\n\t"
2472 "mov %eax,(%esp)");
2473 i386_emit_call (get_raw_reg_func_addr ());
2474 EMIT_ASM32 (i386_reg_c,
2475 "xor %ebx,%ebx\n\t"
2476 "lea 0x8(%esp),%esp");
2477 }
2478
2479 static void
2480 i386_emit_pop (void)
2481 {
2482 EMIT_ASM32 (i386_pop,
2483 "pop %eax\n\t"
2484 "pop %ebx");
2485 }
2486
2487 static void
2488 i386_emit_stack_flush (void)
2489 {
2490 EMIT_ASM32 (i386_stack_flush,
2491 "push %ebx\n\t"
2492 "push %eax");
2493 }
2494
2495 static void
2496 i386_emit_zero_ext (int arg)
2497 {
2498 switch (arg)
2499 {
2500 case 8:
2501 EMIT_ASM32 (i386_zero_ext_8,
2502 "and $0xff,%eax\n\t"
2503 "xor %ebx,%ebx");
2504 break;
2505 case 16:
2506 EMIT_ASM32 (i386_zero_ext_16,
2507 "and $0xffff,%eax\n\t"
2508 "xor %ebx,%ebx");
2509 break;
2510 case 32:
2511 EMIT_ASM32 (i386_zero_ext_32,
2512 "xor %ebx,%ebx");
2513 break;
2514 default:
2515 emit_error = 1;
2516 }
2517 }
2518
2519 static void
2520 i386_emit_swap (void)
2521 {
2522 EMIT_ASM32 (i386_swap,
2523 "mov %eax,%ecx\n\t"
2524 "mov %ebx,%edx\n\t"
2525 "pop %eax\n\t"
2526 "pop %ebx\n\t"
2527 "push %edx\n\t"
2528 "push %ecx");
2529 }
2530
2531 static void
2532 i386_emit_stack_adjust (int n)
2533 {
2534 unsigned char buf[16];
2535 int i;
2536 CORE_ADDR buildaddr = current_insn_ptr;
2537
2538 i = 0;
2539 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2540 buf[i++] = 0x64;
2541 buf[i++] = 0x24;
2542 buf[i++] = n * 8;
2543 append_insns (&buildaddr, i, buf);
2544 current_insn_ptr = buildaddr;
2545 }
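
/* Note that the lea above uses an 8-bit displacement (8d 64 24 <disp8>),
   so N * 8 must fit in a signed byte; the bytecode compiler only asks for
   small adjustments, which is presumably why no wider form is emitted.  */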
2546
2547 /* FN's prototype is `LONGEST(*fn)(int)'. */
2548
2549 static void
2550 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
2551 {
2552 unsigned char buf[16];
2553 int i;
2554 CORE_ADDR buildaddr;
2555
2556 EMIT_ASM32 (i386_int_call_1_a,
2557 /* Reserve a bit of stack space. */
2558 "sub $0x8,%esp");
2559 /* Put the one argument on the stack. */
2560 buildaddr = current_insn_ptr;
2561 i = 0;
2562 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2563 buf[i++] = 0x04;
2564 buf[i++] = 0x24;
2565 memcpy (&buf[i], &arg1, sizeof (arg1));
2566 i += 4;
2567 append_insns (&buildaddr, i, buf);
2568 current_insn_ptr = buildaddr;
2569 i386_emit_call (fn);
2570 EMIT_ASM32 (i386_int_call_1_c,
2571 "mov %edx,%ebx\n\t"
2572 "lea 0x8(%esp),%esp");
2573 }
2574
2575 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2576
2577 static void
2578 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
2579 {
2580 unsigned char buf[16];
2581 int i;
2582 CORE_ADDR buildaddr;
2583
2584 EMIT_ASM32 (i386_void_call_2_a,
2585 /* Preserve %eax only; %ebx is callee-saved, so the called
2585 function will preserve it for us. */
2586 "push %eax\n\t"
2587 /* Reserve a bit of stack space for arguments. */
2588 "sub $0x10,%esp\n\t"
2589 /* Copy "top" to the second argument position. (Note that
2590 we can't assume the function won't scribble on its
2591 arguments, so don't try to restore from this.) */
2592 "mov %eax,4(%esp)\n\t"
2593 "mov %ebx,8(%esp)");
2594 /* Put the first argument on the stack. */
2595 buildaddr = current_insn_ptr;
2596 i = 0;
2597 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2598 buf[i++] = 0x04;
2599 buf[i++] = 0x24;
2600 memcpy (&buf[i], &arg1, sizeof (arg1));
2601 i += 4;
2602 append_insns (&buildaddr, i, buf);
2603 current_insn_ptr = buildaddr;
2604 i386_emit_call (fn);
2605 EMIT_ASM32 (i386_void_call_2_b,
2606 "lea 0x10(%esp),%esp\n\t"
2607 /* Restore original stack top. */
2608 "pop %eax");
2609 }
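
/* At the point of the call emitted above, the stack looks like:
     (%esp)      ARG1
     4(%esp)     low half of the saved top-of-stack (%eax)
     8(%esp)     high half (%ebx)
     0xc(%esp)   unused padding
     0x10(%esp)  the pushed copy of %eax restored afterwards
   which matches the `void (*fn) (int, LONGEST)' cdecl prototype noted
   above.  */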
2610
2611
2612 void
2613 i386_emit_eq_goto (int *offset_p, int *size_p)
2614 {
2615 EMIT_ASM32 (eq,
2616 /* Check the low half first; it is more likely to be the decider. */
2617 "cmpl %eax,(%esp)\n\t"
2618 "jne .Leq_fallthru\n\t"
2619 "cmpl %ebx,4(%esp)\n\t"
2620 "jne .Leq_fallthru\n\t"
2621 "lea 0x8(%esp),%esp\n\t"
2622 "pop %eax\n\t"
2623 "pop %ebx\n\t"
2624 /* jmp, but don't trust the assembler to choose the right jump */
2625 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2626 ".Leq_fallthru:\n\t"
2627 "lea 0x8(%esp),%esp\n\t"
2628 "pop %eax\n\t"
2629 "pop %ebx");
2630
2631 if (offset_p)
2632 *offset_p = 18;
2633 if (size_p)
2634 *size_p = 4;
2635 }
2636
2637 void
2638 i386_emit_ne_goto (int *offset_p, int *size_p)
2639 {
2640 EMIT_ASM32 (ne,
2641 /* Check the low half first; it is more likely to be the decider. */
2642 "cmpl %eax,(%esp)\n\t"
2643 "jne .Lne_jump\n\t"
2644 "cmpl %ebx,4(%esp)\n\t"
2645 "je .Lne_fallthru\n\t"
2646 ".Lne_jump:\n\t"
2647 "lea 0x8(%esp),%esp\n\t"
2648 "pop %eax\n\t"
2649 "pop %ebx\n\t"
2650 /* jmp, but don't trust the assembler to choose the right jump */
2651 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2652 ".Lne_fallthru:\n\t"
2653 "lea 0x8(%esp),%esp\n\t"
2654 "pop %eax\n\t"
2655 "pop %ebx");
2656
2657 if (offset_p)
2658 *offset_p = 18;
2659 if (size_p)
2660 *size_p = 4;
2661 }
2662
2663 void
2664 i386_emit_lt_goto (int *offset_p, int *size_p)
2665 {
2666 EMIT_ASM32 (lt,
2667 "cmpl %ebx,4(%esp)\n\t"
2668 "jl .Llt_jump\n\t"
2669 "jne .Llt_fallthru\n\t"
2670 "cmpl %eax,(%esp)\n\t"
2671 "jnl .Llt_fallthru\n\t"
2672 ".Llt_jump:\n\t"
2673 "lea 0x8(%esp),%esp\n\t"
2674 "pop %eax\n\t"
2675 "pop %ebx\n\t"
2676 /* jmp, but don't trust the assembler to choose the right jump */
2677 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2678 ".Llt_fallthru:\n\t"
2679 "lea 0x8(%esp),%esp\n\t"
2680 "pop %eax\n\t"
2681 "pop %ebx");
2682
2683 if (offset_p)
2684 *offset_p = 20;
2685 if (size_p)
2686 *size_p = 4;
2687 }
2688
2689 void
2690 i386_emit_le_goto (int *offset_p, int *size_p)
2691 {
2692 EMIT_ASM32 (le,
2693 "cmpl %ebx,4(%esp)\n\t"
2694 "jle .Lle_jump\n\t"
2695 "jne .Lle_fallthru\n\t"
2696 "cmpl %eax,(%esp)\n\t"
2697 "jnle .Lle_fallthru\n\t"
2698 ".Lle_jump:\n\t"
2699 "lea 0x8(%esp),%esp\n\t"
2700 "pop %eax\n\t"
2701 "pop %ebx\n\t"
2702 /* jmp, but don't trust the assembler to choose the right jump */
2703 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2704 ".Lle_fallthru:\n\t"
2705 "lea 0x8(%esp),%esp\n\t"
2706 "pop %eax\n\t"
2707 "pop %ebx");
2708
2709 if (offset_p)
2710 *offset_p = 20;
2711 if (size_p)
2712 *size_p = 4;
2713 }
2714
2715 void
2716 i386_emit_gt_goto (int *offset_p, int *size_p)
2717 {
2718 EMIT_ASM32 (gt,
2719 "cmpl %ebx,4(%esp)\n\t"
2720 "jg .Lgt_jump\n\t"
2721 "jne .Lgt_fallthru\n\t"
2722 "cmpl %eax,(%esp)\n\t"
2723 "jng .Lgt_fallthru\n\t"
2724 ".Lgt_jump:\n\t"
2725 "lea 0x8(%esp),%esp\n\t"
2726 "pop %eax\n\t"
2727 "pop %ebx\n\t"
2728 /* jmp, but don't trust the assembler to choose the right jump */
2729 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2730 ".Lgt_fallthru:\n\t"
2731 "lea 0x8(%esp),%esp\n\t"
2732 "pop %eax\n\t"
2733 "pop %ebx");
2734
2735 if (offset_p)
2736 *offset_p = 20;
2737 if (size_p)
2738 *size_p = 4;
2739 }
2740
2741 void
2742 i386_emit_ge_goto (int *offset_p, int *size_p)
2743 {
2744 EMIT_ASM32 (ge,
2745 "cmpl %ebx,4(%esp)\n\t"
2746 "jge .Lge_jump\n\t"
2747 "jne .Lge_fallthru\n\t"
2748 "cmpl %eax,(%esp)\n\t"
2749 "jnge .Lge_fallthru\n\t"
2750 ".Lge_jump:\n\t"
2751 "lea 0x8(%esp),%esp\n\t"
2752 "pop %eax\n\t"
2753 "pop %ebx\n\t"
2754 /* jmp, but don't trust the assembler to choose the right jump */
2755 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2756 ".Lge_fallthru:\n\t"
2757 "lea 0x8(%esp),%esp\n\t"
2758 "pop %eax\n\t"
2759 "pop %ebx");
2760
2761 if (offset_p)
2762 *offset_p = 20;
2763 if (size_p)
2764 *size_p = 4;
2765 }
2766
2767 struct emit_ops i386_emit_ops =
2768 {
2769 i386_emit_prologue,
2770 i386_emit_epilogue,
2771 i386_emit_add,
2772 i386_emit_sub,
2773 i386_emit_mul,
2774 i386_emit_lsh,
2775 i386_emit_rsh_signed,
2776 i386_emit_rsh_unsigned,
2777 i386_emit_ext,
2778 i386_emit_log_not,
2779 i386_emit_bit_and,
2780 i386_emit_bit_or,
2781 i386_emit_bit_xor,
2782 i386_emit_bit_not,
2783 i386_emit_equal,
2784 i386_emit_less_signed,
2785 i386_emit_less_unsigned,
2786 i386_emit_ref,
2787 i386_emit_if_goto,
2788 i386_emit_goto,
2789 i386_write_goto_address,
2790 i386_emit_const,
2791 i386_emit_call,
2792 i386_emit_reg,
2793 i386_emit_pop,
2794 i386_emit_stack_flush,
2795 i386_emit_zero_ext,
2796 i386_emit_swap,
2797 i386_emit_stack_adjust,
2798 i386_emit_int_call_1,
2799 i386_emit_void_call_2,
2800 i386_emit_eq_goto,
2801 i386_emit_ne_goto,
2802 i386_emit_lt_goto,
2803 i386_emit_le_goto,
2804 i386_emit_gt_goto,
2805 i386_emit_ge_goto
2806 };
2807
2808
2809 static struct emit_ops *
2810 x86_emit_ops (void)
2811 {
2812 #ifdef __x86_64__
2813 if (is_64bit_tdesc ())
2814 return &amd64_emit_ops;
2815 else
2816 #endif
2817 return &i386_emit_ops;
2818 }
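
/* A rough sketch of how the bytecode compiler is expected to drive the
   vector returned here when JIT-compiling an agent expression such as
   "$reg0 + 8" (illustrative only -- the real driver also maintains
   current_insn_ptr and patches goto displacements):

     struct emit_ops *ops = x86_emit_ops ();
     ops->emit_prologue ();      (set up the frame)
     ops->emit_reg (0);          (top <- raw register 0)
     ops->emit_stack_flush ();   (push it on the value stack)
     ops->emit_const (8);        (top <- 8)
     ops->emit_add ();           (pop and add: top <- reg0 + 8)
     ops->emit_epilogue ();      (store top through the result pointer)

   The field names here are those suggested by the initializers above; the
   authoritative declaration of struct emit_ops lives with gdbserver's
   bytecode compiler.  */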
2819
2820 /* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
2821
2822 static const gdb_byte *
2823 x86_sw_breakpoint_from_kind (int kind, int *size)
2824 {
2825 *size = x86_breakpoint_len;
2826 return x86_breakpoint;
2827 }
2828
2829 static int
2830 x86_supports_range_stepping (void)
2831 {
2832 return 1;
2833 }
2834
2835 /* Implementation of linux_target_ops method "supports_hardware_single_step".
2836 */
2837
2838 static int
2839 x86_supports_hardware_single_step (void)
2840 {
2841 return 1;
2842 }
2843
2844 static int
2845 x86_get_ipa_tdesc_idx (void)
2846 {
2847 struct regcache *regcache = get_thread_regcache (current_thread, 0);
2848 const struct target_desc *tdesc = regcache->tdesc;
2849
2850 #ifdef __x86_64__
2851 if (tdesc == tdesc_amd64_linux || tdesc == tdesc_amd64_linux_no_xml
2852 || tdesc == tdesc_x32_linux)
2853 return X86_TDESC_SSE;
2854 if (tdesc == tdesc_amd64_avx_linux || tdesc == tdesc_x32_avx_linux)
2855 return X86_TDESC_AVX;
2856 if (tdesc == tdesc_amd64_mpx_linux)
2857 return X86_TDESC_MPX;
2858 if (tdesc == tdesc_amd64_avx_mpx_linux)
2859 return X86_TDESC_AVX_MPX;
2860 if (tdesc == tdesc_amd64_avx512_linux || tdesc == tdesc_x32_avx512_linux)
2861 return X86_TDESC_AVX512;
2862 #endif
2863
2864 if (tdesc == tdesc_i386_mmx_linux)
2865 return X86_TDESC_MMX;
2866 if (tdesc == tdesc_i386_linux || tdesc == tdesc_i386_linux_no_xml)
2867 return X86_TDESC_SSE;
2868 if (tdesc == tdesc_i386_avx_linux)
2869 return X86_TDESC_AVX;
2870 if (tdesc == tdesc_i386_mpx_linux)
2871 return X86_TDESC_MPX;
2872 if (tdesc == tdesc_i386_avx_mpx_linux)
2873 return X86_TDESC_AVX_MPX;
2874 if (tdesc == tdesc_i386_avx512_linux)
2875 return X86_TDESC_AVX512;
2876
2877 return 0;
2878 }
2879
2880 /* This is initialized assuming an amd64 target.  x86_arch_setup
2881 will correct it for the actual target (i386 or amd64). */
2882
2883 struct linux_target_ops the_low_target =
2884 {
2885 x86_arch_setup,
2886 x86_linux_regs_info,
2887 x86_cannot_fetch_register,
2888 x86_cannot_store_register,
2889 NULL, /* fetch_register */
2890 x86_get_pc,
2891 x86_set_pc,
2892 NULL, /* breakpoint_kind_from_pc */
2893 x86_sw_breakpoint_from_kind,
2894 NULL, /* get_next_pcs */
2895 1, /* decr_pc_after_break */
2896 x86_breakpoint_at,
2897 x86_supports_z_point_type,
2898 x86_insert_point,
2899 x86_remove_point,
2900 x86_stopped_by_watchpoint,
2901 x86_stopped_data_address,
2902 /* collect_ptrace_register/supply_ptrace_register are not needed in the
2903 native i386 case (no registers smaller than an xfer unit), and are not
2904 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
2905 NULL,
2906 NULL,
2907 /* need to fix up i386 siginfo if host is amd64 */
2908 x86_siginfo_fixup,
2909 x86_linux_new_process,
2910 x86_linux_new_thread,
2911 x86_linux_new_fork,
2912 x86_linux_prepare_to_resume,
2913 x86_linux_process_qsupported,
2914 x86_supports_tracepoints,
2915 x86_get_thread_area,
2916 x86_install_fast_tracepoint_jump_pad,
2917 x86_emit_ops,
2918 x86_get_min_fast_tracepoint_insn_len,
2919 x86_supports_range_stepping,
2920 NULL, /* breakpoint_kind_from_current_state */
2921 x86_supports_hardware_single_step,
2922 x86_get_syscall_trapinfo,
2923 x86_get_ipa_tdesc_idx,
2924 };
2925
2926 void
2927 initialize_low_arch (void)
2928 {
2929 /* Initialize the Linux target descriptions. */
2930 #ifdef __x86_64__
2931 init_registers_amd64_linux ();
2932 init_registers_amd64_avx_linux ();
2933 init_registers_amd64_avx512_linux ();
2934 init_registers_amd64_mpx_linux ();
2935 init_registers_amd64_avx_mpx_linux ();
2936
2937 init_registers_x32_linux ();
2938 init_registers_x32_avx_linux ();
2939 init_registers_x32_avx512_linux ();
2940
2941 tdesc_amd64_linux_no_xml = XNEW (struct target_desc);
2942 copy_target_description (tdesc_amd64_linux_no_xml, tdesc_amd64_linux);
2943 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
2944 #endif
2945 init_registers_i386_linux ();
2946 init_registers_i386_mmx_linux ();
2947 init_registers_i386_avx_linux ();
2948 init_registers_i386_avx512_linux ();
2949 init_registers_i386_mpx_linux ();
2950 init_registers_i386_avx_mpx_linux ();
2951
2952 tdesc_i386_linux_no_xml = XNEW (struct target_desc);
2953 copy_target_description (tdesc_i386_linux_no_xml, tdesc_i386_linux);
2954 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
2955
2956 initialize_regsets_info (&x86_regsets_info);
2957 }