1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
3 Copyright (C) 2002-2016 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include <signal.h>
22 #include <limits.h>
23 #include <inttypes.h>
24 #include "linux-low.h"
25 #include "i387-fp.h"
26 #include "x86-low.h"
27 #include "x86-xstate.h"
28 #include "nat/gdb_ptrace.h"
29
30 #include "gdb_proc_service.h"
31 /* Don't include elf/common.h if linux/elf.h got included by
32 gdb_proc_service.h. */
33 #ifndef ELFMAG0
34 #include "elf/common.h"
35 #endif
36
37 #include "agent.h"
38 #include "tdesc.h"
39 #include "tracepoint.h"
40 #include "ax.h"
41 #include "nat/linux-nat.h"
42 #include "nat/x86-linux.h"
43 #include "nat/x86-linux-dregs.h"
44
45 #ifdef __x86_64__
46 /* Defined in auto-generated file amd64-linux.c. */
47 void init_registers_amd64_linux (void);
48 extern const struct target_desc *tdesc_amd64_linux;
49
50 /* Defined in auto-generated file amd64-avx-linux.c. */
51 void init_registers_amd64_avx_linux (void);
52 extern const struct target_desc *tdesc_amd64_avx_linux;
53
54 /* Defined in auto-generated file amd64-avx512-linux.c. */
55 void init_registers_amd64_avx512_linux (void);
56 extern const struct target_desc *tdesc_amd64_avx512_linux;
57
58 /* Defined in auto-generated file amd64-mpx-linux.c. */
59 void init_registers_amd64_mpx_linux (void);
60 extern const struct target_desc *tdesc_amd64_mpx_linux;
61
62 /* Defined in auto-generated file x32-linux.c. */
63 void init_registers_x32_linux (void);
64 extern const struct target_desc *tdesc_x32_linux;
65
66 /* Defined in auto-generated file x32-avx-linux.c. */
67 void init_registers_x32_avx_linux (void);
68 extern const struct target_desc *tdesc_x32_avx_linux;
69
70 /* Defined in auto-generated file x32-avx512-linux.c. */
71 void init_registers_x32_avx512_linux (void);
72 extern const struct target_desc *tdesc_x32_avx512_linux;
73
74 #endif
75
76 /* Defined in auto-generated file i386-linux.c. */
77 void init_registers_i386_linux (void);
78 extern const struct target_desc *tdesc_i386_linux;
79
80 /* Defined in auto-generated file i386-mmx-linux.c. */
81 void init_registers_i386_mmx_linux (void);
82 extern const struct target_desc *tdesc_i386_mmx_linux;
83
84 /* Defined in auto-generated file i386-avx-linux.c. */
85 void init_registers_i386_avx_linux (void);
86 extern const struct target_desc *tdesc_i386_avx_linux;
87
88 /* Defined in auto-generated file i386-avx512-linux.c. */
89 void init_registers_i386_avx512_linux (void);
90 extern const struct target_desc *tdesc_i386_avx512_linux;
91
92 /* Defined in auto-generated file i386-mpx-linux.c. */
93 void init_registers_i386_mpx_linux (void);
94 extern const struct target_desc *tdesc_i386_mpx_linux;
95
96 #ifdef __x86_64__
97 static struct target_desc *tdesc_amd64_linux_no_xml;
98 #endif
99 static struct target_desc *tdesc_i386_linux_no_xml;
100
101
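/* jump_insn is a 5-byte "jmp rel32" (opcode 0xe9 plus a 32-bit
   displacement); small_jump_insn is a 4-byte "jmp rel16" (operand-size
   prefix 0x66, opcode 0xe9, 16-bit displacement). The zeroed
   displacement bytes are patched in when the jump pads below are
   wired up. */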
102 static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
103 static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
104
105 /* Backward compatibility for gdb without XML support. */
106
107 static const char *xmltarget_i386_linux_no_xml = "@<target>\
108 <architecture>i386</architecture>\
109 <osabi>GNU/Linux</osabi>\
110 </target>";
111
112 #ifdef __x86_64__
113 static const char *xmltarget_amd64_linux_no_xml = "@<target>\
114 <architecture>i386:x86-64</architecture>\
115 <osabi>GNU/Linux</osabi>\
116 </target>";
117 #endif
118
119 #include <sys/reg.h>
120 #include <sys/procfs.h>
121 #include "nat/gdb_ptrace.h"
122 #include <sys/uio.h>
123
124 #ifndef PTRACE_GET_THREAD_AREA
125 #define PTRACE_GET_THREAD_AREA 25
126 #endif
127
128 /* This definition comes from prctl.h, but some kernels may not have it. */
129 #ifndef PTRACE_ARCH_PRCTL
130 #define PTRACE_ARCH_PRCTL 30
131 #endif
132
133 /* The following definitions come from prctl.h, but may be absent
134 for certain configurations. */
135 #ifndef ARCH_GET_FS
136 #define ARCH_SET_GS 0x1001
137 #define ARCH_SET_FS 0x1002
138 #define ARCH_GET_FS 0x1003
139 #define ARCH_GET_GS 0x1004
140 #endif
141
142 /* Per-process arch-specific data we want to keep. */
143
144 struct arch_process_info
145 {
146 struct x86_debug_reg_state debug_reg_state;
147 };
148
149 #ifdef __x86_64__
150
151 /* Mapping between the general-purpose registers in `struct user'
152 format and GDB's register array layout.
153 Note that the transfer layout uses 64-bit regs. */
154 static /*const*/ int i386_regmap[] =
155 {
156 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
157 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
158 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
159 DS * 8, ES * 8, FS * 8, GS * 8
160 };
161
162 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
163
164 /* So the code below doesn't have to care whether it's i386 or amd64. */
165 #define ORIG_EAX ORIG_RAX
166 #define REGSIZE 8
167
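/* In the map below, -1 marks GDB register numbers that are not part of
   the general-purpose regset; x86_fill_gregset and x86_store_gregset
   skip them, and those registers are supplied by the floating-point
   and xstate regsets instead. */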
168 static const int x86_64_regmap[] =
169 {
170 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
171 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
172 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
173 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
174 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
175 DS * 8, ES * 8, FS * 8, GS * 8,
176 -1, -1, -1, -1, -1, -1, -1, -1,
177 -1, -1, -1, -1, -1, -1, -1, -1,
178 -1, -1, -1, -1, -1, -1, -1, -1,
179 -1,
180 -1, -1, -1, -1, -1, -1, -1, -1,
181 ORIG_RAX * 8,
182 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
183 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
184 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
185 -1, -1, -1, -1, -1, -1, -1, -1,
186 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
187 -1, -1, -1, -1, -1, -1, -1, -1,
188 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
189 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
190 -1, -1, -1, -1, -1, -1, -1, -1,
191 -1, -1, -1, -1, -1, -1, -1, -1,
192 -1, -1, -1, -1, -1, -1, -1, -1
193 };
194
195 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
196 #define X86_64_USER_REGS (GS + 1)
197
198 #else /* ! __x86_64__ */
199
200 /* Mapping between the general-purpose registers in `struct user'
201 format and GDB's register array layout. */
202 static /*const*/ int i386_regmap[] =
203 {
204 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
205 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
206 EIP * 4, EFL * 4, CS * 4, SS * 4,
207 DS * 4, ES * 4, FS * 4, GS * 4
208 };
209
210 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
211
212 #define REGSIZE 4
213
214 #endif
215
216 #ifdef __x86_64__
217
218 /* Returns true if the current inferior belongs to an x86-64 process,
219 per the tdesc. */
220
221 static int
222 is_64bit_tdesc (void)
223 {
224 struct regcache *regcache = get_thread_regcache (current_thread, 0);
225
226 return register_size (regcache->tdesc, 0) == 8;
227 }
228
229 #endif
230
231 \f
232 /* Called by libthread_db. */
233
234 ps_err_e
235 ps_get_thread_area (const struct ps_prochandle *ph,
236 lwpid_t lwpid, int idx, void **base)
237 {
238 #ifdef __x86_64__
239 int use_64bit = is_64bit_tdesc ();
240
241 if (use_64bit)
242 {
243 switch (idx)
244 {
245 case FS:
246 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
247 return PS_OK;
248 break;
249 case GS:
250 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
251 return PS_OK;
252 break;
253 default:
254 return PS_BADADDR;
255 }
256 return PS_ERR;
257 }
258 #endif
259
260 {
261 unsigned int desc[4];
262
263 if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
264 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
265 return PS_ERR;
266
267 /* Ensure we properly extend the value to 64-bits for x86_64. */
268 *base = (void *) (uintptr_t) desc[1];
269 return PS_OK;
270 }
271 }
272
273 /* Get the thread area address. This is used to recognize which
274 thread is which when tracing with the in-process agent library. We
275 don't read anything from the address, and treat it as opaque; it's
276 the address itself that we assume is unique per-thread. */
277
278 static int
279 x86_get_thread_area (int lwpid, CORE_ADDR *addr)
280 {
281 #ifdef __x86_64__
282 int use_64bit = is_64bit_tdesc ();
283
284 if (use_64bit)
285 {
286 void *base;
287 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
288 {
289 *addr = (CORE_ADDR) (uintptr_t) base;
290 return 0;
291 }
292
293 return -1;
294 }
295 #endif
296
297 {
298 struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
299 struct thread_info *thr = get_lwp_thread (lwp);
300 struct regcache *regcache = get_thread_regcache (thr, 1);
301 unsigned int desc[4];
302 ULONGEST gs = 0;
303 const int reg_thread_area = 3; /* A selector's low 3 bits are RPL/TI; shift them off to get the descriptor index. */
304 int idx;
305
306 collect_register_by_name (regcache, "gs", &gs);
307
308 idx = gs >> reg_thread_area;
309
310 if (ptrace (PTRACE_GET_THREAD_AREA,
311 lwpid_of (thr),
312 (void *) (long) idx, (unsigned long) &desc) < 0)
313 return -1;
314
315 *addr = desc[1];
316 return 0;
317 }
318 }
319
320
321 \f
322 static int
323 x86_cannot_store_register (int regno)
324 {
325 #ifdef __x86_64__
326 if (is_64bit_tdesc ())
327 return 0;
328 #endif
329
330 return regno >= I386_NUM_REGS;
331 }
332
333 static int
334 x86_cannot_fetch_register (int regno)
335 {
336 #ifdef __x86_64__
337 if (is_64bit_tdesc ())
338 return 0;
339 #endif
340
341 return regno >= I386_NUM_REGS;
342 }
343
344 static void
345 x86_fill_gregset (struct regcache *regcache, void *buf)
346 {
347 int i;
348
349 #ifdef __x86_64__
350 if (register_size (regcache->tdesc, 0) == 8)
351 {
352 for (i = 0; i < X86_64_NUM_REGS; i++)
353 if (x86_64_regmap[i] != -1)
354 collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
355 return;
356 }
357
358 /* 32-bit inferior registers need to be zero-extended.
359 Callers would read uninitialized memory otherwise. */
360 memset (buf, 0x00, X86_64_USER_REGS * 8);
361 #endif
362
363 for (i = 0; i < I386_NUM_REGS; i++)
364 collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);
365
366 collect_register_by_name (regcache, "orig_eax",
367 ((char *) buf) + ORIG_EAX * REGSIZE);
368 }
369
370 static void
371 x86_store_gregset (struct regcache *regcache, const void *buf)
372 {
373 int i;
374
375 #ifdef __x86_64__
376 if (register_size (regcache->tdesc, 0) == 8)
377 {
378 for (i = 0; i < X86_64_NUM_REGS; i++)
379 if (x86_64_regmap[i] != -1)
380 supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
381 return;
382 }
383 #endif
384
385 for (i = 0; i < I386_NUM_REGS; i++)
386 supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
387
388 supply_register_by_name (regcache, "orig_eax",
389 ((char *) buf) + ORIG_EAX * REGSIZE);
390 }
391
392 static void
393 x86_fill_fpregset (struct regcache *regcache, void *buf)
394 {
395 #ifdef __x86_64__
396 i387_cache_to_fxsave (regcache, buf);
397 #else
398 i387_cache_to_fsave (regcache, buf);
399 #endif
400 }
401
402 static void
403 x86_store_fpregset (struct regcache *regcache, const void *buf)
404 {
405 #ifdef __x86_64__
406 i387_fxsave_to_cache (regcache, buf);
407 #else
408 i387_fsave_to_cache (regcache, buf);
409 #endif
410 }
411
412 #ifndef __x86_64__
413
414 static void
415 x86_fill_fpxregset (struct regcache *regcache, void *buf)
416 {
417 i387_cache_to_fxsave (regcache, buf);
418 }
419
420 static void
421 x86_store_fpxregset (struct regcache *regcache, const void *buf)
422 {
423 i387_fxsave_to_cache (regcache, buf);
424 }
425
426 #endif
427
428 static void
429 x86_fill_xstateregset (struct regcache *regcache, void *buf)
430 {
431 i387_cache_to_xsave (regcache, buf);
432 }
433
434 static void
435 x86_store_xstateregset (struct regcache *regcache, const void *buf)
436 {
437 i387_xsave_to_cache (regcache, buf);
438 }
439
440 /* ??? The non-biarch i386 case stores all the i387 regs twice.
441 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
442 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
443 doesn't work. It would be nice to avoid the duplication in the case where it
444 does work. Maybe the arch_setup routine could check whether it works
445 and update the supported regsets accordingly. */
446
447 static struct regset_info x86_regsets[] =
448 {
449 #ifdef HAVE_PTRACE_GETREGS
450 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
451 GENERAL_REGS,
452 x86_fill_gregset, x86_store_gregset },
453 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
454 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
455 # ifndef __x86_64__
456 # ifdef HAVE_PTRACE_GETFPXREGS
457 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
458 EXTENDED_REGS,
459 x86_fill_fpxregset, x86_store_fpxregset },
460 # endif
461 # endif
462 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
463 FP_REGS,
464 x86_fill_fpregset, x86_store_fpregset },
465 #endif /* HAVE_PTRACE_GETREGS */
466 NULL_REGSET
467 };
468
469 static CORE_ADDR
470 x86_get_pc (struct regcache *regcache)
471 {
472 int use_64bit = register_size (regcache->tdesc, 0) == 8;
473
474 if (use_64bit)
475 {
476 unsigned long pc;
477 collect_register_by_name (regcache, "rip", &pc);
478 return (CORE_ADDR) pc;
479 }
480 else
481 {
482 unsigned int pc;
483 collect_register_by_name (regcache, "eip", &pc);
484 return (CORE_ADDR) pc;
485 }
486 }
487
488 static void
489 x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
490 {
491 int use_64bit = register_size (regcache->tdesc, 0) == 8;
492
493 if (use_64bit)
494 {
495 unsigned long newpc = pc;
496 supply_register_by_name (regcache, "rip", &newpc);
497 }
498 else
499 {
500 unsigned int newpc = pc;
501 supply_register_by_name (regcache, "eip", &newpc);
502 }
503 }
504 \f
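/* 0xCC is the one-byte "int3" software breakpoint instruction. */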
505 static const gdb_byte x86_breakpoint[] = { 0xCC };
506 #define x86_breakpoint_len 1
507
508 static int
509 x86_breakpoint_at (CORE_ADDR pc)
510 {
511 unsigned char c;
512
513 (*the_target->read_memory) (pc, &c, 1);
514 if (c == 0xCC)
515 return 1;
516
517 return 0;
518 }
519 \f
520 /* Low-level function vector. */
521 struct x86_dr_low_type x86_dr_low =
522 {
523 x86_linux_dr_set_control,
524 x86_linux_dr_set_addr,
525 x86_linux_dr_get_addr,
526 x86_linux_dr_get_status,
527 x86_linux_dr_get_control,
528 sizeof (void *),
529 };
530 \f
531 /* Breakpoint/Watchpoint support. */
532
533 static int
534 x86_supports_z_point_type (char z_type)
535 {
536 switch (z_type)
537 {
538 case Z_PACKET_SW_BP:
539 case Z_PACKET_HW_BP:
540 case Z_PACKET_WRITE_WP:
541 case Z_PACKET_ACCESS_WP:
542 return 1;
543 default:
544 return 0;
545 }
546 }
547
548 static int
549 x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
550 int size, struct raw_breakpoint *bp)
551 {
552 struct process_info *proc = current_process ();
553
554 switch (type)
555 {
556 case raw_bkpt_type_hw:
557 case raw_bkpt_type_write_wp:
558 case raw_bkpt_type_access_wp:
559 {
560 enum target_hw_bp_type hw_type
561 = raw_bkpt_type_to_target_hw_bp_type (type);
562 struct x86_debug_reg_state *state
563 = &proc->priv->arch_private->debug_reg_state;
564
565 return x86_dr_insert_watchpoint (state, hw_type, addr, size);
566 }
567
568 default:
569 /* Unsupported. */
570 return 1;
571 }
572 }
573
574 static int
575 x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
576 int size, struct raw_breakpoint *bp)
577 {
578 struct process_info *proc = current_process ();
579
580 switch (type)
581 {
582 case raw_bkpt_type_hw:
583 case raw_bkpt_type_write_wp:
584 case raw_bkpt_type_access_wp:
585 {
586 enum target_hw_bp_type hw_type
587 = raw_bkpt_type_to_target_hw_bp_type (type);
588 struct x86_debug_reg_state *state
589 = &proc->priv->arch_private->debug_reg_state;
590
591 return x86_dr_remove_watchpoint (state, hw_type, addr, size);
592 }
593 default:
594 /* Unsupported. */
595 return 1;
596 }
597 }
598
599 static int
600 x86_stopped_by_watchpoint (void)
601 {
602 struct process_info *proc = current_process ();
603 return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
604 }
605
606 static CORE_ADDR
607 x86_stopped_data_address (void)
608 {
609 struct process_info *proc = current_process ();
610 CORE_ADDR addr;
611 if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
612 &addr))
613 return addr;
614 return 0;
615 }
616 \f
617 /* Called when a new process is created. */
618
619 static struct arch_process_info *
620 x86_linux_new_process (void)
621 {
622 struct arch_process_info *info = XCNEW (struct arch_process_info);
623
624 x86_low_init_dregs (&info->debug_reg_state);
625
626 return info;
627 }
628
629 /* Target routine for linux_new_fork. */
630
631 static void
632 x86_linux_new_fork (struct process_info *parent, struct process_info *child)
633 {
634 /* These are allocated by linux_add_process. */
635 gdb_assert (parent->priv != NULL
636 && parent->priv->arch_private != NULL);
637 gdb_assert (child->priv != NULL
638 && child->priv->arch_private != NULL);
639
640 /* Linux kernel before 2.6.33 commit
641 72f674d203cd230426437cdcf7dd6f681dad8b0d
642 will inherit hardware debug registers from parent
643 on fork/vfork/clone. Newer Linux kernels create such tasks with
644 zeroed debug registers.
645
646 GDB core assumes the child inherits the watchpoints/hw
647 breakpoints of the parent, and will remove them all from the
648 forked-off process. Copy the debug register mirrors into the
649 new process so that all breakpoints and watchpoints can be
650 removed together. The debug register mirrors will end up zeroed
651 before the forked-off process is detached, which keeps this
652 compatible with older Linux kernels too. */
653
654 *child->priv->arch_private = *parent->priv->arch_private;
655 }
656
657 /* See nat/x86-dregs.h. */
658
659 struct x86_debug_reg_state *
660 x86_debug_reg_state (pid_t pid)
661 {
662 struct process_info *proc = find_process_pid (pid);
663
664 return &proc->priv->arch_private->debug_reg_state;
665 }
666 \f
667 /* When GDBSERVER is built as a 64-bit application on linux, the
668 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
669 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
670 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
671 conversion in-place ourselves. */
672
673 /* These types below (compat_*) define a siginfo type that is layout
674 compatible with the siginfo type exported by the 32-bit userspace
675 support. */
676
677 #ifdef __x86_64__
678
679 typedef int compat_int_t;
680 typedef unsigned int compat_uptr_t;
681
682 typedef int compat_time_t;
683 typedef int compat_timer_t;
684 typedef int compat_clock_t;
685
686 struct compat_timeval
687 {
688 compat_time_t tv_sec;
689 int tv_usec;
690 };
691
692 typedef union compat_sigval
693 {
694 compat_int_t sival_int;
695 compat_uptr_t sival_ptr;
696 } compat_sigval_t;
697
698 typedef struct compat_siginfo
699 {
700 int si_signo;
701 int si_errno;
702 int si_code;
703
704 union
705 {
706 int _pad[((128 / sizeof (int)) - 3)];
707
708 /* kill() */
709 struct
710 {
711 unsigned int _pid;
712 unsigned int _uid;
713 } _kill;
714
715 /* POSIX.1b timers */
716 struct
717 {
718 compat_timer_t _tid;
719 int _overrun;
720 compat_sigval_t _sigval;
721 } _timer;
722
723 /* POSIX.1b signals */
724 struct
725 {
726 unsigned int _pid;
727 unsigned int _uid;
728 compat_sigval_t _sigval;
729 } _rt;
730
731 /* SIGCHLD */
732 struct
733 {
734 unsigned int _pid;
735 unsigned int _uid;
736 int _status;
737 compat_clock_t _utime;
738 compat_clock_t _stime;
739 } _sigchld;
740
741 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
742 struct
743 {
744 unsigned int _addr;
745 } _sigfault;
746
747 /* SIGPOLL */
748 struct
749 {
750 int _band;
751 int _fd;
752 } _sigpoll;
753 } _sifields;
754 } compat_siginfo_t;
755
756 /* For x32, clock_t in _sigchld is 64bit aligned at 4 bytes. */
757 typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t;
758
759 typedef struct compat_x32_siginfo
760 {
761 int si_signo;
762 int si_errno;
763 int si_code;
764
765 union
766 {
767 int _pad[((128 / sizeof (int)) - 3)];
768
769 /* kill() */
770 struct
771 {
772 unsigned int _pid;
773 unsigned int _uid;
774 } _kill;
775
776 /* POSIX.1b timers */
777 struct
778 {
779 compat_timer_t _tid;
780 int _overrun;
781 compat_sigval_t _sigval;
782 } _timer;
783
784 /* POSIX.1b signals */
785 struct
786 {
787 unsigned int _pid;
788 unsigned int _uid;
789 compat_sigval_t _sigval;
790 } _rt;
791
792 /* SIGCHLD */
793 struct
794 {
795 unsigned int _pid;
796 unsigned int _uid;
797 int _status;
798 compat_x32_clock_t _utime;
799 compat_x32_clock_t _stime;
800 } _sigchld;
801
802 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
803 struct
804 {
805 unsigned int _addr;
806 } _sigfault;
807
808 /* SIGPOLL */
809 struct
810 {
811 int _band;
812 int _fd;
813 } _sigpoll;
814 } _sifields;
815 } compat_x32_siginfo_t __attribute__ ((__aligned__ (8)));
816
817 #define cpt_si_pid _sifields._kill._pid
818 #define cpt_si_uid _sifields._kill._uid
819 #define cpt_si_timerid _sifields._timer._tid
820 #define cpt_si_overrun _sifields._timer._overrun
821 #define cpt_si_status _sifields._sigchld._status
822 #define cpt_si_utime _sifields._sigchld._utime
823 #define cpt_si_stime _sifields._sigchld._stime
824 #define cpt_si_ptr _sifields._rt._sigval.sival_ptr
825 #define cpt_si_addr _sifields._sigfault._addr
826 #define cpt_si_band _sifields._sigpoll._band
827 #define cpt_si_fd _sifields._sigpoll._fd
828
829 /* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
830 In their place are si_timer1 and si_timer2. */
831 #ifndef si_timerid
832 #define si_timerid si_timer1
833 #endif
834 #ifndef si_overrun
835 #define si_overrun si_timer2
836 #endif
837
838 static void
839 compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
840 {
841 memset (to, 0, sizeof (*to));
842
843 to->si_signo = from->si_signo;
844 to->si_errno = from->si_errno;
845 to->si_code = from->si_code;
846
847 if (to->si_code == SI_TIMER)
848 {
849 to->cpt_si_timerid = from->si_timerid;
850 to->cpt_si_overrun = from->si_overrun;
851 to->cpt_si_ptr = (intptr_t) from->si_ptr;
852 }
853 else if (to->si_code == SI_USER)
854 {
855 to->cpt_si_pid = from->si_pid;
856 to->cpt_si_uid = from->si_uid;
857 }
858 else if (to->si_code < 0)
859 {
860 to->cpt_si_pid = from->si_pid;
861 to->cpt_si_uid = from->si_uid;
862 to->cpt_si_ptr = (intptr_t) from->si_ptr;
863 }
864 else
865 {
866 switch (to->si_signo)
867 {
868 case SIGCHLD:
869 to->cpt_si_pid = from->si_pid;
870 to->cpt_si_uid = from->si_uid;
871 to->cpt_si_status = from->si_status;
872 to->cpt_si_utime = from->si_utime;
873 to->cpt_si_stime = from->si_stime;
874 break;
875 case SIGILL:
876 case SIGFPE:
877 case SIGSEGV:
878 case SIGBUS:
879 to->cpt_si_addr = (intptr_t) from->si_addr;
880 break;
881 case SIGPOLL:
882 to->cpt_si_band = from->si_band;
883 to->cpt_si_fd = from->si_fd;
884 break;
885 default:
886 to->cpt_si_pid = from->si_pid;
887 to->cpt_si_uid = from->si_uid;
888 to->cpt_si_ptr = (intptr_t) from->si_ptr;
889 break;
890 }
891 }
892 }
893
894 static void
895 siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
896 {
897 memset (to, 0, sizeof (*to));
898
899 to->si_signo = from->si_signo;
900 to->si_errno = from->si_errno;
901 to->si_code = from->si_code;
902
903 if (to->si_code == SI_TIMER)
904 {
905 to->si_timerid = from->cpt_si_timerid;
906 to->si_overrun = from->cpt_si_overrun;
907 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
908 }
909 else if (to->si_code == SI_USER)
910 {
911 to->si_pid = from->cpt_si_pid;
912 to->si_uid = from->cpt_si_uid;
913 }
914 else if (to->si_code < 0)
915 {
916 to->si_pid = from->cpt_si_pid;
917 to->si_uid = from->cpt_si_uid;
918 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
919 }
920 else
921 {
922 switch (to->si_signo)
923 {
924 case SIGCHLD:
925 to->si_pid = from->cpt_si_pid;
926 to->si_uid = from->cpt_si_uid;
927 to->si_status = from->cpt_si_status;
928 to->si_utime = from->cpt_si_utime;
929 to->si_stime = from->cpt_si_stime;
930 break;
931 case SIGILL:
932 case SIGFPE:
933 case SIGSEGV:
934 case SIGBUS:
935 to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
936 break;
937 case SIGPOLL:
938 to->si_band = from->cpt_si_band;
939 to->si_fd = from->cpt_si_fd;
940 break;
941 default:
942 to->si_pid = from->cpt_si_pid;
943 to->si_uid = from->cpt_si_uid;
944 to->si_ptr = (void* ) (intptr_t) from->cpt_si_ptr;
945 break;
946 }
947 }
948 }
949
950 static void
951 compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t *to,
952 siginfo_t *from)
953 {
954 memset (to, 0, sizeof (*to));
955
956 to->si_signo = from->si_signo;
957 to->si_errno = from->si_errno;
958 to->si_code = from->si_code;
959
960 if (to->si_code == SI_TIMER)
961 {
962 to->cpt_si_timerid = from->si_timerid;
963 to->cpt_si_overrun = from->si_overrun;
964 to->cpt_si_ptr = (intptr_t) from->si_ptr;
965 }
966 else if (to->si_code == SI_USER)
967 {
968 to->cpt_si_pid = from->si_pid;
969 to->cpt_si_uid = from->si_uid;
970 }
971 else if (to->si_code < 0)
972 {
973 to->cpt_si_pid = from->si_pid;
974 to->cpt_si_uid = from->si_uid;
975 to->cpt_si_ptr = (intptr_t) from->si_ptr;
976 }
977 else
978 {
979 switch (to->si_signo)
980 {
981 case SIGCHLD:
982 to->cpt_si_pid = from->si_pid;
983 to->cpt_si_uid = from->si_uid;
984 to->cpt_si_status = from->si_status;
985 to->cpt_si_utime = from->si_utime;
986 to->cpt_si_stime = from->si_stime;
987 break;
988 case SIGILL:
989 case SIGFPE:
990 case SIGSEGV:
991 case SIGBUS:
992 to->cpt_si_addr = (intptr_t) from->si_addr;
993 break;
994 case SIGPOLL:
995 to->cpt_si_band = from->si_band;
996 to->cpt_si_fd = from->si_fd;
997 break;
998 default:
999 to->cpt_si_pid = from->si_pid;
1000 to->cpt_si_uid = from->si_uid;
1001 to->cpt_si_ptr = (intptr_t) from->si_ptr;
1002 break;
1003 }
1004 }
1005 }
1006
1007 static void
1008 siginfo_from_compat_x32_siginfo (siginfo_t *to,
1009 compat_x32_siginfo_t *from)
1010 {
1011 memset (to, 0, sizeof (*to));
1012
1013 to->si_signo = from->si_signo;
1014 to->si_errno = from->si_errno;
1015 to->si_code = from->si_code;
1016
1017 if (to->si_code == SI_TIMER)
1018 {
1019 to->si_timerid = from->cpt_si_timerid;
1020 to->si_overrun = from->cpt_si_overrun;
1021 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
1022 }
1023 else if (to->si_code == SI_USER)
1024 {
1025 to->si_pid = from->cpt_si_pid;
1026 to->si_uid = from->cpt_si_uid;
1027 }
1028 else if (to->si_code < 0)
1029 {
1030 to->si_pid = from->cpt_si_pid;
1031 to->si_uid = from->cpt_si_uid;
1032 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
1033 }
1034 else
1035 {
1036 switch (to->si_signo)
1037 {
1038 case SIGCHLD:
1039 to->si_pid = from->cpt_si_pid;
1040 to->si_uid = from->cpt_si_uid;
1041 to->si_status = from->cpt_si_status;
1042 to->si_utime = from->cpt_si_utime;
1043 to->si_stime = from->cpt_si_stime;
1044 break;
1045 case SIGILL:
1046 case SIGFPE:
1047 case SIGSEGV:
1048 case SIGBUS:
1049 to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
1050 break;
1051 case SIGPOLL:
1052 to->si_band = from->cpt_si_band;
1053 to->si_fd = from->cpt_si_fd;
1054 break;
1055 default:
1056 to->si_pid = from->cpt_si_pid;
1057 to->si_uid = from->cpt_si_uid;
1058 to->si_ptr = (void* ) (intptr_t) from->cpt_si_ptr;
1059 break;
1060 }
1061 }
1062 }
1063
1064 #endif /* __x86_64__ */
1065
1066 /* Convert a native/host siginfo object into/from the siginfo in the
1067 layout of the inferior's architecture. Returns true if any
1068 conversion was done; false otherwise. If DIRECTION is 1, then copy
1069 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
1070 INF. */
1071
1072 static int
1073 x86_siginfo_fixup (siginfo_t *native, void *inf, int direction)
1074 {
1075 #ifdef __x86_64__
1076 unsigned int machine;
1077 int tid = lwpid_of (current_thread);
1078 int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
1079
1080 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
1081 if (!is_64bit_tdesc ())
1082 {
1083 gdb_assert (sizeof (siginfo_t) == sizeof (compat_siginfo_t));
1084
1085 if (direction == 0)
1086 compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
1087 else
1088 siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);
1089
1090 return 1;
1091 }
1092 /* No fixup for native x32 GDB. */
1093 else if (!is_elf64 && sizeof (void *) == 8)
1094 {
1095 gdb_assert (sizeof (siginfo_t) == sizeof (compat_x32_siginfo_t));
1096
1097 if (direction == 0)
1098 compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo *) inf,
1099 native);
1100 else
1101 siginfo_from_compat_x32_siginfo (native,
1102 (struct compat_x32_siginfo *) inf);
1103
1104 return 1;
1105 }
1106 #endif
1107
1108 return 0;
1109 }
1110 \f
1111 static int use_xml;
1112
1113 /* Format of XSAVE extended state is:
1114 struct
1115 {
1116 fxsave_bytes[0..463]
1117 sw_usable_bytes[464..511]
1118 xstate_hdr_bytes[512..575]
1119 avx_bytes[576..831]
1120 future_state etc
1121 };
1122
1123 Same memory layout will be used for the coredump NT_X86_XSTATE
1124 representing the XSAVE extended state registers.
1125
1126 The first 8 bytes of sw_usable_bytes[464..471] are the OS-enabled
1127 extended state mask, which is the same as the extended control register
1128 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
1129 together with the mask saved in the xstate_hdr_bytes to determine what
1130 states the processor/OS supports and what state, used or initialized,
1131 the process/thread is in. */
1132 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
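/* A minimal sketch of how that mask would be read, assuming XSAVE_BUF
   points at a raw XSAVE area fetched via PTRACE_GETREGSET with
   NT_X86_XSTATE:

     uint64_t xcr0;
     memcpy (&xcr0, (char *) xsave_buf + I386_LINUX_XSAVE_XCR0_OFFSET,
             sizeof (xcr0));

   x86_linux_read_description below does the equivalent by indexing an
   array of uint64_t. */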
1133
1134 /* Does the current host support the GETFPXREGS request? The header
1135 file may or may not define it, and even if it is defined, the
1136 kernel will return EIO if it's running on a pre-SSE processor. */
1137 int have_ptrace_getfpxregs =
1138 #ifdef HAVE_PTRACE_GETFPXREGS
1139 -1
1140 #else
1141 0
1142 #endif
1143 ;
1144
1145 /* Get Linux/x86 target description from running target. */
1146
1147 static const struct target_desc *
1148 x86_linux_read_description (void)
1149 {
1150 unsigned int machine;
1151 int is_elf64;
1152 int xcr0_features;
1153 int tid;
1154 static uint64_t xcr0;
1155 struct regset_info *regset;
1156
1157 tid = lwpid_of (current_thread);
1158
1159 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
1160
1161 if (sizeof (void *) == 4)
1162 {
1163 if (is_elf64 > 0)
1164 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
1165 #ifndef __x86_64__
1166 else if (machine == EM_X86_64)
1167 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
1168 #endif
1169 }
1170
1171 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
1172 if (machine == EM_386 && have_ptrace_getfpxregs == -1)
1173 {
1174 elf_fpxregset_t fpxregs;
1175
1176 if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
1177 {
1178 have_ptrace_getfpxregs = 0;
1179 have_ptrace_getregset = 0;
1180 return tdesc_i386_mmx_linux;
1181 }
1182 else
1183 have_ptrace_getfpxregs = 1;
1184 }
1185 #endif
1186
1187 if (!use_xml)
1188 {
1189 x86_xcr0 = X86_XSTATE_SSE_MASK;
1190
1191 /* Don't use XML. */
1192 #ifdef __x86_64__
1193 if (machine == EM_X86_64)
1194 return tdesc_amd64_linux_no_xml;
1195 else
1196 #endif
1197 return tdesc_i386_linux_no_xml;
1198 }
1199
1200 if (have_ptrace_getregset == -1)
1201 {
1202 uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
1203 struct iovec iov;
1204
1205 iov.iov_base = xstateregs;
1206 iov.iov_len = sizeof (xstateregs);
1207
1208 /* Check if PTRACE_GETREGSET works. */
1209 if (ptrace (PTRACE_GETREGSET, tid,
1210 (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
1211 have_ptrace_getregset = 0;
1212 else
1213 {
1214 have_ptrace_getregset = 1;
1215
1216 /* Get XCR0 from XSAVE extended state. */
1217 xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
1218 / sizeof (uint64_t))];
1219
1220 /* Use PTRACE_GETREGSET if it is available. */
1221 for (regset = x86_regsets;
1222 regset->fill_function != NULL; regset++)
1223 if (regset->get_request == PTRACE_GETREGSET)
1224 regset->size = X86_XSTATE_SIZE (xcr0);
1225 else if (regset->type != GENERAL_REGS)
1226 regset->size = 0;
1227 }
1228 }
1229
1230 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
1231 xcr0_features = (have_ptrace_getregset
1232 && (xcr0 & X86_XSTATE_ALL_MASK));
1233
1234 if (xcr0_features)
1235 x86_xcr0 = xcr0;
1236
1237 if (machine == EM_X86_64)
1238 {
1239 #ifdef __x86_64__
1240 if (is_elf64)
1241 {
1242 if (xcr0_features)
1243 {
1244 switch (xcr0 & X86_XSTATE_ALL_MASK)
1245 {
1246 case X86_XSTATE_AVX512_MASK:
1247 return tdesc_amd64_avx512_linux;
1248
1249 case X86_XSTATE_MPX_MASK:
1250 return tdesc_amd64_mpx_linux;
1251
1252 case X86_XSTATE_AVX_MASK:
1253 return tdesc_amd64_avx_linux;
1254
1255 default:
1256 return tdesc_amd64_linux;
1257 }
1258 }
1259 else
1260 return tdesc_amd64_linux;
1261 }
1262 else
1263 {
1264 if (xcr0_features)
1265 {
1266 switch (xcr0 & X86_XSTATE_ALL_MASK)
1267 {
1268 case X86_XSTATE_AVX512_MASK:
1269 return tdesc_x32_avx512_linux;
1270
1271 case X86_XSTATE_MPX_MASK: /* No MPX on x32. */
1272 case X86_XSTATE_AVX_MASK:
1273 return tdesc_x32_avx_linux;
1274
1275 default:
1276 return tdesc_x32_linux;
1277 }
1278 }
1279 else
1280 return tdesc_x32_linux;
1281 }
1282 #endif
1283 }
1284 else
1285 {
1286 if (xcr0_features)
1287 {
1288 switch (xcr0 & X86_XSTATE_ALL_MASK)
1289 {
1290 case (X86_XSTATE_AVX512_MASK):
1291 return tdesc_i386_avx512_linux;
1292
1293 case (X86_XSTATE_MPX_MASK):
1294 return tdesc_i386_mpx_linux;
1295
1296 case (X86_XSTATE_AVX_MASK):
1297 return tdesc_i386_avx_linux;
1298
1299 default:
1300 return tdesc_i386_linux;
1301 }
1302 }
1303 else
1304 return tdesc_i386_linux;
1305 }
1306
1307 gdb_assert_not_reached ("failed to return tdesc");
1308 }
1309
1310 /* Callback for find_inferior. Stops iteration when a thread with a
1311 given PID is found. */
1312
1313 static int
1314 same_process_callback (struct inferior_list_entry *entry, void *data)
1315 {
1316 int pid = *(int *) data;
1317
1318 return (ptid_get_pid (entry->id) == pid);
1319 }
1320
1321 /* Callback for for_each_inferior. Calls the arch_setup routine for
1322 each process. */
1323
1324 static void
1325 x86_arch_setup_process_callback (struct inferior_list_entry *entry)
1326 {
1327 int pid = ptid_get_pid (entry->id);
1328
1329 /* Look up any thread of this process. */
1330 current_thread
1331 = (struct thread_info *) find_inferior (&all_threads,
1332 same_process_callback, &pid);
1333
1334 the_low_target.arch_setup ();
1335 }
1336
1337 /* Update the target description of all processes; a new GDB has
1338 connected, and it may or may not support XML target descriptions. */
1339
1340 static void
1341 x86_linux_update_xmltarget (void)
1342 {
1343 struct thread_info *saved_thread = current_thread;
1344
1345 /* Before changing the register cache's internal layout, flush the
1346 contents of the current valid caches back to the threads, and
1347 release the current regcache objects. */
1348 regcache_release ();
1349
1350 for_each_inferior (&all_processes, x86_arch_setup_process_callback);
1351
1352 current_thread = saved_thread;
1353 }
1354
1355 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1356 PTRACE_GETREGSET. */
1357
1358 static void
1359 x86_linux_process_qsupported (char **features, int count)
1360 {
1361 int i;
1362
1363 /* Assume GDB doesn't support XML unless it sends "xmlRegisters="
1364 with "i386" in its qSupported query, which indicates support for
1365 x86 XML target descriptions. */
1366 use_xml = 0;
1367 for (i = 0; i < count; i++)
1368 {
1369 const char *feature = features[i];
1370
1371 if (startswith (feature, "xmlRegisters="))
1372 {
1373 char *copy = xstrdup (feature + 13);
1374 char *p;
1375
1376 for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
1377 {
1378 if (strcmp (p, "i386") == 0)
1379 {
1380 use_xml = 1;
1381 break;
1382 }
1383 }
1384
1385 free (copy);
1386 }
1387 }
1388 x86_linux_update_xmltarget ();
1389 }
1390
1391 /* Common for x86/x86-64. */
1392
1393 static struct regsets_info x86_regsets_info =
1394 {
1395 x86_regsets, /* regsets */
1396 0, /* num_regsets */
1397 NULL, /* disabled_regsets */
1398 };
1399
1400 #ifdef __x86_64__
1401 static struct regs_info amd64_linux_regs_info =
1402 {
1403 NULL, /* regset_bitmap */
1404 NULL, /* usrregs_info */
1405 &x86_regsets_info
1406 };
1407 #endif
1408 static struct usrregs_info i386_linux_usrregs_info =
1409 {
1410 I386_NUM_REGS,
1411 i386_regmap,
1412 };
1413
1414 static struct regs_info i386_linux_regs_info =
1415 {
1416 NULL, /* regset_bitmap */
1417 &i386_linux_usrregs_info,
1418 &x86_regsets_info
1419 };
1420
1421 const struct regs_info *
1422 x86_linux_regs_info (void)
1423 {
1424 #ifdef __x86_64__
1425 if (is_64bit_tdesc ())
1426 return &amd64_linux_regs_info;
1427 else
1428 #endif
1429 return &i386_linux_regs_info;
1430 }
1431
1432 /* Initialize the target description for the architecture of the
1433 inferior. */
1434
1435 static void
1436 x86_arch_setup (void)
1437 {
1438 current_process ()->tdesc = x86_linux_read_description ();
1439 }
1440
1441 static int
1442 x86_supports_tracepoints (void)
1443 {
1444 return 1;
1445 }
1446
1447 static void
1448 append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
1449 {
1450 write_inferior_memory (*to, buf, len);
1451 *to += len;
1452 }
1453
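/* Append to BUF the bytes encoded by the hexadecimal string OP
   (e.g. "48 83 ec 18"); return the number of bytes written. */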
1454 static int
1455 push_opcode (unsigned char *buf, char *op)
1456 {
1457 unsigned char *buf_org = buf;
1458
1459 while (1)
1460 {
1461 char *endptr;
1462 unsigned long ul = strtoul (op, &endptr, 16);
1463
1464 if (endptr == op)
1465 break;
1466
1467 *buf++ = ul;
1468 op = endptr;
1469 }
1470
1471 return buf - buf_org;
1472 }
1473
1474 #ifdef __x86_64__
1475
1476 /* Build a jump pad that saves registers and calls a collection
1477 function. Writes the jump instruction that jumps to the jump pad
1478 into JJUMPAD_INSN. The caller is responsible for writing it in at
1479 the tracepoint address. */
1480
1481 static int
1482 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1483 CORE_ADDR collector,
1484 CORE_ADDR lockaddr,
1485 ULONGEST orig_size,
1486 CORE_ADDR *jump_entry,
1487 CORE_ADDR *trampoline,
1488 ULONGEST *trampoline_size,
1489 unsigned char *jjump_pad_insn,
1490 ULONGEST *jjump_pad_insn_size,
1491 CORE_ADDR *adjusted_insn_addr,
1492 CORE_ADDR *adjusted_insn_addr_end,
1493 char *err)
1494 {
1495 unsigned char buf[40];
1496 int i, offset;
1497 int64_t loffset;
1498
1499 CORE_ADDR buildaddr = *jump_entry;
1500
1501 /* Build the jump pad. */
1502
1503 /* First, do tracepoint data collection. Save registers. */
1504 i = 0;
1505 /* Need to ensure stack pointer saved first. */
1506 buf[i++] = 0x54; /* push %rsp */
1507 buf[i++] = 0x55; /* push %rbp */
1508 buf[i++] = 0x57; /* push %rdi */
1509 buf[i++] = 0x56; /* push %rsi */
1510 buf[i++] = 0x52; /* push %rdx */
1511 buf[i++] = 0x51; /* push %rcx */
1512 buf[i++] = 0x53; /* push %rbx */
1513 buf[i++] = 0x50; /* push %rax */
1514 buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
1515 buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
1516 buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
1517 buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
1518 buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
1519 buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
1520 buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
1521 buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
1522 buf[i++] = 0x9c; /* pushfq */
1523 buf[i++] = 0x48; /* movabs <addr>,%rdi */
1524 buf[i++] = 0xbf;
1525 *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
1526 i += sizeof (unsigned long);
1527 buf[i++] = 0x57; /* push %rdi */
1528 append_insns (&buildaddr, i, buf);
1529
1530 /* Stack space for the collecting_t object. */
1531 i = 0;
1532 i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
1533 i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
1534 memcpy (buf + i, &tpoint, 8);
1535 i += 8;
1536 i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
1537 i += push_opcode (&buf[i],
1538 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1539 i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1540 append_insns (&buildaddr, i, buf);
1541
1542 /* spin-lock. */
1543 i = 0;
1544 i += push_opcode (&buf[i], "48 be"); /* movabs <lockaddr>,%rsi */
1545 memcpy (&buf[i], (void *) &lockaddr, 8);
1546 i += 8;
1547 i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
1548 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1549 i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1550 i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
1551 i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
1552 append_insns (&buildaddr, i, buf);
1553
1554 /* Set up the gdb_collect call. */
1555 /* At this point, (stack pointer + 0x18) is the base of our saved
1556 register block. */
1557
1558 i = 0;
1559 i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
1560 i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */
1561
1562 /* tpoint address may be 64-bit wide. */
1563 i += push_opcode (&buf[i], "48 bf"); /* movabs <addr>,%rdi */
1564 memcpy (buf + i, &tpoint, 8);
1565 i += 8;
1566 append_insns (&buildaddr, i, buf);
1567
1568 /* The collector function, being in the shared library, may be
1569 farther from the jump pad than a 32-bit displacement can reach. */
1570 i = 0;
1571 i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
1572 memcpy (buf + i, &collector, 8);
1573 i += 8;
1574 i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
1575 append_insns (&buildaddr, i, buf);
1576
1577 /* Clear the spin-lock. */
1578 i = 0;
1579 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1580 i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
1581 memcpy (buf + i, &lockaddr, 8);
1582 i += 8;
1583 append_insns (&buildaddr, i, buf);
1584
1585 /* Remove stack that had been used for the collecting_t object. */
1586 i = 0;
1587 i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
1588 append_insns (&buildaddr, i, buf);
1589
1590 /* Restore register state. */
1591 i = 0;
1592 buf[i++] = 0x48; /* add $0x8,%rsp */
1593 buf[i++] = 0x83;
1594 buf[i++] = 0xc4;
1595 buf[i++] = 0x08;
1596 buf[i++] = 0x9d; /* popfq */
1597 buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
1598 buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
1599 buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
1600 buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
1601 buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
1602 buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
1603 buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
1604 buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
1605 buf[i++] = 0x58; /* pop %rax */
1606 buf[i++] = 0x5b; /* pop %rbx */
1607 buf[i++] = 0x59; /* pop %rcx */
1608 buf[i++] = 0x5a; /* pop %rdx */
1609 buf[i++] = 0x5e; /* pop %rsi */
1610 buf[i++] = 0x5f; /* pop %rdi */
1611 buf[i++] = 0x5d; /* pop %rbp */
1612 buf[i++] = 0x5c; /* pop %rsp */
1613 append_insns (&buildaddr, i, buf);
1614
1615 /* Now, adjust the original instruction to execute in the jump
1616 pad. */
1617 *adjusted_insn_addr = buildaddr;
1618 relocate_instruction (&buildaddr, tpaddr);
1619 *adjusted_insn_addr_end = buildaddr;
1620
1621 /* Finally, write a jump back to the program. */
1622
1623 loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1624 if (loffset > INT_MAX || loffset < INT_MIN)
1625 {
1626 sprintf (err,
1627 "E.Jump back from jump pad too far from tracepoint "
1628 "(offset 0x%" PRIx64 " > int32).", loffset);
1629 return 1;
1630 }
1631
1632 offset = (int) loffset;
1633 memcpy (buf, jump_insn, sizeof (jump_insn));
1634 memcpy (buf + 1, &offset, 4);
1635 append_insns (&buildaddr, sizeof (jump_insn), buf);
1636
1637 /* The jump pad is now built. Wire in a jump to our jump pad. This
1638 is always done last (by our caller actually), so that we can
1639 install fast tracepoints with threads running. This relies on
1640 the agent's atomic write support. */
1641 loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
1642 if (loffset > INT_MAX || loffset < INT_MIN)
1643 {
1644 sprintf (err,
1645 "E.Jump pad too far from tracepoint "
1646 "(offset 0x%" PRIx64 " > int32).", loffset);
1647 return 1;
1648 }
1649
1650 offset = (int) loffset;
1651
1652 memcpy (buf, jump_insn, sizeof (jump_insn));
1653 memcpy (buf + 1, &offset, 4);
1654 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1655 *jjump_pad_insn_size = sizeof (jump_insn);
1656
1657 /* Return the end address of our pad. */
1658 *jump_entry = buildaddr;
1659
1660 return 0;
1661 }
1662
1663 #endif /* __x86_64__ */
1664
1665 /* Build a jump pad that saves registers and calls a collection
1666 function. Writes the jump instruction that jumps to the jump pad
1667 into JJUMPAD_INSN. The caller is responsible for writing it in at
1668 the tracepoint address. */
1669
1670 static int
1671 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1672 CORE_ADDR collector,
1673 CORE_ADDR lockaddr,
1674 ULONGEST orig_size,
1675 CORE_ADDR *jump_entry,
1676 CORE_ADDR *trampoline,
1677 ULONGEST *trampoline_size,
1678 unsigned char *jjump_pad_insn,
1679 ULONGEST *jjump_pad_insn_size,
1680 CORE_ADDR *adjusted_insn_addr,
1681 CORE_ADDR *adjusted_insn_addr_end,
1682 char *err)
1683 {
1684 unsigned char buf[0x100];
1685 int i, offset;
1686 CORE_ADDR buildaddr = *jump_entry;
1687
1688 /* Build the jump pad. */
1689
1690 /* First, do tracepoint data collection. Save registers. */
1691 i = 0;
1692 buf[i++] = 0x60; /* pushad */
1693 buf[i++] = 0x68; /* push tpaddr aka $pc */
1694 *((int *)(buf + i)) = (int) tpaddr;
1695 i += 4;
1696 buf[i++] = 0x9c; /* pushf */
1697 buf[i++] = 0x1e; /* push %ds */
1698 buf[i++] = 0x06; /* push %es */
1699 buf[i++] = 0x0f; /* push %fs */
1700 buf[i++] = 0xa0;
1701 buf[i++] = 0x0f; /* push %gs */
1702 buf[i++] = 0xa8;
1703 buf[i++] = 0x16; /* push %ss */
1704 buf[i++] = 0x0e; /* push %cs */
1705 append_insns (&buildaddr, i, buf);
1706
1707 /* Stack space for the collecting_t object. */
1708 i = 0;
1709 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1710
1711 /* Build the object. */
1712 i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
1713 memcpy (buf + i, &tpoint, 4);
1714 i += 4;
1715 i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */
1716
1717 i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1718 i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1719 append_insns (&buildaddr, i, buf);
1720
1721 /* Spin-lock. Note this uses cmpxchg, which the original i386 lacks.
1722 If we cared about that, this could use xchg instead. */
1723
1724 i = 0;
1725 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1726 i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
1727 %esp,<lockaddr> */
1728 memcpy (&buf[i], (void *) &lockaddr, 4);
1729 i += 4;
1730 i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
1731 i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
1732 append_insns (&buildaddr, i, buf);
1733
1734
1735 /* Set up arguments to the gdb_collect call. */
1736 i = 0;
1737 i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
1738 i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
1739 i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1740 append_insns (&buildaddr, i, buf);
1741
1742 i = 0;
1743 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1744 append_insns (&buildaddr, i, buf);
1745
1746 i = 0;
1747 i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
1748 memcpy (&buf[i], (void *) &tpoint, 4);
1749 i += 4;
1750 append_insns (&buildaddr, i, buf);
1751
1752 buf[0] = 0xe8; /* call <reladdr> */
1753 offset = collector - (buildaddr + sizeof (jump_insn));
1754 memcpy (buf + 1, &offset, 4);
1755 append_insns (&buildaddr, 5, buf);
1756 /* Clean up after the call. */
1757 buf[0] = 0x83; /* add $0x8,%esp */
1758 buf[1] = 0xc4;
1759 buf[2] = 0x08;
1760 append_insns (&buildaddr, 3, buf);
1761
1762
1763 /* Clear the spin-lock. This would need the LOCK prefix on older
1764 broken archs. */
1765 i = 0;
1766 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1767 i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
1768 memcpy (buf + i, &lockaddr, 4);
1769 i += 4;
1770 append_insns (&buildaddr, i, buf);
1771
1772
1773 /* Remove stack that had been used for the collecting_t object. */
1774 i = 0;
1775 i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
1776 append_insns (&buildaddr, i, buf);
1777
1778 i = 0;
1779 buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1780 buf[i++] = 0xc4;
1781 buf[i++] = 0x04;
1782 buf[i++] = 0x17; /* pop %ss */
1783 buf[i++] = 0x0f; /* pop %gs */
1784 buf[i++] = 0xa9;
1785 buf[i++] = 0x0f; /* pop %fs */
1786 buf[i++] = 0xa1;
1787 buf[i++] = 0x07; /* pop %es */
1788 buf[i++] = 0x1f; /* pop %ds */
1789 buf[i++] = 0x9d; /* popf */
1790 buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1791 buf[i++] = 0xc4;
1792 buf[i++] = 0x04;
1793 buf[i++] = 0x61; /* popad */
1794 append_insns (&buildaddr, i, buf);
1795
1796 /* Now, adjust the original instruction to execute in the jump
1797 pad. */
1798 *adjusted_insn_addr = buildaddr;
1799 relocate_instruction (&buildaddr, tpaddr);
1800 *adjusted_insn_addr_end = buildaddr;
1801
1802 /* Write the jump back to the program. */
1803 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1804 memcpy (buf, jump_insn, sizeof (jump_insn));
1805 memcpy (buf + 1, &offset, 4);
1806 append_insns (&buildaddr, sizeof (jump_insn), buf);
1807
1808 /* The jump pad is now built. Wire in a jump to our jump pad. This
1809 is always done last (by our caller actually), so that we can
1810 install fast tracepoints with threads running. This relies on
1811 the agent's atomic write support. */
1812 if (orig_size == 4)
1813 {
1814 /* Create a trampoline. */
1815 *trampoline_size = sizeof (jump_insn);
1816 if (!claim_trampoline_space (*trampoline_size, trampoline))
1817 {
1818 /* No trampoline space available. */
1819 strcpy (err,
1820 "E.Cannot allocate trampoline space needed for fast "
1821 "tracepoints on 4-byte instructions.");
1822 return 1;
1823 }
1824
1825 offset = *jump_entry - (*trampoline + sizeof (jump_insn));
1826 memcpy (buf, jump_insn, sizeof (jump_insn));
1827 memcpy (buf + 1, &offset, 4);
1828 write_inferior_memory (*trampoline, buf, sizeof (jump_insn));
1829
1830 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1831 offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
1832 memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
1833 memcpy (buf + 2, &offset, 2);
1834 memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
1835 *jjump_pad_insn_size = sizeof (small_jump_insn);
1836 }
1837 else
1838 {
1839 /* Else use a 32-bit relative jump instruction. */
1840 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1841 memcpy (buf, jump_insn, sizeof (jump_insn));
1842 memcpy (buf + 1, &offset, 4);
1843 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1844 *jjump_pad_insn_size = sizeof (jump_insn);
1845 }
1846
1847 /* Return the end address of our pad. */
1848 *jump_entry = buildaddr;
1849
1850 return 0;
1851 }
1852
1853 static int
1854 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1855 CORE_ADDR collector,
1856 CORE_ADDR lockaddr,
1857 ULONGEST orig_size,
1858 CORE_ADDR *jump_entry,
1859 CORE_ADDR *trampoline,
1860 ULONGEST *trampoline_size,
1861 unsigned char *jjump_pad_insn,
1862 ULONGEST *jjump_pad_insn_size,
1863 CORE_ADDR *adjusted_insn_addr,
1864 CORE_ADDR *adjusted_insn_addr_end,
1865 char *err)
1866 {
1867 #ifdef __x86_64__
1868 if (is_64bit_tdesc ())
1869 return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1870 collector, lockaddr,
1871 orig_size, jump_entry,
1872 trampoline, trampoline_size,
1873 jjump_pad_insn,
1874 jjump_pad_insn_size,
1875 adjusted_insn_addr,
1876 adjusted_insn_addr_end,
1877 err);
1878 #endif
1879
1880 return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
1881 collector, lockaddr,
1882 orig_size, jump_entry,
1883 trampoline, trampoline_size,
1884 jjump_pad_insn,
1885 jjump_pad_insn_size,
1886 adjusted_insn_addr,
1887 adjusted_insn_addr_end,
1888 err);
1889 }
1890
1891 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
1892 architectures. */
1893
1894 static int
1895 x86_get_min_fast_tracepoint_insn_len (void)
1896 {
1897 static int warned_about_fast_tracepoints = 0;
1898
1899 #ifdef __x86_64__
1900 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
1901 used for fast tracepoints. */
1902 if (is_64bit_tdesc ())
1903 return 5;
1904 #endif
1905
1906 if (agent_loaded_p ())
1907 {
1908 char errbuf[IPA_BUFSIZ];
1909
1910 errbuf[0] = '\0';
1911
1912 /* On x86, if trampolines are available, then 4-byte jump instructions
1913 with a 2-byte offset may be used, otherwise 5-byte jump instructions
1914 with a 4-byte offset are used instead. */
1915 if (have_fast_tracepoint_trampoline_buffer (errbuf))
1916 return 4;
1917 else
1918 {
1919 /* GDB has no channel to explain to the user why a shorter fast
1920 tracepoint is not possible, but at least make GDBserver
1921 mention that something has gone awry. */
1922 if (!warned_about_fast_tracepoints)
1923 {
1924 warning ("4-byte fast tracepoints not available; %s\n", errbuf);
1925 warned_about_fast_tracepoints = 1;
1926 }
1927 return 5;
1928 }
1929 }
1930 else
1931 {
1932 /* Indicate that the minimum length is currently unknown since the IPA
1933 has not loaded yet. */
1934 return 0;
1935 }
1936 }
1937
1938 static void
1939 add_insns (unsigned char *start, int len)
1940 {
1941 CORE_ADDR buildaddr = current_insn_ptr;
1942
1943 if (debug_threads)
1944 debug_printf ("Adding %d bytes of insn at %s\n",
1945 len, paddress (buildaddr));
1946
1947 append_insns (&buildaddr, len, start);
1948 current_insn_ptr = buildaddr;
1949 }
1950
1951 /* Our general strategy for emitting code is to avoid specifying raw
1952 bytes whenever possible, and instead copy a block of inline asm
1953 that is embedded in the function. This is a little messy, because
1954 we need to keep the compiler from discarding what looks like dead
1955 code, plus suppress various warnings. */
1956
1957 #define EMIT_ASM(NAME, INSNS) \
1958 do \
1959 { \
1960 extern unsigned char start_ ## NAME, end_ ## NAME; \
1961 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1962 __asm__ ("jmp end_" #NAME "\n" \
1963 "\t" "start_" #NAME ":" \
1964 "\t" INSNS "\n" \
1965 "\t" "end_" #NAME ":"); \
1966 } while (0)
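/* For example, EMIT_ASM (my_nop, "nop") (the label name is only
   illustrative) assembles a single NOP between the start_my_nop and
   end_my_nop symbols and copies those bytes into the inferior at
   current_insn_ptr via add_insns. */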
1967
1968 #ifdef __x86_64__
1969
1970 #define EMIT_ASM32(NAME,INSNS) \
1971 do \
1972 { \
1973 extern unsigned char start_ ## NAME, end_ ## NAME; \
1974 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
1975 __asm__ (".code32\n" \
1976 "\t" "jmp end_" #NAME "\n" \
1977 "\t" "start_" #NAME ":\n" \
1978 "\t" INSNS "\n" \
1979 "\t" "end_" #NAME ":\n" \
1980 ".code64\n"); \
1981 } while (0)
1982
1983 #else
1984
1985 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
1986
1987 #endif
1988
1989 #ifdef __x86_64__
1990
1991 static void
1992 amd64_emit_prologue (void)
1993 {
1994 EMIT_ASM (amd64_prologue,
1995 "pushq %rbp\n\t"
1996 "movq %rsp,%rbp\n\t"
1997 "sub $0x20,%rsp\n\t"
1998 "movq %rdi,-8(%rbp)\n\t"
1999 "movq %rsi,-16(%rbp)");
2000 }
2001
2002
2003 static void
2004 amd64_emit_epilogue (void)
2005 {
2006 EMIT_ASM (amd64_epilogue,
2007 "movq -16(%rbp),%rdi\n\t"
2008 "movq %rax,(%rdi)\n\t"
2009 "xor %rax,%rax\n\t"
2010 "leave\n\t"
2011 "ret");
2012 }
2013
2014 static void
2015 amd64_emit_add (void)
2016 {
2017 EMIT_ASM (amd64_add,
2018 "add (%rsp),%rax\n\t"
2019 "lea 0x8(%rsp),%rsp");
2020 }
2021
2022 static void
2023 amd64_emit_sub (void)
2024 {
2025 EMIT_ASM (amd64_sub,
2026 "sub %rax,(%rsp)\n\t"
2027 "pop %rax");
2028 }
2029
2030 static void
2031 amd64_emit_mul (void)
2032 {
2033 emit_error = 1;
2034 }
2035
2036 static void
2037 amd64_emit_lsh (void)
2038 {
2039 emit_error = 1;
2040 }
2041
2042 static void
2043 amd64_emit_rsh_signed (void)
2044 {
2045 emit_error = 1;
2046 }
2047
2048 static void
2049 amd64_emit_rsh_unsigned (void)
2050 {
2051 emit_error = 1;
2052 }
2053
2054 static void
2055 amd64_emit_ext (int arg)
2056 {
2057 switch (arg)
2058 {
2059 case 8:
2060 EMIT_ASM (amd64_ext_8,
2061 "cbtw\n\t"
2062 "cwtl\n\t"
2063 "cltq");
2064 break;
2065 case 16:
2066 EMIT_ASM (amd64_ext_16,
2067 "cwtl\n\t"
2068 "cltq");
2069 break;
2070 case 32:
2071 EMIT_ASM (amd64_ext_32,
2072 "cltq");
2073 break;
2074 default:
2075 emit_error = 1;
2076 }
2077 }
2078
2079 static void
2080 amd64_emit_log_not (void)
2081 {
2082 EMIT_ASM (amd64_log_not,
2083 "test %rax,%rax\n\t"
2084 "sete %cl\n\t"
2085 "movzbq %cl,%rax");
2086 }
2087
2088 static void
2089 amd64_emit_bit_and (void)
2090 {
2091 EMIT_ASM (amd64_and,
2092 "and (%rsp),%rax\n\t"
2093 "lea 0x8(%rsp),%rsp");
2094 }
2095
2096 static void
2097 amd64_emit_bit_or (void)
2098 {
2099 EMIT_ASM (amd64_or,
2100 "or (%rsp),%rax\n\t"
2101 "lea 0x8(%rsp),%rsp");
2102 }
2103
2104 static void
2105 amd64_emit_bit_xor (void)
2106 {
2107 EMIT_ASM (amd64_xor,
2108 "xor (%rsp),%rax\n\t"
2109 "lea 0x8(%rsp),%rsp");
2110 }
2111
2112 static void
2113 amd64_emit_bit_not (void)
2114 {
2115 EMIT_ASM (amd64_bit_not,
2116 "xorq $0xffffffffffffffff,%rax");
2117 }
2118
2119 static void
2120 amd64_emit_equal (void)
2121 {
2122 EMIT_ASM (amd64_equal,
2123 "cmp %rax,(%rsp)\n\t"
2124 "je .Lamd64_equal_true\n\t"
2125 "xor %rax,%rax\n\t"
2126 "jmp .Lamd64_equal_end\n\t"
2127 ".Lamd64_equal_true:\n\t"
2128 "mov $0x1,%rax\n\t"
2129 ".Lamd64_equal_end:\n\t"
2130 "lea 0x8(%rsp),%rsp");
2131 }
2132
2133 static void
2134 amd64_emit_less_signed (void)
2135 {
2136 EMIT_ASM (amd64_less_signed,
2137 "cmp %rax,(%rsp)\n\t"
2138 "jl .Lamd64_less_signed_true\n\t"
2139 "xor %rax,%rax\n\t"
2140 "jmp .Lamd64_less_signed_end\n\t"
2141 ".Lamd64_less_signed_true:\n\t"
2142 "mov $1,%rax\n\t"
2143 ".Lamd64_less_signed_end:\n\t"
2144 "lea 0x8(%rsp),%rsp");
2145 }
2146
2147 static void
2148 amd64_emit_less_unsigned (void)
2149 {
2150 EMIT_ASM (amd64_less_unsigned,
2151 "cmp %rax,(%rsp)\n\t"
2152 "jb .Lamd64_less_unsigned_true\n\t"
2153 "xor %rax,%rax\n\t"
2154 "jmp .Lamd64_less_unsigned_end\n\t"
2155 ".Lamd64_less_unsigned_true:\n\t"
2156 "mov $1,%rax\n\t"
2157 ".Lamd64_less_unsigned_end:\n\t"
2158 "lea 0x8(%rsp),%rsp");
2159 }
2160
2161 static void
2162 amd64_emit_ref (int size)
2163 {
2164 switch (size)
2165 {
2166 case 1:
2167 EMIT_ASM (amd64_ref1,
2168 "movb (%rax),%al");
2169 break;
2170 case 2:
2171 EMIT_ASM (amd64_ref2,
2172 "movw (%rax),%ax");
2173 break;
2174 case 4:
2175 EMIT_ASM (amd64_ref4,
2176 "movl (%rax),%eax");
2177 break;
2178 case 8:
2179 EMIT_ASM (amd64_ref8,
2180 "movq (%rax),%rax");
2181 break;
2182 }
2183 }
2184
2185 static void
2186 amd64_emit_if_goto (int *offset_p, int *size_p)
2187 {
2188 EMIT_ASM (amd64_if_goto,
2189 "mov %rax,%rcx\n\t"
2190 "pop %rax\n\t"
2191 "cmp $0,%rcx\n\t"
2192 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2193 if (offset_p)
2194 *offset_p = 10;
2195 if (size_p)
2196 *size_p = 4;
2197 }
2198
2199 static void
2200 amd64_emit_goto (int *offset_p, int *size_p)
2201 {
2202 EMIT_ASM (amd64_goto,
2203 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2204 if (offset_p)
2205 *offset_p = 1;
2206 if (size_p)
2207 *size_p = 4;
2208 }
2209
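/* amd64_emit_goto and amd64_emit_if_goto above leave a zeroed 32-bit
   displacement behind the jump opcode and report its position and size
   through *offset_p and *size_p; the function below is presumably
   called later, once the destination is known, to patch that
   displacement with TO - (FROM + SIZE).  */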
2210 static void
2211 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2212 {
2213 int diff = (to - (from + size));
2214 unsigned char buf[sizeof (int)];
2215
2216 if (size != 4)
2217 {
2218 emit_error = 1;
2219 return;
2220 }
2221
2222 memcpy (buf, &diff, sizeof (int));
2223 write_inferior_memory (from, buf, sizeof (int));
2224 }
2225
2226 static void
2227 amd64_emit_const (LONGEST num)
2228 {
2229 unsigned char buf[16];
2230 int i;
2231 CORE_ADDR buildaddr = current_insn_ptr;
2232
2233 i = 0;
2234 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
2235 memcpy (&buf[i], &num, sizeof (num));
2236 i += 8;
2237 append_insns (&buildaddr, i, buf);
2238 current_insn_ptr = buildaddr;
2239 }
2240
2241 static void
2242 amd64_emit_call (CORE_ADDR fn)
2243 {
2244 unsigned char buf[16];
2245 int i;
2246 CORE_ADDR buildaddr;
2247 LONGEST offset64;
2248
2249 /* The destination function, being in the IPA shared library, may be
2250 more than 31 bits away from the compiled code pad. */
2251
2252 buildaddr = current_insn_ptr;
2253
2254 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
2255
2256 i = 0;
2257
2258 if (offset64 > INT_MAX || offset64 < INT_MIN)
2259 {
2260 /* Offset is too large for a call. Use callq, but that requires
2261 a register, so avoid it if possible. Use %rdx; since it is
2262 call-clobbered, we don't have to push/pop it. */
2263 buf[i++] = 0x48; /* mov $fn,%rdx */
2264 buf[i++] = 0xba;
2265 memcpy (buf + i, &fn, 8);
2266 i += 8;
2267 buf[i++] = 0xff; /* callq *%rdx */
2268 buf[i++] = 0xd2;
2269 }
2270 else
2271 {
2272 int offset32 = offset64; /* we know we can't overflow here. */
     buf[i++] = 0xe8; /* call <reladdr> */
2273 memcpy (buf + i, &offset32, 4);
2274 i += 4;
2275 }
2276
2277 append_insns (&buildaddr, i, buf);
2278 current_insn_ptr = buildaddr;
2279 }
2280
2281 static void
2282 amd64_emit_reg (int reg)
2283 {
2284 unsigned char buf[16];
2285 int i;
2286 CORE_ADDR buildaddr;
2287
2288 /* Assume raw_regs is still in %rdi. */
2289 buildaddr = current_insn_ptr;
2290 i = 0;
2291 buf[i++] = 0xbe; /* mov $<n>,%esi */
2292 memcpy (&buf[i], &reg, sizeof (reg));
2293 i += 4;
2294 append_insns (&buildaddr, i, buf);
2295 current_insn_ptr = buildaddr;
2296 amd64_emit_call (get_raw_reg_func_addr ());
2297 }
2298
2299 static void
2300 amd64_emit_pop (void)
2301 {
2302 EMIT_ASM (amd64_pop,
2303 "pop %rax");
2304 }
2305
2306 static void
2307 amd64_emit_stack_flush (void)
2308 {
2309 EMIT_ASM (amd64_stack_flush,
2310 "push %rax");
2311 }
2312
2313 static void
2314 amd64_emit_zero_ext (int arg)
2315 {
2316 switch (arg)
2317 {
2318 case 8:
2319 EMIT_ASM (amd64_zero_ext_8,
2320 "and $0xff,%rax");
2321 break;
2322 case 16:
2323 EMIT_ASM (amd64_zero_ext_16,
2324 "and $0xffff,%rax");
2325 break;
2326 case 32:
2327 EMIT_ASM (amd64_zero_ext_32,
2328 "mov $0xffffffff,%rcx\n\t"
2329 "and %rcx,%rax");
2330 break;
2331 default:
2332 emit_error = 1;
2333 }
2334 }
2335
2336 static void
2337 amd64_emit_swap (void)
2338 {
2339 EMIT_ASM (amd64_swap,
2340 "mov %rax,%rcx\n\t"
2341 "pop %rax\n\t"
2342 "push %rcx");
2343 }
2344
2345 static void
2346 amd64_emit_stack_adjust (int n)
2347 {
2348 unsigned char buf[16];
2349 int i;
2350 CORE_ADDR buildaddr = current_insn_ptr;
2351
2352 i = 0;
2353 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
2354 buf[i++] = 0x8d;
2355 buf[i++] = 0x64;
2356 buf[i++] = 0x24;
2357 /* This only handles adjustments up to 16, but we don't expect any more. */
2358 buf[i++] = n * 8;
2359 append_insns (&buildaddr, i, buf);
2360 current_insn_ptr = buildaddr;
2361 }
2362
2363 /* FN's prototype is `LONGEST(*fn)(int)'. */
2364
2365 static void
2366 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2367 {
2368 unsigned char buf[16];
2369 int i;
2370 CORE_ADDR buildaddr;
2371
2372 buildaddr = current_insn_ptr;
2373 i = 0;
2374 buf[i++] = 0xbf; /* movl $<n>,%edi */
2375 memcpy (&buf[i], &arg1, sizeof (arg1));
2376 i += 4;
2377 append_insns (&buildaddr, i, buf);
2378 current_insn_ptr = buildaddr;
2379 amd64_emit_call (fn);
2380 }
2381
2382 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2383
2384 static void
2385 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2386 {
2387 unsigned char buf[16];
2388 int i;
2389 CORE_ADDR buildaddr;
2390
2391 buildaddr = current_insn_ptr;
2392 i = 0;
2393 buf[i++] = 0xbf; /* movl $<n>,%edi */
2394 memcpy (&buf[i], &arg1, sizeof (arg1));
2395 i += 4;
2396 append_insns (&buildaddr, i, buf);
2397 current_insn_ptr = buildaddr;
2398 EMIT_ASM (amd64_void_call_2_a,
2399 /* Save away a copy of the stack top. */
2400 "push %rax\n\t"
2401 /* Also pass top as the second argument. */
2402 "mov %rax,%rsi");
2403 amd64_emit_call (fn);
2404 EMIT_ASM (amd64_void_call_2_b,
2405 /* Restore the stack top; %rax may have been trashed. */
2406 "pop %rax");
2407 }
2408
2409 void
2410 amd64_emit_eq_goto (int *offset_p, int *size_p)
2411 {
2412 EMIT_ASM (amd64_eq,
2413 "cmp %rax,(%rsp)\n\t"
2414 "jne .Lamd64_eq_fallthru\n\t"
2415 "lea 0x8(%rsp),%rsp\n\t"
2416 "pop %rax\n\t"
2417 /* jmp, but don't trust the assembler to choose the right jump */
2418 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2419 ".Lamd64_eq_fallthru:\n\t"
2420 "lea 0x8(%rsp),%rsp\n\t"
2421 "pop %rax");
2422
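/* Byte counts for the sequence above (cmp = 4, short conditional
   jump = 2, lea = 5, pop = 1, 0xe9 opcode = 1) place the 32-bit
   displacement 13 bytes in; the other amd64 comparison gotos below
   share this layout and hence the same offset.  */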
2423 if (offset_p)
2424 *offset_p = 13;
2425 if (size_p)
2426 *size_p = 4;
2427 }
2428
2429 void
2430 amd64_emit_ne_goto (int *offset_p, int *size_p)
2431 {
2432 EMIT_ASM (amd64_ne,
2433 "cmp %rax,(%rsp)\n\t"
2434 "je .Lamd64_ne_fallthru\n\t"
2435 "lea 0x8(%rsp),%rsp\n\t"
2436 "pop %rax\n\t"
2437 /* jmp, but don't trust the assembler to choose the right jump */
2438 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2439 ".Lamd64_ne_fallthru:\n\t"
2440 "lea 0x8(%rsp),%rsp\n\t"
2441 "pop %rax");
2442
2443 if (offset_p)
2444 *offset_p = 13;
2445 if (size_p)
2446 *size_p = 4;
2447 }
2448
2449 void
2450 amd64_emit_lt_goto (int *offset_p, int *size_p)
2451 {
2452 EMIT_ASM (amd64_lt,
2453 "cmp %rax,(%rsp)\n\t"
2454 "jnl .Lamd64_lt_fallthru\n\t"
2455 "lea 0x8(%rsp),%rsp\n\t"
2456 "pop %rax\n\t"
2457 /* jmp, but don't trust the assembler to choose the right jump */
2458 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2459 ".Lamd64_lt_fallthru:\n\t"
2460 "lea 0x8(%rsp),%rsp\n\t"
2461 "pop %rax");
2462
2463 if (offset_p)
2464 *offset_p = 13;
2465 if (size_p)
2466 *size_p = 4;
2467 }
2468
2469 void
2470 amd64_emit_le_goto (int *offset_p, int *size_p)
2471 {
2472 EMIT_ASM (amd64_le,
2473 "cmp %rax,(%rsp)\n\t"
2474 "jnle .Lamd64_le_fallthru\n\t"
2475 "lea 0x8(%rsp),%rsp\n\t"
2476 "pop %rax\n\t"
2477 /* jmp, but don't trust the assembler to choose the right jump */
2478 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2479 ".Lamd64_le_fallthru:\n\t"
2480 "lea 0x8(%rsp),%rsp\n\t"
2481 "pop %rax");
2482
2483 if (offset_p)
2484 *offset_p = 13;
2485 if (size_p)
2486 *size_p = 4;
2487 }
2488
2489 void
2490 amd64_emit_gt_goto (int *offset_p, int *size_p)
2491 {
2492 EMIT_ASM (amd64_gt,
2493 "cmp %rax,(%rsp)\n\t"
2494 "jng .Lamd64_gt_fallthru\n\t"
2495 "lea 0x8(%rsp),%rsp\n\t"
2496 "pop %rax\n\t"
2497 /* jmp, but don't trust the assembler to choose the right jump */
2498 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2499 ".Lamd64_gt_fallthru:\n\t"
2500 "lea 0x8(%rsp),%rsp\n\t"
2501 "pop %rax");
2502
2503 if (offset_p)
2504 *offset_p = 13;
2505 if (size_p)
2506 *size_p = 4;
2507 }
2508
2509 void
2510 amd64_emit_ge_goto (int *offset_p, int *size_p)
2511 {
2512 EMIT_ASM (amd64_ge,
2513 "cmp %rax,(%rsp)\n\t"
2514 "jnge .Lamd64_ge_fallthru\n\t"
2515 ".Lamd64_ge_jump:\n\t"
2516 "lea 0x8(%rsp),%rsp\n\t"
2517 "pop %rax\n\t"
2518 /* jmp, but don't trust the assembler to choose the right jump */
2519 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2520 ".Lamd64_ge_fallthru:\n\t"
2521 "lea 0x8(%rsp),%rsp\n\t"
2522 "pop %rax");
2523
2524 if (offset_p)
2525 *offset_p = 13;
2526 if (size_p)
2527 *size_p = 4;
2528 }
2529
2530 struct emit_ops amd64_emit_ops =
2531 {
2532 amd64_emit_prologue,
2533 amd64_emit_epilogue,
2534 amd64_emit_add,
2535 amd64_emit_sub,
2536 amd64_emit_mul,
2537 amd64_emit_lsh,
2538 amd64_emit_rsh_signed,
2539 amd64_emit_rsh_unsigned,
2540 amd64_emit_ext,
2541 amd64_emit_log_not,
2542 amd64_emit_bit_and,
2543 amd64_emit_bit_or,
2544 amd64_emit_bit_xor,
2545 amd64_emit_bit_not,
2546 amd64_emit_equal,
2547 amd64_emit_less_signed,
2548 amd64_emit_less_unsigned,
2549 amd64_emit_ref,
2550 amd64_emit_if_goto,
2551 amd64_emit_goto,
2552 amd64_write_goto_address,
2553 amd64_emit_const,
2554 amd64_emit_call,
2555 amd64_emit_reg,
2556 amd64_emit_pop,
2557 amd64_emit_stack_flush,
2558 amd64_emit_zero_ext,
2559 amd64_emit_swap,
2560 amd64_emit_stack_adjust,
2561 amd64_emit_int_call_1,
2562 amd64_emit_void_call_2,
2563 amd64_emit_eq_goto,
2564 amd64_emit_ne_goto,
2565 amd64_emit_lt_goto,
2566 amd64_emit_le_goto,
2567 amd64_emit_gt_goto,
2568 amd64_emit_ge_goto
2569 };
2570
2571 #endif /* __x86_64__ */
2572
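/* In the i386 emitters below, a 64-bit value-stack entry lives in the
   %ebx:%eax pair (high half in %ebx), as the add/adc pairs, the
   epilogue and i386_emit_const show; deeper entries occupy two words
   apiece on the machine stack.  */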
2573 static void
2574 i386_emit_prologue (void)
2575 {
2576 EMIT_ASM32 (i386_prologue,
2577 "push %ebp\n\t"
2578 "mov %esp,%ebp\n\t"
2579 "push %ebx");
2580 /* At this point, the raw regs base address is at 8(%ebp), and the
2581 value pointer is at 12(%ebp). */
2582 }
2583
2584 static void
2585 i386_emit_epilogue (void)
2586 {
2587 EMIT_ASM32 (i386_epilogue,
2588 "mov 12(%ebp),%ecx\n\t"
2589 "mov %eax,(%ecx)\n\t"
2590 "mov %ebx,0x4(%ecx)\n\t"
2591 "xor %eax,%eax\n\t"
2592 "pop %ebx\n\t"
2593 "pop %ebp\n\t"
2594 "ret");
2595 }
2596
2597 static void
2598 i386_emit_add (void)
2599 {
2600 EMIT_ASM32 (i386_add,
2601 "add (%esp),%eax\n\t"
2602 "adc 0x4(%esp),%ebx\n\t"
2603 "lea 0x8(%esp),%esp");
2604 }
2605
2606 static void
2607 i386_emit_sub (void)
2608 {
2609 EMIT_ASM32 (i386_sub,
2610 "subl %eax,(%esp)\n\t"
2611 "sbbl %ebx,4(%esp)\n\t"
2612 "pop %eax\n\t"
2613 "pop %ebx\n\t");
2614 }
2615
2616 static void
2617 i386_emit_mul (void)
2618 {
2619 emit_error = 1;
2620 }
2621
2622 static void
2623 i386_emit_lsh (void)
2624 {
2625 emit_error = 1;
2626 }
2627
2628 static void
2629 i386_emit_rsh_signed (void)
2630 {
2631 emit_error = 1;
2632 }
2633
2634 static void
2635 i386_emit_rsh_unsigned (void)
2636 {
2637 emit_error = 1;
2638 }
2639
2640 static void
2641 i386_emit_ext (int arg)
2642 {
2643 switch (arg)
2644 {
2645 case 8:
2646 EMIT_ASM32 (i386_ext_8,
2647 "cbtw\n\t"
2648 "cwtl\n\t"
2649 "movl %eax,%ebx\n\t"
2650 "sarl $31,%ebx");
2651 break;
2652 case 16:
2653 EMIT_ASM32 (i386_ext_16,
2654 "cwtl\n\t"
2655 "movl %eax,%ebx\n\t"
2656 "sarl $31,%ebx");
2657 break;
2658 case 32:
2659 EMIT_ASM32 (i386_ext_32,
2660 "movl %eax,%ebx\n\t"
2661 "sarl $31,%ebx");
2662 break;
2663 default:
2664 emit_error = 1;
2665 }
2666 }
2667
2668 static void
2669 i386_emit_log_not (void)
2670 {
2671 EMIT_ASM32 (i386_log_not,
2672 "or %ebx,%eax\n\t"
2673 "test %eax,%eax\n\t"
2674 "sete %cl\n\t"
2675 "xor %ebx,%ebx\n\t"
2676 "movzbl %cl,%eax");
2677 }
2678
2679 static void
2680 i386_emit_bit_and (void)
2681 {
2682 EMIT_ASM32 (i386_and,
2683 "and (%esp),%eax\n\t"
2684 "and 0x4(%esp),%ebx\n\t"
2685 "lea 0x8(%esp),%esp");
2686 }
2687
2688 static void
2689 i386_emit_bit_or (void)
2690 {
2691 EMIT_ASM32 (i386_or,
2692 "or (%esp),%eax\n\t"
2693 "or 0x4(%esp),%ebx\n\t"
2694 "lea 0x8(%esp),%esp");
2695 }
2696
2697 static void
2698 i386_emit_bit_xor (void)
2699 {
2700 EMIT_ASM32 (i386_xor,
2701 "xor (%esp),%eax\n\t"
2702 "xor 0x4(%esp),%ebx\n\t"
2703 "lea 0x8(%esp),%esp");
2704 }
2705
2706 static void
2707 i386_emit_bit_not (void)
2708 {
2709 EMIT_ASM32 (i386_bit_not,
2710 "xor $0xffffffff,%eax\n\t"
2711 "xor $0xffffffff,%ebx\n\t");
2712 }
2713
2714 static void
2715 i386_emit_equal (void)
2716 {
2717 EMIT_ASM32 (i386_equal,
2718 "cmpl %ebx,4(%esp)\n\t"
2719 "jne .Li386_equal_false\n\t"
2720 "cmpl %eax,(%esp)\n\t"
2721 "je .Li386_equal_true\n\t"
2722 ".Li386_equal_false:\n\t"
2723 "xor %eax,%eax\n\t"
2724 "jmp .Li386_equal_end\n\t"
2725 ".Li386_equal_true:\n\t"
2726 "mov $1,%eax\n\t"
2727 ".Li386_equal_end:\n\t"
2728 "xor %ebx,%ebx\n\t"
2729 "lea 0x8(%esp),%esp");
2730 }
2731
2732 static void
2733 i386_emit_less_signed (void)
2734 {
2735 EMIT_ASM32 (i386_less_signed,
2736 "cmpl %ebx,4(%esp)\n\t"
2737 "jl .Li386_less_signed_true\n\t"
2738 "jne .Li386_less_signed_false\n\t"
2739 "cmpl %eax,(%esp)\n\t"
2740 "jl .Li386_less_signed_true\n\t"
2741 ".Li386_less_signed_false:\n\t"
2742 "xor %eax,%eax\n\t"
2743 "jmp .Li386_less_signed_end\n\t"
2744 ".Li386_less_signed_true:\n\t"
2745 "mov $1,%eax\n\t"
2746 ".Li386_less_signed_end:\n\t"
2747 "xor %ebx,%ebx\n\t"
2748 "lea 0x8(%esp),%esp");
2749 }
2750
2751 static void
2752 i386_emit_less_unsigned (void)
2753 {
2754 EMIT_ASM32 (i386_less_unsigned,
2755 "cmpl %ebx,4(%esp)\n\t"
2756 "jb .Li386_less_unsigned_true\n\t"
2757 "jne .Li386_less_unsigned_false\n\t"
2758 "cmpl %eax,(%esp)\n\t"
2759 "jb .Li386_less_unsigned_true\n\t"
2760 ".Li386_less_unsigned_false:\n\t"
2761 "xor %eax,%eax\n\t"
2762 "jmp .Li386_less_unsigned_end\n\t"
2763 ".Li386_less_unsigned_true:\n\t"
2764 "mov $1,%eax\n\t"
2765 ".Li386_less_unsigned_end:\n\t"
2766 "xor %ebx,%ebx\n\t"
2767 "lea 0x8(%esp),%esp");
2768 }
2769
2770 static void
2771 i386_emit_ref (int size)
2772 {
2773 switch (size)
2774 {
2775 case 1:
2776 EMIT_ASM32 (i386_ref1,
2777 "movb (%eax),%al");
2778 break;
2779 case 2:
2780 EMIT_ASM32 (i386_ref2,
2781 "movw (%eax),%ax");
2782 break;
2783 case 4:
2784 EMIT_ASM32 (i386_ref4,
2785 "movl (%eax),%eax");
2786 break;
2787 case 8:
2788 EMIT_ASM32 (i386_ref8,
2789 "movl 4(%eax),%ebx\n\t"
2790 "movl (%eax),%eax");
2791 break;
2792 }
2793 }
2794
2795 static void
2796 i386_emit_if_goto (int *offset_p, int *size_p)
2797 {
2798 EMIT_ASM32 (i386_if_goto,
2799 "mov %eax,%ecx\n\t"
2800 "or %ebx,%ecx\n\t"
2801 "pop %eax\n\t"
2802 "pop %ebx\n\t"
2803 "cmpl $0,%ecx\n\t"
2804 /* Don't trust the assembler to choose the right jump */
2805 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2806
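  /* The 11 below is the byte count of the sequence above with the usual
     encodings: mov = 2, or = 2, the two pops = 1 each, cmpl $0,%ecx = 3,
     plus the two 0x0f 0x85 opcode bytes.  */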
2807 if (offset_p)
2808 *offset_p = 11; /* be sure that this matches the sequence above */
2809 if (size_p)
2810 *size_p = 4;
2811 }
2812
2813 static void
2814 i386_emit_goto (int *offset_p, int *size_p)
2815 {
2816 EMIT_ASM32 (i386_goto,
2817 /* Don't trust the assembler to choose the right jump */
2818 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2819 if (offset_p)
2820 *offset_p = 1;
2821 if (size_p)
2822 *size_p = 4;
2823 }
2824
2825 static void
2826 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2827 {
2828 int diff = (to - (from + size));
2829 unsigned char buf[sizeof (int)];
2830
2831 /* We're only doing 4-byte sizes at the moment. */
2832 if (size != 4)
2833 {
2834 emit_error = 1;
2835 return;
2836 }
2837
2838 memcpy (buf, &diff, sizeof (int));
2839 write_inferior_memory (from, buf, sizeof (int));
2840 }
2841
2842 static void
2843 i386_emit_const (LONGEST num)
2844 {
2845 unsigned char buf[16];
2846 int i, hi, lo;
2847 CORE_ADDR buildaddr = current_insn_ptr;
2848
2849 i = 0;
2850 buf[i++] = 0xb8; /* mov $<n>,%eax */
2851 lo = num & 0xffffffff;
2852 memcpy (&buf[i], &lo, sizeof (lo));
2853 i += 4;
2854 hi = ((num >> 32) & 0xffffffff);
2855 if (hi)
2856 {
2857 buf[i++] = 0xbb; /* mov $<n>,%ebx */
2858 memcpy (&buf[i], &hi, sizeof (hi));
2859 i += 4;
2860 }
2861 else
2862 {
2863 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2864 }
2865 append_insns (&buildaddr, i, buf);
2866 current_insn_ptr = buildaddr;
2867 }
2868
2869 static void
2870 i386_emit_call (CORE_ADDR fn)
2871 {
2872 unsigned char buf[16];
2873 int i, offset;
2874 CORE_ADDR buildaddr;
2875
2876 buildaddr = current_insn_ptr;
2877 i = 0;
2878 buf[i++] = 0xe8; /* call <reladdr> */
2879 offset = ((int) fn) - (buildaddr + 5);
2880 memcpy (buf + 1, &offset, 4);
2881 append_insns (&buildaddr, 5, buf);
2882 current_insn_ptr = buildaddr;
2883 }
2884
2885 static void
2886 i386_emit_reg (int reg)
2887 {
2888 unsigned char buf[16];
2889 int i;
2890 CORE_ADDR buildaddr;
2891
2892 EMIT_ASM32 (i386_reg_a,
2893 "sub $0x8,%esp");
2894 buildaddr = current_insn_ptr;
2895 i = 0;
2896 buf[i++] = 0xb8; /* mov $<n>,%eax */
2897 memcpy (&buf[i], &reg, sizeof (reg));
2898 i += 4;
2899 append_insns (&buildaddr, i, buf);
2900 current_insn_ptr = buildaddr;
2901 EMIT_ASM32 (i386_reg_b,
2902 "mov %eax,4(%esp)\n\t"
2903 "mov 8(%ebp),%eax\n\t"
2904 "mov %eax,(%esp)");
2905 i386_emit_call (get_raw_reg_func_addr ());
2906 EMIT_ASM32 (i386_reg_c,
2907 "xor %ebx,%ebx\n\t"
2908 "lea 0x8(%esp),%esp");
2909 }
2910
2911 static void
2912 i386_emit_pop (void)
2913 {
2914 EMIT_ASM32 (i386_pop,
2915 "pop %eax\n\t"
2916 "pop %ebx");
2917 }
2918
2919 static void
2920 i386_emit_stack_flush (void)
2921 {
2922 EMIT_ASM32 (i386_stack_flush,
2923 "push %ebx\n\t"
2924 "push %eax");
2925 }
2926
2927 static void
2928 i386_emit_zero_ext (int arg)
2929 {
2930 switch (arg)
2931 {
2932 case 8:
2933 EMIT_ASM32 (i386_zero_ext_8,
2934 "and $0xff,%eax\n\t"
2935 "xor %ebx,%ebx");
2936 break;
2937 case 16:
2938 EMIT_ASM32 (i386_zero_ext_16,
2939 "and $0xffff,%eax\n\t"
2940 "xor %ebx,%ebx");
2941 break;
2942 case 32:
2943 EMIT_ASM32 (i386_zero_ext_32,
2944 "xor %ebx,%ebx");
2945 break;
2946 default:
2947 emit_error = 1;
2948 }
2949 }
2950
2951 static void
2952 i386_emit_swap (void)
2953 {
2954 EMIT_ASM32 (i386_swap,
2955 "mov %eax,%ecx\n\t"
2956 "mov %ebx,%edx\n\t"
2957 "pop %eax\n\t"
2958 "pop %ebx\n\t"
2959 "push %edx\n\t"
2960 "push %ecx");
2961 }
2962
2963 static void
2964 i386_emit_stack_adjust (int n)
2965 {
2966 unsigned char buf[16];
2967 int i;
2968 CORE_ADDR buildaddr = current_insn_ptr;
2969
2970 i = 0;
2971 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
2972 buf[i++] = 0x64;
2973 buf[i++] = 0x24;
2974 buf[i++] = n * 8;
2975 append_insns (&buildaddr, i, buf);
2976 current_insn_ptr = buildaddr;
2977 }
2978
2979 /* FN's prototype is `LONGEST(*fn)(int)'. */
2980
2981 static void
2982 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
2983 {
2984 unsigned char buf[16];
2985 int i;
2986 CORE_ADDR buildaddr;
2987
2988 EMIT_ASM32 (i386_int_call_1_a,
2989 /* Reserve a bit of stack space. */
2990 "sub $0x8,%esp");
2991 /* Put the one argument on the stack. */
2992 buildaddr = current_insn_ptr;
2993 i = 0;
2994 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2995 buf[i++] = 0x04;
2996 buf[i++] = 0x24;
2997 memcpy (&buf[i], &arg1, sizeof (arg1));
2998 i += 4;
2999 append_insns (&buildaddr, i, buf);
3000 current_insn_ptr = buildaddr;
3001 i386_emit_call (fn);
3002 EMIT_ASM32 (i386_int_call_1_c,
3003 "mov %edx,%ebx\n\t"
3004 "lea 0x8(%esp),%esp");
3005 }
3006
3007 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
3008
3009 static void
3010 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
3011 {
3012 unsigned char buf[16];
3013 int i;
3014 CORE_ADDR buildaddr;
3015
3016 EMIT_ASM32 (i386_void_call_2_a,
3017 /* Preserve %eax only; we don't have to worry about %ebx. */
3018 "push %eax\n\t"
3019 /* Reserve a bit of stack space for arguments. */
3020 "sub $0x10,%esp\n\t"
3021 /* Copy "top" to the second argument position. (Note that
3022 we can't assume the function won't scribble on its
3023 arguments, so don't try to restore from this.) */
3024 "mov %eax,4(%esp)\n\t"
3025 "mov %ebx,8(%esp)");
3026 /* Put the first argument on the stack. */
3027 buildaddr = current_insn_ptr;
3028 i = 0;
3029 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
3030 buf[i++] = 0x04;
3031 buf[i++] = 0x24;
3032 memcpy (&buf[i], &arg1, sizeof (arg1));
3033 i += 4;
3034 append_insns (&buildaddr, i, buf);
3035 current_insn_ptr = buildaddr;
3036 i386_emit_call (fn);
3037 EMIT_ASM32 (i386_void_call_2_b,
3038 "lea 0x10(%esp),%esp\n\t"
3039 /* Restore original stack top. */
3040 "pop %eax");
3041 }
3042
3043
3044 void
3045 i386_emit_eq_goto (int *offset_p, int *size_p)
3046 {
3047 EMIT_ASM32 (eq,
3048 /* Check the low half first; it's more likely to be the decider. */
3049 "cmpl %eax,(%esp)\n\t"
3050 "jne .Leq_fallthru\n\t"
3051 "cmpl %ebx,4(%esp)\n\t"
3052 "jne .Leq_fallthru\n\t"
3053 "lea 0x8(%esp),%esp\n\t"
3054 "pop %eax\n\t"
3055 "pop %ebx\n\t"
3056 /* jmp, but don't trust the assembler to choose the right jump */
3057 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3058 ".Leq_fallthru:\n\t"
3059 "lea 0x8(%esp),%esp\n\t"
3060 "pop %eax\n\t"
3061 "pop %ebx");
3062
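  /* With the usual encodings the sequence above occupies 18 bytes up to
     the 32-bit displacement: cmpl (%esp) = 3, jne = 2, cmpl 4(%esp) = 4,
     jne = 2, lea = 4, two pops = 1 each, 0xe9 opcode = 1.  The
     lt/le/gt/ge variants below carry one extra short conditional jump,
     hence their offset of 20.  */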
3063 if (offset_p)
3064 *offset_p = 18;
3065 if (size_p)
3066 *size_p = 4;
3067 }
3068
3069 void
3070 i386_emit_ne_goto (int *offset_p, int *size_p)
3071 {
3072 EMIT_ASM32 (ne,
3073 /* Check the low half first; it's more likely to be the decider. */
3074 "cmpl %eax,(%esp)\n\t"
3075 "jne .Lne_jump\n\t"
3076 "cmpl %ebx,4(%esp)\n\t"
3077 "je .Lne_fallthru\n\t"
3078 ".Lne_jump:\n\t"
3079 "lea 0x8(%esp),%esp\n\t"
3080 "pop %eax\n\t"
3081 "pop %ebx\n\t"
3082 /* jmp, but don't trust the assembler to choose the right jump */
3083 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3084 ".Lne_fallthru:\n\t"
3085 "lea 0x8(%esp),%esp\n\t"
3086 "pop %eax\n\t"
3087 "pop %ebx");
3088
3089 if (offset_p)
3090 *offset_p = 18;
3091 if (size_p)
3092 *size_p = 4;
3093 }
3094
3095 void
3096 i386_emit_lt_goto (int *offset_p, int *size_p)
3097 {
3098 EMIT_ASM32 (lt,
3099 "cmpl %ebx,4(%esp)\n\t"
3100 "jl .Llt_jump\n\t"
3101 "jne .Llt_fallthru\n\t"
3102 "cmpl %eax,(%esp)\n\t"
3103 "jnl .Llt_fallthru\n\t"
3104 ".Llt_jump:\n\t"
3105 "lea 0x8(%esp),%esp\n\t"
3106 "pop %eax\n\t"
3107 "pop %ebx\n\t"
3108 /* jmp, but don't trust the assembler to choose the right jump */
3109 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3110 ".Llt_fallthru:\n\t"
3111 "lea 0x8(%esp),%esp\n\t"
3112 "pop %eax\n\t"
3113 "pop %ebx");
3114
3115 if (offset_p)
3116 *offset_p = 20;
3117 if (size_p)
3118 *size_p = 4;
3119 }
3120
3121 void
3122 i386_emit_le_goto (int *offset_p, int *size_p)
3123 {
3124 EMIT_ASM32 (le,
3125 "cmpl %ebx,4(%esp)\n\t"
3126 "jle .Lle_jump\n\t"
3127 "jne .Lle_fallthru\n\t"
3128 "cmpl %eax,(%esp)\n\t"
3129 "jnle .Lle_fallthru\n\t"
3130 ".Lle_jump:\n\t"
3131 "lea 0x8(%esp),%esp\n\t"
3132 "pop %eax\n\t"
3133 "pop %ebx\n\t"
3134 /* jmp, but don't trust the assembler to choose the right jump */
3135 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3136 ".Lle_fallthru:\n\t"
3137 "lea 0x8(%esp),%esp\n\t"
3138 "pop %eax\n\t"
3139 "pop %ebx");
3140
3141 if (offset_p)
3142 *offset_p = 20;
3143 if (size_p)
3144 *size_p = 4;
3145 }
3146
3147 void
3148 i386_emit_gt_goto (int *offset_p, int *size_p)
3149 {
3150 EMIT_ASM32 (gt,
3151 "cmpl %ebx,4(%esp)\n\t"
3152 "jg .Lgt_jump\n\t"
3153 "jne .Lgt_fallthru\n\t"
3154 "cmpl %eax,(%esp)\n\t"
3155 "jng .Lgt_fallthru\n\t"
3156 ".Lgt_jump:\n\t"
3157 "lea 0x8(%esp),%esp\n\t"
3158 "pop %eax\n\t"
3159 "pop %ebx\n\t"
3160 /* jmp, but don't trust the assembler to choose the right jump */
3161 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3162 ".Lgt_fallthru:\n\t"
3163 "lea 0x8(%esp),%esp\n\t"
3164 "pop %eax\n\t"
3165 "pop %ebx");
3166
3167 if (offset_p)
3168 *offset_p = 20;
3169 if (size_p)
3170 *size_p = 4;
3171 }
3172
3173 void
3174 i386_emit_ge_goto (int *offset_p, int *size_p)
3175 {
3176 EMIT_ASM32 (ge,
3177 "cmpl %ebx,4(%esp)\n\t"
3178 "jge .Lge_jump\n\t"
3179 "jne .Lge_fallthru\n\t"
3180 "cmpl %eax,(%esp)\n\t"
3181 "jnge .Lge_fallthru\n\t"
3182 ".Lge_jump:\n\t"
3183 "lea 0x8(%esp),%esp\n\t"
3184 "pop %eax\n\t"
3185 "pop %ebx\n\t"
3186 /* jmp, but don't trust the assembler to choose the right jump */
3187 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3188 ".Lge_fallthru:\n\t"
3189 "lea 0x8(%esp),%esp\n\t"
3190 "pop %eax\n\t"
3191 "pop %ebx");
3192
3193 if (offset_p)
3194 *offset_p = 20;
3195 if (size_p)
3196 *size_p = 4;
3197 }
3198
3199 struct emit_ops i386_emit_ops =
3200 {
3201 i386_emit_prologue,
3202 i386_emit_epilogue,
3203 i386_emit_add,
3204 i386_emit_sub,
3205 i386_emit_mul,
3206 i386_emit_lsh,
3207 i386_emit_rsh_signed,
3208 i386_emit_rsh_unsigned,
3209 i386_emit_ext,
3210 i386_emit_log_not,
3211 i386_emit_bit_and,
3212 i386_emit_bit_or,
3213 i386_emit_bit_xor,
3214 i386_emit_bit_not,
3215 i386_emit_equal,
3216 i386_emit_less_signed,
3217 i386_emit_less_unsigned,
3218 i386_emit_ref,
3219 i386_emit_if_goto,
3220 i386_emit_goto,
3221 i386_write_goto_address,
3222 i386_emit_const,
3223 i386_emit_call,
3224 i386_emit_reg,
3225 i386_emit_pop,
3226 i386_emit_stack_flush,
3227 i386_emit_zero_ext,
3228 i386_emit_swap,
3229 i386_emit_stack_adjust,
3230 i386_emit_int_call_1,
3231 i386_emit_void_call_2,
3232 i386_emit_eq_goto,
3233 i386_emit_ne_goto,
3234 i386_emit_lt_goto,
3235 i386_emit_le_goto,
3236 i386_emit_gt_goto,
3237 i386_emit_ge_goto
3238 };
3239
3240
3241 static struct emit_ops *
3242 x86_emit_ops (void)
3243 {
3244 #ifdef __x86_64__
3245 if (is_64bit_tdesc ())
3246 return &amd64_emit_ops;
3247 else
3248 #endif
3249 return &i386_emit_ops;
3250 }
3251
3252 /* Implementation of linux_target_ops method "sw_breakpoint_from_kind". */
3253
3254 static const gdb_byte *
3255 x86_sw_breakpoint_from_kind (int kind, int *size)
3256 {
3257 *size = x86_breakpoint_len;
3258 return x86_breakpoint;
3259 }
3260
3261 static int
3262 x86_supports_range_stepping (void)
3263 {
3264 return 1;
3265 }
3266
3267 /* Implementation of linux_target_ops method "supports_hardware_single_step".
3268 */
3269
3270 static int
3271 x86_supports_hardware_single_step (void)
3272 {
3273 return 1;
3274 }
3275
3276 /* This is initialized assuming an amd64 target.
3277 x86_arch_setup will adjust it to match the actual target, i386 or amd64. */
3278
3279 struct linux_target_ops the_low_target =
3280 {
3281 x86_arch_setup,
3282 x86_linux_regs_info,
3283 x86_cannot_fetch_register,
3284 x86_cannot_store_register,
3285 NULL, /* fetch_register */
3286 x86_get_pc,
3287 x86_set_pc,
3288 NULL, /* breakpoint_kind_from_pc */
3289 x86_sw_breakpoint_from_kind,
3290 NULL,
3291 1,
3292 x86_breakpoint_at,
3293 x86_supports_z_point_type,
3294 x86_insert_point,
3295 x86_remove_point,
3296 x86_stopped_by_watchpoint,
3297 x86_stopped_data_address,
3298 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3299 native i386 case (no registers smaller than an xfer unit), and are not
3300 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3301 NULL,
3302 NULL,
3303 /* need to fix up i386 siginfo if host is amd64 */
3304 x86_siginfo_fixup,
3305 x86_linux_new_process,
3306 x86_linux_new_thread,
3307 x86_linux_new_fork,
3308 x86_linux_prepare_to_resume,
3309 x86_linux_process_qsupported,
3310 x86_supports_tracepoints,
3311 x86_get_thread_area,
3312 x86_install_fast_tracepoint_jump_pad,
3313 x86_emit_ops,
3314 x86_get_min_fast_tracepoint_insn_len,
3315 x86_supports_range_stepping,
3316 NULL, /* breakpoint_kind_from_current_state */
3317 x86_supports_hardware_single_step,
3318 };
3319
3320 void
3321 initialize_low_arch (void)
3322 {
3323 /* Initialize the Linux target descriptions. */
3324 #ifdef __x86_64__
3325 init_registers_amd64_linux ();
3326 init_registers_amd64_avx_linux ();
3327 init_registers_amd64_avx512_linux ();
3328 init_registers_amd64_mpx_linux ();
3329
3330 init_registers_x32_linux ();
3331 init_registers_x32_avx_linux ();
3332 init_registers_x32_avx512_linux ();
3333
3334 tdesc_amd64_linux_no_xml = XNEW (struct target_desc);
3335 copy_target_description (tdesc_amd64_linux_no_xml, tdesc_amd64_linux);
3336 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
3337 #endif
3338 init_registers_i386_linux ();
3339 init_registers_i386_mmx_linux ();
3340 init_registers_i386_avx_linux ();
3341 init_registers_i386_avx512_linux ();
3342 init_registers_i386_mpx_linux ();
3343
3344 tdesc_i386_linux_no_xml = XNEW (struct target_desc);
3345 copy_target_description (tdesc_i386_linux_no_xml, tdesc_i386_linux);
3346 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
3347
3348 initialize_regsets_info (&x86_regsets_info);
3349 }