/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2015 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include <signal.h>
#include <limits.h>
#include <inttypes.h>
#include "linux-low.h"
#include "i387-fp.h"
#include "x86-low.h"
#include "x86-xstate.h"

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "agent.h"
#include "tdesc.h"
#include "tracepoint.h"
#include "ax.h"
#include "nat/linux-nat.h"

#ifdef __x86_64__
/* Defined in auto-generated file amd64-linux.c.  */
void init_registers_amd64_linux (void);
extern const struct target_desc *tdesc_amd64_linux;

/* Defined in auto-generated file amd64-avx-linux.c.  */
void init_registers_amd64_avx_linux (void);
extern const struct target_desc *tdesc_amd64_avx_linux;

/* Defined in auto-generated file amd64-avx512-linux.c.  */
void init_registers_amd64_avx512_linux (void);
extern const struct target_desc *tdesc_amd64_avx512_linux;

/* Defined in auto-generated file amd64-mpx-linux.c.  */
void init_registers_amd64_mpx_linux (void);
extern const struct target_desc *tdesc_amd64_mpx_linux;

/* Defined in auto-generated file x32-linux.c.  */
void init_registers_x32_linux (void);
extern const struct target_desc *tdesc_x32_linux;

/* Defined in auto-generated file x32-avx-linux.c.  */
void init_registers_x32_avx_linux (void);
extern const struct target_desc *tdesc_x32_avx_linux;

/* Defined in auto-generated file x32-avx512-linux.c.  */
void init_registers_x32_avx512_linux (void);
extern const struct target_desc *tdesc_x32_avx512_linux;

#endif

/* Defined in auto-generated file i386-linux.c.  */
void init_registers_i386_linux (void);
extern const struct target_desc *tdesc_i386_linux;

/* Defined in auto-generated file i386-mmx-linux.c.  */
void init_registers_i386_mmx_linux (void);
extern const struct target_desc *tdesc_i386_mmx_linux;

/* Defined in auto-generated file i386-avx-linux.c.  */
void init_registers_i386_avx_linux (void);
extern const struct target_desc *tdesc_i386_avx_linux;

/* Defined in auto-generated file i386-avx512-linux.c.  */
void init_registers_i386_avx512_linux (void);
extern const struct target_desc *tdesc_i386_avx512_linux;

/* Defined in auto-generated file i386-mpx-linux.c.  */
void init_registers_i386_mpx_linux (void);
extern const struct target_desc *tdesc_i386_mpx_linux;

#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;

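/* Instruction templates used when wiring in fast tracepoint jumps: a
   5-byte "jmp rel32" (opcode 0xe9) and a 4-byte "jmp rel16" (0x66 0xe9).
   The zeroed offset fields are patched in when a jump pad is installed.  */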
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };

/* Backward compatibility for gdb without XML support.  */

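/* The leading '@' marks these strings as verbatim XML target-description
   documents, rather than the name of an XML file to read.  */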
static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/ptrace.h>
#include <sys/uio.h>

#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET 0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET 0x4205
#endif


#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};

/* Per-thread arch-specific data we want to keep.  */

struct arch_lwp_info
{
  /* Non-zero if our copy differs from what's recorded in the thread.  */
  int debug_registers_changed;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So that the code below doesn't have to care whether it's i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8

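/* Entries of -1 below mark registers that have no slot in the `struct
   user' layout; they are skipped by the gregset fill/store routines and
   transferred via regsets instead.  */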
static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
  -1, -1, -1, -1,                       /* MPX registers BND0 ... BND3.  */
  -1, -1,                               /* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1,       /* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,       /* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,       /* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,       /* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif

#ifdef __x86_64__

/* Returns true if the current inferior belongs to an x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

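  /* Register 0 is RAX/EAX; it is eight bytes wide only in the 64-bit
     target descriptions.  */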
  return register_size (regcache->tdesc, 0) == 8;
}

#endif

\f
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
		    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
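      /* PTRACE_ARCH_PRCTL takes the output buffer as the ptrace ADDR
	 argument and the ARCH_GET_* code as the DATA argument, hence the
	 seemingly reversed parameter order below.  */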
      switch (idx)
	{
	case FS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
	    return PS_OK;
	  break;
	case GS:
	  if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
	    return PS_OK;
	  break;
	default:
	  return PS_BADADDR;
	}
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
		(void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64 bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* Shift away the selector's RPL and
				      TI bits to get the GDT index.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (thr),
		(void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}


\f
static int
x86_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static int
x86_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * REGSIZE);
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * REGSIZE);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  { 0, 0, 0, -1, -1, NULL, NULL }
};

static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      unsigned long pc;
      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      unsigned int pc;
      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      unsigned long newpc = pc;
      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      unsigned int newpc = pc;
      supply_register_by_name (regcache, "eip", &newpc);
    }
}
\f
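/* 0xCC is the one-byte "int3" trap instruction; writing it over the
   first byte of an instruction is how software breakpoints are planted
   on x86.  */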
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  (*the_target->read_memory) (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}
\f

/* Return the offset of REGNUM in the u_debugreg field of struct
   user.  */

static int
u_debugreg_offset (int regnum)
{
  return (offsetof (struct user, u_debugreg)
	  + sizeof (((struct user *) 0)->u_debugreg[0]) * regnum);
}
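/* For example, u_debugreg_offset (7) yields the offset of u_debugreg[7]
   (the DR7 control register) within `struct user', suitable as the ADDR
   argument of the PTRACE_PEEKUSER/PTRACE_POKEUSER calls below.  */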


/* Support for debug registers.  */

static unsigned long
x86_linux_dr_get (ptid_t ptid, int regnum)
{
  int tid;
  unsigned long value;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  value = ptrace (PTRACE_PEEKUSER, tid, u_debugreg_offset (regnum), 0);
  if (errno != 0)
    error ("Couldn't read debug register");

  return value;
}

static void
x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
{
  int tid;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  ptrace (PTRACE_POKEUSER, tid, u_debugreg_offset (regnum), value);
  if (errno != 0)
    error ("Couldn't write debug register");
}

static int
update_debug_registers_callback (struct lwp_info *lwp, void *arg)
{
  /* The actual update is done later just before resuming the lwp;
     we just mark that the registers need updating.  */
  lwp->arch_private->debug_registers_changed = 1;

  /* If the lwp isn't stopped, force it to momentarily pause, so
     we can update its debug registers.  */
  if (!lwp_is_stopped (lwp))
    linux_stop_lwp (lwp);

  return 0;
}

/* Update the inferior's debug register REGNUM from STATE.  */

static void
x86_dr_low_set_addr (int regnum, CORE_ADDR addr)
{
  /* Only update the threads of this process.  */
  ptid_t pid_ptid = pid_to_ptid (ptid_get_pid (current_lwp_ptid ()));

  gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);

  iterate_over_lwps (pid_ptid, update_debug_registers_callback, NULL);
}

/* Return the inferior's debug register REGNUM.  */

static CORE_ADDR
x86_dr_low_get_addr (int regnum)
{
  gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);

  return x86_linux_dr_get (current_lwp_ptid (), regnum);
}

/* Update the inferior's DR7 debug control register from STATE.  */

static void
x86_dr_low_set_control (unsigned long control)
{
  /* Only update the threads of this process.  */
  ptid_t pid_ptid = pid_to_ptid (ptid_get_pid (current_lwp_ptid ()));

  iterate_over_lwps (pid_ptid, update_debug_registers_callback, NULL);
}

/* Return the inferior's DR7 debug control register.  */

static unsigned long
x86_dr_low_get_control (void)
{
  return x86_linux_dr_get (current_lwp_ptid (), DR_CONTROL);
}

/* Return the value of the DR6 debug status register from the
   inferior.  */

static unsigned long
x86_dr_low_get_status (void)
{
  return x86_linux_dr_get (current_lwp_ptid (), DR_STATUS);
}

/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_dr_low_set_control,
    x86_dr_low_set_addr,
    x86_dr_low_get_addr,
    x86_dr_low_get_status,
    x86_dr_low_get_control,
    sizeof (void *),
  };
\f
/* Breakpoint/Watchpoint support.  */

static int
x86_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}

static int
x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
		  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_sw:
      return insert_memory_breakpoint (bp);

    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
		  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_sw:
      return remove_memory_breakpoint (bp);

    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
	enum target_hw_bp_type hw_type
	  = raw_bkpt_type_to_target_hw_bp_type (type);
	struct x86_debug_reg_state *state
	  = &proc->priv->arch_private->debug_reg_state;

	return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
				   &addr))
    return addr;
  return 0;
}
\f
/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a new thread is detected.  */

static void
x86_linux_new_thread (struct lwp_info *lwp)
{
  struct arch_lwp_info *info = XCNEW (struct arch_lwp_info);

  info->debug_registers_changed = 1;

  lwp->arch_private = info;
}

/* See nat/x86-dregs.h.  */

struct x86_debug_reg_state *
x86_debug_reg_state (pid_t pid)
{
  struct process_info *proc = find_process_pid (pid);

  return &proc->priv->arch_private->debug_reg_state;
}

/* Called when resuming a thread.
   If the debug regs have changed, update the thread's copies.  */

static void
x86_linux_prepare_to_resume (struct lwp_info *lwp)
{
  ptid_t ptid = ptid_of_lwp (lwp);
  int clear_status = 0;

  if (lwp->arch_private->debug_registers_changed)
    {
      struct x86_debug_reg_state *state
	= x86_debug_reg_state (ptid_get_pid (ptid));
      int i;

      x86_linux_dr_set (ptid, DR_CONTROL, 0);

      ALL_DEBUG_ADDRESS_REGISTERS (i)
	if (state->dr_ref_count[i] > 0)
	  {
	    x86_linux_dr_set (ptid, i, state->dr_mirror[i]);

	    /* If we're setting a watchpoint, any change the inferior
	       had done itself to the debug registers needs to be
	       discarded; otherwise, x86_dr_stopped_data_address can
	       get confused.  */
	    clear_status = 1;
	  }

      if (state->dr_control_mirror != 0)
	x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);

      lwp->arch_private->debug_registers_changed = 0;
    }

  if (clear_status
      || lwp_stop_reason (lwp) == TARGET_STOPPED_BY_WATCHPOINT)
    x86_linux_dr_set (ptid, DR_STATUS, 0);
}
\f
/* When GDBSERVER is built as a 64-bit application on Linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* These types below (compat_*) define a siginfo type that is layout
   compatible with the siginfo type exported by the 32-bit userspace
   support.  */

#ifdef __x86_64__

typedef int compat_int_t;
typedef unsigned int compat_uptr_t;

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;

struct compat_timeval
{
  compat_time_t tv_sec;
  int tv_usec;
};

typedef union compat_sigval
{
  compat_int_t sival_int;
  compat_uptr_t sival_ptr;
} compat_sigval_t;

typedef struct compat_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_clock_t _utime;
      compat_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_siginfo_t;

/* For x32, clock_t in _sigchld is 64 bits wide but only 4-byte aligned.  */
typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t;

typedef struct compat_x32_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_x32_clock_t _utime;
      compat_x32_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_x32_siginfo_t __attribute__ ((__aligned__ (8)));

#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

/* glibc at least up to 2.3.2 doesn't have si_timerid and si_overrun.
   In their place are si_timer1 and si_timer2.  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif

static void
compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_status = from->si_status;
	  to->cpt_si_utime = from->si_utime;
	  to->cpt_si_stime = from->si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->cpt_si_addr = (intptr_t) from->si_addr;
	  break;
	case SIGPOLL:
	  to->cpt_si_band = from->si_band;
	  to->cpt_si_fd = from->si_fd;
	  break;
	default:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_ptr = (intptr_t) from->si_ptr;
	  break;
	}
    }
}

static void
siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_status = from->cpt_si_status;
	  to->si_utime = from->cpt_si_utime;
	  to->si_stime = from->cpt_si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
	  break;
	case SIGPOLL:
	  to->si_band = from->cpt_si_band;
	  to->si_fd = from->cpt_si_fd;
	  break;
	default:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
	  break;
	}
    }
}

static void
compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t *to,
				 siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_status = from->si_status;
	  to->cpt_si_utime = from->si_utime;
	  to->cpt_si_stime = from->si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->cpt_si_addr = (intptr_t) from->si_addr;
	  break;
	case SIGPOLL:
	  to->cpt_si_band = from->si_band;
	  to->cpt_si_fd = from->si_fd;
	  break;
	default:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_ptr = (intptr_t) from->si_ptr;
	  break;
	}
    }
}

static void
siginfo_from_compat_x32_siginfo (siginfo_t *to,
				 compat_x32_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_status = from->cpt_si_status;
	  to->si_utime = from->cpt_si_utime;
	  to->si_stime = from->cpt_si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
	  break;
	case SIGPOLL:
	  to->si_band = from->cpt_si_band;
	  to->si_fd = from->cpt_si_fd;
	  break;
	default:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
	  break;
	}
    }
}

#endif /* __x86_64__ */

/* Convert a native/host siginfo object into/from the siginfo in the
   layout of the inferior's architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to NATIVE.  If DIRECTION is 0, copy from NATIVE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *native, void *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      gdb_assert (sizeof (siginfo_t) == sizeof (compat_siginfo_t));

      if (direction == 0)
	compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
      else
	siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);

      return 1;
    }
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    {
      gdb_assert (sizeof (siginfo_t) == sizeof (compat_x32_siginfo_t));

      if (direction == 0)
	compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo *) inf,
					 native);
      else
	siginfo_from_compat_x32_siginfo (native,
					 (struct compat_x32_siginfo *) inf);

      return 1;
    }
#endif

  return 0;
}
\f
static int use_xml;

/* Format of XSAVE extended state is:
	struct
	{
	  fxsave_bytes[0..463]
	  sw_usable_bytes[464..511]
	  xstate_hdr_bytes[512..575]
	  avx_bytes[576..831]
	  future_state etc
	};

  The same memory layout is used for the coredump NT_X86_XSTATE note
  representing the XSAVE extended state registers.

  The first 8 bytes of the sw_usable_bytes area (bytes 464..471) hold
  the OS-enabled extended state mask, which is the same as the extended
  control register 0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can
  use this mask together with the mask saved in the xstate_hdr_bytes to
  determine what states the processor/OS supports and what state, used
  or initialized, the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Does the current host support PTRACE_GETREGSET?  */
static int have_ptrace_getregset = -1;
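/* In both of the flags above, -1 means "unknown; probe on first use",
   while 0 and 1 cache the probed answer.  */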

/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
	error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
	error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
	{
	  have_ptrace_getfpxregs = 0;
	  have_ptrace_getregset = 0;
	  return tdesc_i386_mmx_linux;
	}
      else
	have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
	return tdesc_amd64_linux_no_xml;
      else
#endif
	return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
		  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
	have_ptrace_getregset = 0;
      else
	{
	  have_ptrace_getregset = 1;

	  /* Get XCR0 from XSAVE extended state.  */
	  xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
			     / sizeof (uint64_t))];

	  /* Use PTRACE_GETREGSET if it is available.  */
	  for (regset = x86_regsets;
	       regset->fill_function != NULL; regset++)
	    if (regset->get_request == PTRACE_GETREGSET)
	      regset->size = X86_XSTATE_SIZE (xcr0);
	    else if (regset->type != GENERAL_REGS)
	      regset->size = 0;
	}
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
		   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      if (is_elf64)
	{
	  if (xcr0_features)
	    {
	      switch (xcr0 & X86_XSTATE_ALL_MASK)
		{
		case X86_XSTATE_AVX512_MASK:
		  return tdesc_amd64_avx512_linux;

		case X86_XSTATE_MPX_MASK:
		  return tdesc_amd64_mpx_linux;

		case X86_XSTATE_AVX_MASK:
		  return tdesc_amd64_avx_linux;

		default:
		  return tdesc_amd64_linux;
		}
	    }
	  else
	    return tdesc_amd64_linux;
	}
      else
	{
	  if (xcr0_features)
	    {
	      switch (xcr0 & X86_XSTATE_ALL_MASK)
		{
		case X86_XSTATE_AVX512_MASK:
		  return tdesc_x32_avx512_linux;

		case X86_XSTATE_MPX_MASK: /* No MPX on x32.  */
		case X86_XSTATE_AVX_MASK:
		  return tdesc_x32_avx_linux;

		default:
		  return tdesc_x32_linux;
		}
	    }
	  else
	    return tdesc_x32_linux;
	}
#endif
    }
  else
    {
      if (xcr0_features)
	{
	  switch (xcr0 & X86_XSTATE_ALL_MASK)
	    {
	    case (X86_XSTATE_AVX512_MASK):
	      return tdesc_i386_avx512_linux;

	    case (X86_XSTATE_MPX_MASK):
	      return tdesc_i386_mpx_linux;

	    case (X86_XSTATE_AVX_MASK):
	      return tdesc_i386_avx_linux;

	    default:
	      return tdesc_i386_linux;
	    }
	}
      else
	return tdesc_i386_linux;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}

/* Callback for find_inferior.  Stops iteration when a thread with a
   given PID is found.  */

static int
same_process_callback (struct inferior_list_entry *entry, void *data)
{
  int pid = *(int *) data;

  return (ptid_get_pid (entry->id) == pid);
}

/* Callback for for_each_inferior.  Calls the arch_setup routine for
   each process.  */

static void
x86_arch_setup_process_callback (struct inferior_list_entry *entry)
{
  int pid = ptid_get_pid (entry->id);

  /* Look up any thread of this process.  */
  current_thread
    = (struct thread_info *) find_inferior (&all_threads,
					    same_process_callback, &pid);

  the_low_target.arch_setup ();
}

/* Update the target descriptions of all processes; a new GDB has
   connected, and it may or may not support XML target descriptions.  */

static void
x86_linux_update_xmltarget (void)
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_inferior (&all_processes, x86_arch_setup_process_callback);

  current_thread = saved_thread;
}

/* Process the qSupported query, "xmlRegisters=".  Update the buffer
   size for PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (const char *query)
{
  /* Assume GDB doesn't support XML unless it sends "xmlRegisters="
     with "i386" in the qSupported query; that indicates it supports
     x86 XML target descriptions.  */
  use_xml = 0;
  if (query != NULL && startswith (query, "xmlRegisters="))
    {
      char *copy = xstrdup (query + 13);
      char *p;

      for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
	{
	  if (strcmp (p, "i386") == 0)
	    {
	      use_xml = 1;
	      break;
	    }
	}

      free (copy);
    }

  x86_linux_update_xmltarget ();
}

/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const struct regs_info *
x86_linux_regs_info (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}

/* Initialize the target description for the architecture of the
   inferior.  */

static void
x86_arch_setup (void)
{
  current_process ()->tdesc = x86_linux_read_description ();
}

static int
x86_supports_tracepoints (void)
{
  return 1;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
	break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
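/* For example, push_opcode (buf, "48 89 e6") parses the string as
   hexadecimal bytes, stores 0x48 0x89 0xe6 (mov %rsp,%rsi) into BUF,
   and returns 3.  */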
1623
1624 #ifdef __x86_64__
1625
1626 /* Build a jump pad that saves registers and calls a collection
1627 function. Writes a jump instruction to the jump pad to
1628 JJUMPAD_INSN. The caller is responsible to write it in at the
1629 tracepoint address. */
1630
1631 static int
1632 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1633 CORE_ADDR collector,
1634 CORE_ADDR lockaddr,
1635 ULONGEST orig_size,
1636 CORE_ADDR *jump_entry,
1637 CORE_ADDR *trampoline,
1638 ULONGEST *trampoline_size,
1639 unsigned char *jjump_pad_insn,
1640 ULONGEST *jjump_pad_insn_size,
1641 CORE_ADDR *adjusted_insn_addr,
1642 CORE_ADDR *adjusted_insn_addr_end,
1643 char *err)
1644 {
1645 unsigned char buf[40];
1646 int i, offset;
1647 int64_t loffset;
1648
1649 CORE_ADDR buildaddr = *jump_entry;
1650
1651 /* Build the jump pad. */
1652
1653 /* First, do tracepoint data collection. Save registers. */
1654 i = 0;
1655 /* Need to ensure stack pointer saved first. */
1656 buf[i++] = 0x54; /* push %rsp */
1657 buf[i++] = 0x55; /* push %rbp */
1658 buf[i++] = 0x57; /* push %rdi */
1659 buf[i++] = 0x56; /* push %rsi */
1660 buf[i++] = 0x52; /* push %rdx */
1661 buf[i++] = 0x51; /* push %rcx */
1662 buf[i++] = 0x53; /* push %rbx */
1663 buf[i++] = 0x50; /* push %rax */
1664 buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
1665 buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
1666 buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
1667 buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
1668 buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
1669 buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
1670 buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
1671 buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
1672 buf[i++] = 0x9c; /* pushfq */
1673 buf[i++] = 0x48; /* movl <addr>,%rdi */
1674 buf[i++] = 0xbf;
1675 *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
1676 i += sizeof (unsigned long);
1677 buf[i++] = 0x57; /* push %rdi */
1678 append_insns (&buildaddr, i, buf);
1679
1680 /* Stack space for the collecting_t object. */
1681 i = 0;
1682 i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
1683 i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
1684 memcpy (buf + i, &tpoint, 8);
1685 i += 8;
1686 i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
1687 i += push_opcode (&buf[i],
1688 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1689 i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1690 append_insns (&buildaddr, i, buf);
1691
1692 /* spin-lock. */
1693 i = 0;
1694 i += push_opcode (&buf[i], "48 be"); /* movl <lockaddr>,%rsi */
1695 memcpy (&buf[i], (void *) &lockaddr, 8);
1696 i += 8;
1697 i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
1698 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1699 i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1700 i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
1701 i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
1702 append_insns (&buildaddr, i, buf);
1703
1704 /* Set up the gdb_collect call. */
1705 /* At this point, (stack pointer + 0x18) is the base of our saved
1706 register block. */
1707
1708 i = 0;
1709 i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
1710 i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */
1711
1712 /* tpoint address may be 64-bit wide. */
1713 i += push_opcode (&buf[i], "48 bf"); /* movl <addr>,%rdi */
1714 memcpy (buf + i, &tpoint, 8);
1715 i += 8;
1716 append_insns (&buildaddr, i, buf);
1717
1718 /* The collector function being in the shared library, may be
1719 >31-bits away off the jump pad. */
1720 i = 0;
1721 i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
1722 memcpy (buf + i, &collector, 8);
1723 i += 8;
1724 i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
1725 append_insns (&buildaddr, i, buf);
1726
1727 /* Clear the spin-lock. */
1728 i = 0;
1729 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1730 i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
1731 memcpy (buf + i, &lockaddr, 8);
1732 i += 8;
1733 append_insns (&buildaddr, i, buf);
1734
1735 /* Remove stack that had been used for the collect_t object. */
1736 i = 0;
1737 i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
1738 append_insns (&buildaddr, i, buf);
1739
1740 /* Restore register state. */
1741 i = 0;
1742 buf[i++] = 0x48; /* add $0x8,%rsp */
1743 buf[i++] = 0x83;
1744 buf[i++] = 0xc4;
1745 buf[i++] = 0x08;
1746 buf[i++] = 0x9d; /* popfq */
1747 buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
1748 buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
1749 buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
1750 buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
1751 buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
1752 buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
1753 buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
1754 buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
1755 buf[i++] = 0x58; /* pop %rax */
1756 buf[i++] = 0x5b; /* pop %rbx */
1757 buf[i++] = 0x59; /* pop %rcx */
1758 buf[i++] = 0x5a; /* pop %rdx */
1759 buf[i++] = 0x5e; /* pop %rsi */
1760 buf[i++] = 0x5f; /* pop %rdi */
1761 buf[i++] = 0x5d; /* pop %rbp */
1762 buf[i++] = 0x5c; /* pop %rsp */
1763 append_insns (&buildaddr, i, buf);
1764
1765 /* Now, adjust the original instruction to execute in the jump
1766 pad. */
1767 *adjusted_insn_addr = buildaddr;
1768 relocate_instruction (&buildaddr, tpaddr);
1769 *adjusted_insn_addr_end = buildaddr;
1770
1771 /* Finally, write a jump back to the program. */
1772
1773 loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1774 if (loffset > INT_MAX || loffset < INT_MIN)
1775 {
1776 sprintf (err,
1777 "E.Jump back from jump pad too far from tracepoint "
1778 "(offset 0x%" PRIx64 " > int32).", loffset);
1779 return 1;
1780 }
1781
1782 offset = (int) loffset;
1783 memcpy (buf, jump_insn, sizeof (jump_insn));
1784 memcpy (buf + 1, &offset, 4);
1785 append_insns (&buildaddr, sizeof (jump_insn), buf);
1786
1787 /* The jump pad is now built. Wire in a jump to our jump pad. This
1788 is always done last (by our caller actually), so that we can
1789 install fast tracepoints with threads running. This relies on
1790 the agent's atomic write support. */
1791 loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
1792 if (loffset > INT_MAX || loffset < INT_MIN)
1793 {
1794 sprintf (err,
1795 "E.Jump pad too far from tracepoint "
1796 "(offset 0x%" PRIx64 " > int32).", loffset);
1797 return 1;
1798 }
1799
1800 offset = (int) loffset;
1801
1802 memcpy (buf, jump_insn, sizeof (jump_insn));
1803 memcpy (buf + 1, &offset, 4);
1804 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1805 *jjump_pad_insn_size = sizeof (jump_insn);
1806
1807 /* Return the end address of our pad. */
1808 *jump_entry = buildaddr;
1809
1810 return 0;
1811 }
1812
1813 #endif /* __x86_64__ */
1814
1815 /* Build a jump pad that saves registers and calls a collection
1816 function. Writes a jump instruction to the jump pad to
1817 JJUMPAD_INSN. The caller is responsible to write it in at the
1818 tracepoint address. */
1819
1820 static int
1821 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1822 CORE_ADDR collector,
1823 CORE_ADDR lockaddr,
1824 ULONGEST orig_size,
1825 CORE_ADDR *jump_entry,
1826 CORE_ADDR *trampoline,
1827 ULONGEST *trampoline_size,
1828 unsigned char *jjump_pad_insn,
1829 ULONGEST *jjump_pad_insn_size,
1830 CORE_ADDR *adjusted_insn_addr,
1831 CORE_ADDR *adjusted_insn_addr_end,
1832 char *err)
1833 {
1834 unsigned char buf[0x100];
1835 int i, offset;
1836 CORE_ADDR buildaddr = *jump_entry;
1837
1838 /* Build the jump pad. */
1839
1840 /* First, do tracepoint data collection. Save registers. */
1841 i = 0;
1842 buf[i++] = 0x60; /* pushad */
1843 buf[i++] = 0x68; /* push tpaddr aka $pc */
1844 *((int *)(buf + i)) = (int) tpaddr;
1845 i += 4;
1846 buf[i++] = 0x9c; /* pushf */
1847 buf[i++] = 0x1e; /* push %ds */
1848 buf[i++] = 0x06; /* push %es */
1849 buf[i++] = 0x0f; /* push %fs */
1850 buf[i++] = 0xa0;
1851 buf[i++] = 0x0f; /* push %gs */
1852 buf[i++] = 0xa8;
1853 buf[i++] = 0x16; /* push %ss */
1854 buf[i++] = 0x0e; /* push %cs */
1855 append_insns (&buildaddr, i, buf);
1856
1857 /* Stack space for the collecting_t object. */
1858 i = 0;
1859 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1860
1861 /* Build the object. */
1862 i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
1863 memcpy (buf + i, &tpoint, 4);
1864 i += 4;
1865 i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */
1866
1867 i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1868 i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1869 append_insns (&buildaddr, i, buf);
1870
1871 /* spin-lock. Note this is using cmpxchg, which leaves i386 behind.
1872 If we cared for it, this could be using xchg alternatively. */
1873
1874 i = 0;
1875 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1876 i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
1877 %esp,<lockaddr> */
1878 memcpy (&buf[i], (void *) &lockaddr, 4);
1879 i += 4;
1880 i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
1881 i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
1882 append_insns (&buildaddr, i, buf);
1883
1884
1885 /* Set up arguments to the gdb_collect call. */
1886 i = 0;
1887 i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
1888 i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
1889 i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1890 append_insns (&buildaddr, i, buf);
1891
1892 i = 0;
1893 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1894 append_insns (&buildaddr, i, buf);
1895
1896 i = 0;
1897 i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
1898 memcpy (&buf[i], (void *) &tpoint, 4);
1899 i += 4;
1900 append_insns (&buildaddr, i, buf);
1901
1902 buf[0] = 0xe8; /* call <reladdr> */
1903 offset = collector - (buildaddr + sizeof (jump_insn));
1904 memcpy (buf + 1, &offset, 4);
1905 append_insns (&buildaddr, 5, buf);
1906 /* Clean up after the call. */
1907 buf[0] = 0x83; /* add $0x8,%esp */
1908 buf[1] = 0xc4;
1909 buf[2] = 0x08;
1910 append_insns (&buildaddr, 3, buf);
1911
1912
1913 /* Clear the spin-lock. This would need the LOCK prefix on older
1914 broken archs. */
1915 i = 0;
1916 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1917 i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
1918 memcpy (buf + i, &lockaddr, 4);
1919 i += 4;
1920 append_insns (&buildaddr, i, buf);
1921
1922
1923 /* Remove stack that had been used for the collect_t object. */
1924 i = 0;
1925 i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
1926 append_insns (&buildaddr, i, buf);
1927
1928 i = 0;
1929 buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1930 buf[i++] = 0xc4;
1931 buf[i++] = 0x04;
1932 buf[i++] = 0x17; /* pop %ss */
1933 buf[i++] = 0x0f; /* pop %gs */
1934 buf[i++] = 0xa9;
1935 buf[i++] = 0x0f; /* pop %fs */
1936 buf[i++] = 0xa1;
1937 buf[i++] = 0x07; /* pop %es */
1938 buf[i++] = 0x1f; /* pop %ds */
1939 buf[i++] = 0x9d; /* popf */
1940 buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1941 buf[i++] = 0xc4;
1942 buf[i++] = 0x04;
1943 buf[i++] = 0x61; /* popad */
1944 append_insns (&buildaddr, i, buf);
1945
1946 /* Now, adjust the original instruction to execute in the jump
1947 pad. */
1948 *adjusted_insn_addr = buildaddr;
1949 relocate_instruction (&buildaddr, tpaddr);
1950 *adjusted_insn_addr_end = buildaddr;
1951
1952 /* Write the jump back to the program. */
1953 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1954 memcpy (buf, jump_insn, sizeof (jump_insn));
1955 memcpy (buf + 1, &offset, 4);
1956 append_insns (&buildaddr, sizeof (jump_insn), buf);
1957
1958 /* The jump pad is now built. Wire in a jump to our jump pad. This
1959 is always done last (by our caller actually), so that we can
1960 install fast tracepoints with threads running. This relies on
1961 the agent's atomic write support. */
1962 if (orig_size == 4)
1963 {
1964 /* Create a trampoline. */
1965 *trampoline_size = sizeof (jump_insn);
1966 if (!claim_trampoline_space (*trampoline_size, trampoline))
1967 {
1968 /* No trampoline space available. */
1969 strcpy (err,
1970 "E.Cannot allocate trampoline space needed for fast "
1971 "tracepoints on 4-byte instructions.");
1972 return 1;
1973 }
1974
1975 offset = *jump_entry - (*trampoline + sizeof (jump_insn));
1976 memcpy (buf, jump_insn, sizeof (jump_insn));
1977 memcpy (buf + 1, &offset, 4);
1978 write_inferior_memory (*trampoline, buf, sizeof (jump_insn));
1979
1980 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1981 offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
1982 memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
1983 memcpy (buf + 2, &offset, 2);
1984 memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
1985 *jjump_pad_insn_size = sizeof (small_jump_insn);
1986 }
1987 else
1988 {
1989 /* Else use a 32-bit relative jump instruction. */
1990 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1991 memcpy (buf, jump_insn, sizeof (jump_insn));
1992 memcpy (buf + 1, &offset, 4);
1993 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1994 *jjump_pad_insn_size = sizeof (jump_insn);
1995 }
1996
1997 /* Return the end address of our pad. */
1998 *jump_entry = buildaddr;
1999
2000 return 0;
2001 }
2002
2003 static int
2004 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
2005 CORE_ADDR collector,
2006 CORE_ADDR lockaddr,
2007 ULONGEST orig_size,
2008 CORE_ADDR *jump_entry,
2009 CORE_ADDR *trampoline,
2010 ULONGEST *trampoline_size,
2011 unsigned char *jjump_pad_insn,
2012 ULONGEST *jjump_pad_insn_size,
2013 CORE_ADDR *adjusted_insn_addr,
2014 CORE_ADDR *adjusted_insn_addr_end,
2015 char *err)
2016 {
2017 #ifdef __x86_64__
2018 if (is_64bit_tdesc ())
2019 return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
2020 collector, lockaddr,
2021 orig_size, jump_entry,
2022 trampoline, trampoline_size,
2023 jjump_pad_insn,
2024 jjump_pad_insn_size,
2025 adjusted_insn_addr,
2026 adjusted_insn_addr_end,
2027 err);
2028 #endif
2029
2030 return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
2031 collector, lockaddr,
2032 orig_size, jump_entry,
2033 trampoline, trampoline_size,
2034 jjump_pad_insn,
2035 jjump_pad_insn_size,
2036 adjusted_insn_addr,
2037 adjusted_insn_addr_end,
2038 err);
2039 }
2040
2041 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
2042 architectures. */
2043
2044 static int
2045 x86_get_min_fast_tracepoint_insn_len (void)
2046 {
2047 static int warned_about_fast_tracepoints = 0;
2048
2049 #ifdef __x86_64__
2050 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
2051 used for fast tracepoints. */
2052 if (is_64bit_tdesc ())
2053 return 5;
2054 #endif
2055
2056 if (agent_loaded_p ())
2057 {
2058 char errbuf[IPA_BUFSIZ];
2059
2060 errbuf[0] = '\0';
2061
2062 /* On x86, if trampolines are available, then 4-byte jump instructions
2063 with a 2-byte offset may be used, otherwise 5-byte jump instructions
2064 with a 4-byte offset are used instead. */
2065 if (have_fast_tracepoint_trampoline_buffer (errbuf))
2066 return 4;
2067 else
2068 {
2069 /* GDB has no channel to explain to the user why a shorter fast
2070 tracepoint is not possible, but at least make GDBserver
2071 mention that something has gone awry. */
2072 if (!warned_about_fast_tracepoints)
2073 {
2074 warning ("4-byte fast tracepoints not available; %s", errbuf);
2075 warned_about_fast_tracepoints = 1;
2076 }
2077 return 5;
2078 }
2079 }
2080 else
2081 {
2082 /* Indicate that the minimum length is currently unknown since the IPA
2083 has not loaded yet. */
2084 return 0;
2085 }
2086 }
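/* For reference, the lengths returned above correspond to these jump
encodings (the byte patterns themselves live in the
*_install_fast_tracepoint_jump_pad routines):

e9 xx xx xx xx    jmp rel32  -- 5 bytes, always available
66 e9 xx xx       jmp rel16  -- 4 bytes, requires a trampoline
reachable with a signed 16-bit offset.  */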
2087
2088 static void
2089 add_insns (unsigned char *start, int len)
2090 {
2091 CORE_ADDR buildaddr = current_insn_ptr;
2092
2093 if (debug_threads)
2094 debug_printf ("Adding %d bytes of insn at %s\n",
2095 len, paddress (buildaddr));
2096
2097 append_insns (&buildaddr, len, start);
2098 current_insn_ptr = buildaddr;
2099 }
2100
2101 /* Our general strategy for emitting code is to avoid specifying raw
2102 bytes whenever possible, and instead copy a block of inline asm
2103 that is embedded in the function. This is a little messy, because
2104 we need to keep the compiler from discarding what looks like dead
2105 code, plus suppress various warnings. */
2106
2107 #define EMIT_ASM(NAME, INSNS) \
2108 do \
2109 { \
2110 extern unsigned char start_ ## NAME, end_ ## NAME; \
2111 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2112 __asm__ ("jmp end_" #NAME "\n" \
2113 "\t" "start_" #NAME ":" \
2114 "\t" INSNS "\n" \
2115 "\t" "end_" #NAME ":"); \
2116 } while (0)
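/* For illustration, EMIT_ASM (sample, "pop %rax") expands to roughly:

extern unsigned char start_sample, end_sample;
add_insns (&start_sample, &end_sample - &start_sample);
__asm__ ("jmp end_sample\n"
"\t" "start_sample:" "\t" "pop %rax\n"
"\t" "end_sample:");

The leading jmp keeps the host from ever executing the template
bytes; they exist only so add_insns can copy them into the
inferior.  */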
2117
2118 #ifdef __x86_64__
2119
2120 #define EMIT_ASM32(NAME,INSNS) \
2121 do \
2122 { \
2123 extern unsigned char start_ ## NAME, end_ ## NAME; \
2124 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2125 __asm__ (".code32\n" \
2126 "\t" "jmp end_" #NAME "\n" \
2127 "\t" "start_" #NAME ":\n" \
2128 "\t" INSNS "\n" \
2129 "\t" "end_" #NAME ":\n" \
2130 ".code64\n"); \
2131 } while (0)
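/* The .code32/.code64 bracketing matters here: these templates run in
a 32-bit inferior, so they must be assembled with 32-bit operand and
address defaults even though gdbserver itself is a 64-bit binary.
As with EMIT_ASM, the host never executes the bytes.  */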
2132
2133 #else
2134
2135 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
2136
2137 #endif
2138
2139 #ifdef __x86_64__
2140
2141 static void
2142 amd64_emit_prologue (void)
2143 {
2144 EMIT_ASM (amd64_prologue,
2145 "pushq %rbp\n\t"
2146 "movq %rsp,%rbp\n\t"
2147 "sub $0x20,%rsp\n\t"
2148 "movq %rdi,-8(%rbp)\n\t"
2149 "movq %rsi,-16(%rbp)");
2150 }
2151
2152
2153 static void
2154 amd64_emit_epilogue (void)
2155 {
2156 EMIT_ASM (amd64_epilogue,
2157 "movq -16(%rbp),%rdi\n\t"
2158 "movq %rax,(%rdi)\n\t"
2159 "xor %rax,%rax\n\t"
2160 "leave\n\t"
2161 "ret");
2162 }
2163
2164 static void
2165 amd64_emit_add (void)
2166 {
2167 EMIT_ASM (amd64_add,
2168 "add (%rsp),%rax\n\t"
2169 "lea 0x8(%rsp),%rsp");
2170 }
2171
2172 static void
2173 amd64_emit_sub (void)
2174 {
2175 EMIT_ASM (amd64_sub,
2176 "sub %rax,(%rsp)\n\t"
2177 "pop %rax");
2178 }
2179
2180 static void
2181 amd64_emit_mul (void)
2182 {
2183 emit_error = 1;
2184 }
2185
2186 static void
2187 amd64_emit_lsh (void)
2188 {
2189 emit_error = 1;
2190 }
2191
2192 static void
2193 amd64_emit_rsh_signed (void)
2194 {
2195 emit_error = 1;
2196 }
2197
2198 static void
2199 amd64_emit_rsh_unsigned (void)
2200 {
2201 emit_error = 1;
2202 }
2203
2204 static void
2205 amd64_emit_ext (int arg)
2206 {
2207 switch (arg)
2208 {
2209 case 8:
2210 EMIT_ASM (amd64_ext_8,
2211 "cbtw\n\t"
2212 "cwtl\n\t"
2213 "cltq");
2214 break;
2215 case 16:
2216 EMIT_ASM (amd64_ext_16,
2217 "cwtl\n\t"
2218 "cltq");
2219 break;
2220 case 32:
2221 EMIT_ASM (amd64_ext_32,
2222 "cltq");
2223 break;
2224 default:
2225 emit_error = 1;
2226 }
2227 }
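/* For the arg == 8 case the cascade works like this: cbtw
sign-extends %al into %ax, cwtl extends %ax into %eax, and cltq
extends %eax into %rax, so a signed byte on the value stack becomes
a full 64-bit value.  The 16- and 32-bit cases simply start later
in the same chain.  */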
2228
2229 static void
2230 amd64_emit_log_not (void)
2231 {
2232 EMIT_ASM (amd64_log_not,
2233 "test %rax,%rax\n\t"
2234 "sete %cl\n\t"
2235 "movzbq %cl,%rax");
2236 }
2237
2238 static void
2239 amd64_emit_bit_and (void)
2240 {
2241 EMIT_ASM (amd64_and,
2242 "and (%rsp),%rax\n\t"
2243 "lea 0x8(%rsp),%rsp");
2244 }
2245
2246 static void
2247 amd64_emit_bit_or (void)
2248 {
2249 EMIT_ASM (amd64_or,
2250 "or (%rsp),%rax\n\t"
2251 "lea 0x8(%rsp),%rsp");
2252 }
2253
2254 static void
2255 amd64_emit_bit_xor (void)
2256 {
2257 EMIT_ASM (amd64_xor,
2258 "xor (%rsp),%rax\n\t"
2259 "lea 0x8(%rsp),%rsp");
2260 }
2261
2262 static void
2263 amd64_emit_bit_not (void)
2264 {
2265 EMIT_ASM (amd64_bit_not,
2266 "xorq $0xffffffffffffffff,%rax");
2267 }
2268
2269 static void
2270 amd64_emit_equal (void)
2271 {
2272 EMIT_ASM (amd64_equal,
2273 "cmp %rax,(%rsp)\n\t"
2274 "je .Lamd64_equal_true\n\t"
2275 "xor %rax,%rax\n\t"
2276 "jmp .Lamd64_equal_end\n\t"
2277 ".Lamd64_equal_true:\n\t"
2278 "mov $0x1,%rax\n\t"
2279 ".Lamd64_equal_end:\n\t"
2280 "lea 0x8(%rsp),%rsp");
2281 }
2282
2283 static void
2284 amd64_emit_less_signed (void)
2285 {
2286 EMIT_ASM (amd64_less_signed,
2287 "cmp %rax,(%rsp)\n\t"
2288 "jl .Lamd64_less_signed_true\n\t"
2289 "xor %rax,%rax\n\t"
2290 "jmp .Lamd64_less_signed_end\n\t"
2291 ".Lamd64_less_signed_true:\n\t"
2292 "mov $1,%rax\n\t"
2293 ".Lamd64_less_signed_end:\n\t"
2294 "lea 0x8(%rsp),%rsp");
2295 }
2296
2297 static void
2298 amd64_emit_less_unsigned (void)
2299 {
2300 EMIT_ASM (amd64_less_unsigned,
2301 "cmp %rax,(%rsp)\n\t"
2302 "jb .Lamd64_less_unsigned_true\n\t"
2303 "xor %rax,%rax\n\t"
2304 "jmp .Lamd64_less_unsigned_end\n\t"
2305 ".Lamd64_less_unsigned_true:\n\t"
2306 "mov $1,%rax\n\t"
2307 ".Lamd64_less_unsigned_end:\n\t"
2308 "lea 0x8(%rsp),%rsp");
2309 }
2310
2311 static void
2312 amd64_emit_ref (int size)
2313 {
2314 switch (size)
2315 {
2316 case 1:
2317 EMIT_ASM (amd64_ref1,
2318 "movb (%rax),%al");
2319 break;
2320 case 2:
2321 EMIT_ASM (amd64_ref2,
2322 "movw (%rax),%ax");
2323 break;
2324 case 4:
2325 EMIT_ASM (amd64_ref4,
2326 "movl (%rax),%eax");
2327 break;
2328 case 8:
2329 EMIT_ASM (amd64_ref8,
2330 "movq (%rax),%rax");
2331 break;
2332 }
2333 }
2334
2335 static void
2336 amd64_emit_if_goto (int *offset_p, int *size_p)
2337 {
2338 EMIT_ASM (amd64_if_goto,
2339 "mov %rax,%rcx\n\t"
2340 "pop %rax\n\t"
2341 "cmp $0,%rcx\n\t"
2342 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2343 if (offset_p)
2344 *offset_p = 10;
2345 if (size_p)
2346 *size_p = 4;
2347 }
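/* The ".byte 0x0f, 0x85" above is jne rel32 with a zero placeholder
displacement; amd64_write_goto_address patches it later.  Byte
layout: mov (3) + pop (1) + cmp (4) + 0f 85 (2) puts the 4-byte
displacement at offset 10, matching *offset_p above.  */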
2348
2349 static void
2350 amd64_emit_goto (int *offset_p, int *size_p)
2351 {
2352 EMIT_ASM (amd64_goto,
2353 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2354 if (offset_p)
2355 *offset_p = 1;
2356 if (size_p)
2357 *size_p = 4;
2358 }
2359
2360 static void
2361 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2362 {
2363 int diff = (to - (from + size));
2364 unsigned char buf[sizeof (int)];
2365
2366 if (size != 4)
2367 {
2368 emit_error = 1;
2369 return;
2370 }
2371
2372 memcpy (buf, &diff, sizeof (int));
2373 write_inferior_memory (from, buf, sizeof (int));
2374 }
2375
2376 static void
2377 amd64_emit_const (LONGEST num)
2378 {
2379 unsigned char buf[16];
2380 int i;
2381 CORE_ADDR buildaddr = current_insn_ptr;
2382
2383 i = 0;
2384 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
2385 memcpy (&buf[i], &num, sizeof (num));
2386 i += 8;
2387 append_insns (&buildaddr, i, buf);
2388 current_insn_ptr = buildaddr;
2389 }
2390
2391 static void
2392 amd64_emit_call (CORE_ADDR fn)
2393 {
2394 unsigned char buf[16];
2395 int i;
2396 CORE_ADDR buildaddr;
2397 LONGEST offset64;
2398
2399 /* The destination function, living in the shared library, may be
2400 more than 31 bits away from the compiled code pad. */
2401
2402 buildaddr = current_insn_ptr;
2403
2404 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
2405
2406 i = 0;
2407
2408 if (offset64 > INT_MAX || offset64 < INT_MIN)
2409 {
2410 /* Offset is too large for a 32-bit relative call. Use an
2411 indirect callq through a register instead; %rdx is used since it
2412 is call-clobbered, so we don't have to push/pop it. */
2413 buf[i++] = 0x48; /* movabs $fn,%rdx */
2414 buf[i++] = 0xba;
2415 memcpy (buf + i, &fn, 8);
2416 i += 8;
2417 buf[i++] = 0xff; /* callq *%rdx */
2418 buf[i++] = 0xd2;
2419 }
2420 else
2421 {
2422 int offset32 = offset64; /* We know it can't overflow here. */
buf[i++] = 0xe8; /* call <reladdr> */
2423 memcpy (buf + i, &offset32, 4);
2424 i += 4;
2425 }
2426
2427 append_insns (&buildaddr, i, buf);
2428 current_insn_ptr = buildaddr;
2429 }
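/* For reference, the two shapes emitted above are:

e8 <rel32>               call rel32      (near target, 5 bytes)
48 ba <imm64> / ff d2    movabs $fn,%rdx; callq *%rdx
(far target, 12 bytes)  */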
2430
2431 static void
2432 amd64_emit_reg (int reg)
2433 {
2434 unsigned char buf[16];
2435 int i;
2436 CORE_ADDR buildaddr;
2437
2438 /* Assume raw_regs is still in %rdi. */
2439 buildaddr = current_insn_ptr;
2440 i = 0;
2441 buf[i++] = 0xbe; /* mov $<n>,%esi */
2442 memcpy (&buf[i], &reg, sizeof (reg));
2443 i += 4;
2444 append_insns (&buildaddr, i, buf);
2445 current_insn_ptr = buildaddr;
2446 amd64_emit_call (get_raw_reg_func_addr ());
2447 }
2448
2449 static void
2450 amd64_emit_pop (void)
2451 {
2452 EMIT_ASM (amd64_pop,
2453 "pop %rax");
2454 }
2455
2456 static void
2457 amd64_emit_stack_flush (void)
2458 {
2459 EMIT_ASM (amd64_stack_flush,
2460 "push %rax");
2461 }
2462
2463 static void
2464 amd64_emit_zero_ext (int arg)
2465 {
2466 switch (arg)
2467 {
2468 case 8:
2469 EMIT_ASM (amd64_zero_ext_8,
2470 "and $0xff,%rax");
2471 break;
2472 case 16:
2473 EMIT_ASM (amd64_zero_ext_16,
2474 "and $0xffff,%rax");
2475 break;
2476 case 32:
2477 EMIT_ASM (amd64_zero_ext_32,
2478 "mov $0xffffffff,%rcx\n\t"
2479 "and %rcx,%rax");
2480 break;
2481 default:
2482 emit_error = 1;
2483 }
2484 }
2485
2486 static void
2487 amd64_emit_swap (void)
2488 {
2489 EMIT_ASM (amd64_swap,
2490 "mov %rax,%rcx\n\t"
2491 "pop %rax\n\t"
2492 "push %rcx");
2493 }
2494
2495 static void
2496 amd64_emit_stack_adjust (int n)
2497 {
2498 unsigned char buf[16];
2499 int i;
2500 CORE_ADDR buildaddr = current_insn_ptr;
2501
2502 i = 0;
2503 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
2504 buf[i++] = 0x8d;
2505 buf[i++] = 0x64;
2506 buf[i++] = 0x24;
2507 /* This only handles adjustments up to 15 (the displacement is a signed byte), but we don't expect any more. */
2508 buf[i++] = n * 8;
2509 append_insns (&buildaddr, i, buf);
2510 current_insn_ptr = buildaddr;
2511 }
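/* The emitted bytes are "48 8d 64 24 <disp8>", i.e. a lea with a
sign-extended 8-bit displacement -- hence the n <= 15 limit noted
above (15 * 8 = 120 still fits in a signed byte).  */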
2512
2513 /* FN's prototype is `LONGEST(*fn)(int)'. */
2514
2515 static void
2516 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2517 {
2518 unsigned char buf[16];
2519 int i;
2520 CORE_ADDR buildaddr;
2521
2522 buildaddr = current_insn_ptr;
2523 i = 0;
2524 buf[i++] = 0xbf; /* movl $<n>,%edi */
2525 memcpy (&buf[i], &arg1, sizeof (arg1));
2526 i += 4;
2527 append_insns (&buildaddr, i, buf);
2528 current_insn_ptr = buildaddr;
2529 amd64_emit_call (fn);
2530 }
2531
2532 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2533
2534 static void
2535 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2536 {
2537 unsigned char buf[16];
2538 int i;
2539 CORE_ADDR buildaddr;
2540
2541 buildaddr = current_insn_ptr;
2542 i = 0;
2543 buf[i++] = 0xbf; /* movl $<n>,%edi */
2544 memcpy (&buf[i], &arg1, sizeof (arg1));
2545 i += 4;
2546 append_insns (&buildaddr, i, buf);
2547 current_insn_ptr = buildaddr;
2548 EMIT_ASM (amd64_void_call_2_a,
2549 /* Save away a copy of the stack top. */
2550 "push %rax\n\t"
2551 /* Also pass top as the second argument. */
2552 "mov %rax,%rsi");
2553 amd64_emit_call (fn);
2554 EMIT_ASM (amd64_void_call_2_b,
2555 /* Restore the stack top, %rax may have been trashed. */
2556 "pop %rax");
2557 }
2558
2559 void
2560 amd64_emit_eq_goto (int *offset_p, int *size_p)
2561 {
2562 EMIT_ASM (amd64_eq,
2563 "cmp %rax,(%rsp)\n\t"
2564 "jne .Lamd64_eq_fallthru\n\t"
2565 "lea 0x8(%rsp),%rsp\n\t"
2566 "pop %rax\n\t"
2567 /* jmp, but don't trust the assembler to choose the right jump */
2568 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2569 ".Lamd64_eq_fallthru:\n\t"
2570 "lea 0x8(%rsp),%rsp\n\t"
2571 "pop %rax");
2572
2573 if (offset_p)
2574 *offset_p = 13;
2575 if (size_p)
2576 *size_p = 4;
2577 }
2578
2579 void
2580 amd64_emit_ne_goto (int *offset_p, int *size_p)
2581 {
2582 EMIT_ASM (amd64_ne,
2583 "cmp %rax,(%rsp)\n\t"
2584 "je .Lamd64_ne_fallthru\n\t"
2585 "lea 0x8(%rsp),%rsp\n\t"
2586 "pop %rax\n\t"
2587 /* jmp, but don't trust the assembler to choose the right jump */
2588 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2589 ".Lamd64_ne_fallthru:\n\t"
2590 "lea 0x8(%rsp),%rsp\n\t"
2591 "pop %rax");
2592
2593 if (offset_p)
2594 *offset_p = 13;
2595 if (size_p)
2596 *size_p = 4;
2597 }
2598
2599 void
2600 amd64_emit_lt_goto (int *offset_p, int *size_p)
2601 {
2602 EMIT_ASM (amd64_lt,
2603 "cmp %rax,(%rsp)\n\t"
2604 "jnl .Lamd64_lt_fallthru\n\t"
2605 "lea 0x8(%rsp),%rsp\n\t"
2606 "pop %rax\n\t"
2607 /* jmp, but don't trust the assembler to choose the right jump */
2608 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2609 ".Lamd64_lt_fallthru:\n\t"
2610 "lea 0x8(%rsp),%rsp\n\t"
2611 "pop %rax");
2612
2613 if (offset_p)
2614 *offset_p = 13;
2615 if (size_p)
2616 *size_p = 4;
2617 }
2618
2619 void
2620 amd64_emit_le_goto (int *offset_p, int *size_p)
2621 {
2622 EMIT_ASM (amd64_le,
2623 "cmp %rax,(%rsp)\n\t"
2624 "jnle .Lamd64_le_fallthru\n\t"
2625 "lea 0x8(%rsp),%rsp\n\t"
2626 "pop %rax\n\t"
2627 /* jmp, but don't trust the assembler to choose the right jump */
2628 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2629 ".Lamd64_le_fallthru:\n\t"
2630 "lea 0x8(%rsp),%rsp\n\t"
2631 "pop %rax");
2632
2633 if (offset_p)
2634 *offset_p = 13;
2635 if (size_p)
2636 *size_p = 4;
2637 }
2638
2639 void
2640 amd64_emit_gt_goto (int *offset_p, int *size_p)
2641 {
2642 EMIT_ASM (amd64_gt,
2643 "cmp %rax,(%rsp)\n\t"
2644 "jng .Lamd64_gt_fallthru\n\t"
2645 "lea 0x8(%rsp),%rsp\n\t"
2646 "pop %rax\n\t"
2647 /* jmp, but don't trust the assembler to choose the right jump */
2648 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2649 ".Lamd64_gt_fallthru:\n\t"
2650 "lea 0x8(%rsp),%rsp\n\t"
2651 "pop %rax");
2652
2653 if (offset_p)
2654 *offset_p = 13;
2655 if (size_p)
2656 *size_p = 4;
2657 }
2658
2659 void
2660 amd64_emit_ge_goto (int *offset_p, int *size_p)
2661 {
2662 EMIT_ASM (amd64_ge,
2663 "cmp %rax,(%rsp)\n\t"
2664 "jnge .Lamd64_ge_fallthru\n\t"
2665 ".Lamd64_ge_jump:\n\t"
2666 "lea 0x8(%rsp),%rsp\n\t"
2667 "pop %rax\n\t"
2668 /* jmp, but don't trust the assembler to choose the right jump */
2669 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2670 ".Lamd64_ge_fallthru:\n\t"
2671 "lea 0x8(%rsp),%rsp\n\t"
2672 "pop %rax");
2673
2674 if (offset_p)
2675 *offset_p = 13;
2676 if (size_p)
2677 *size_p = 4;
2678 }
2679
2680 struct emit_ops amd64_emit_ops =
2681 {
2682 amd64_emit_prologue,
2683 amd64_emit_epilogue,
2684 amd64_emit_add,
2685 amd64_emit_sub,
2686 amd64_emit_mul,
2687 amd64_emit_lsh,
2688 amd64_emit_rsh_signed,
2689 amd64_emit_rsh_unsigned,
2690 amd64_emit_ext,
2691 amd64_emit_log_not,
2692 amd64_emit_bit_and,
2693 amd64_emit_bit_or,
2694 amd64_emit_bit_xor,
2695 amd64_emit_bit_not,
2696 amd64_emit_equal,
2697 amd64_emit_less_signed,
2698 amd64_emit_less_unsigned,
2699 amd64_emit_ref,
2700 amd64_emit_if_goto,
2701 amd64_emit_goto,
2702 amd64_write_goto_address,
2703 amd64_emit_const,
2704 amd64_emit_call,
2705 amd64_emit_reg,
2706 amd64_emit_pop,
2707 amd64_emit_stack_flush,
2708 amd64_emit_zero_ext,
2709 amd64_emit_swap,
2710 amd64_emit_stack_adjust,
2711 amd64_emit_int_call_1,
2712 amd64_emit_void_call_2,
2713 amd64_emit_eq_goto,
2714 amd64_emit_ne_goto,
2715 amd64_emit_lt_goto,
2716 amd64_emit_le_goto,
2717 amd64_emit_gt_goto,
2718 amd64_emit_ge_goto
2719 };
2720
2721 #endif /* __x86_64__ */
2722
2723 static void
2724 i386_emit_prologue (void)
2725 {
2726 EMIT_ASM32 (i386_prologue,
2727 "push %ebp\n\t"
2728 "mov %esp,%ebp\n\t"
2729 "push %ebx");
2730 /* At this point, the raw regs base address is at 8(%ebp), and the
2731 value pointer is at 12(%ebp). */
2732 }
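/* A convention worth spelling out: throughout these i386 emit ops the
64-bit top-of-stack value lives in the %ebx:%eax pair (high:low),
and every value spilled to the stack occupies two 4-byte words.
That is why the sequences below come in pairs -- add/adc, sub/sbb,
and double cmpl.  */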
2733
2734 static void
2735 i386_emit_epilogue (void)
2736 {
2737 EMIT_ASM32 (i386_epilogue,
2738 "mov 12(%ebp),%ecx\n\t"
2739 "mov %eax,(%ecx)\n\t"
2740 "mov %ebx,0x4(%ecx)\n\t"
2741 "xor %eax,%eax\n\t"
2742 "pop %ebx\n\t"
2743 "pop %ebp\n\t"
2744 "ret");
2745 }
2746
2747 static void
2748 i386_emit_add (void)
2749 {
2750 EMIT_ASM32 (i386_add,
2751 "add (%esp),%eax\n\t"
2752 "adc 0x4(%esp),%ebx\n\t"
2753 "lea 0x8(%esp),%esp");
2754 }
2755
2756 static void
2757 i386_emit_sub (void)
2758 {
2759 EMIT_ASM32 (i386_sub,
2760 "subl %eax,(%esp)\n\t"
2761 "sbbl %ebx,4(%esp)\n\t"
2762 "pop %eax\n\t"
2763 "pop %ebx\n\t");
2764 }
2765
2766 static void
2767 i386_emit_mul (void)
2768 {
2769 emit_error = 1;
2770 }
2771
2772 static void
2773 i386_emit_lsh (void)
2774 {
2775 emit_error = 1;
2776 }
2777
2778 static void
2779 i386_emit_rsh_signed (void)
2780 {
2781 emit_error = 1;
2782 }
2783
2784 static void
2785 i386_emit_rsh_unsigned (void)
2786 {
2787 emit_error = 1;
2788 }
2789
2790 static void
2791 i386_emit_ext (int arg)
2792 {
2793 switch (arg)
2794 {
2795 case 8:
2796 EMIT_ASM32 (i386_ext_8,
2797 "cbtw\n\t"
2798 "cwtl\n\t"
2799 "movl %eax,%ebx\n\t"
2800 "sarl $31,%ebx");
2801 break;
2802 case 16:
2803 EMIT_ASM32 (i386_ext_16,
2804 "cwtl\n\t"
2805 "movl %eax,%ebx\n\t"
2806 "sarl $31,%ebx");
2807 break;
2808 case 32:
2809 EMIT_ASM32 (i386_ext_32,
2810 "movl %eax,%ebx\n\t"
2811 "sarl $31,%ebx");
2812 break;
2813 default:
2814 emit_error = 1;
2815 }
2816 }
2817
2818 static void
2819 i386_emit_log_not (void)
2820 {
2821 EMIT_ASM32 (i386_log_not,
2822 "or %ebx,%eax\n\t"
2823 "test %eax,%eax\n\t"
2824 "sete %cl\n\t"
2825 "xor %ebx,%ebx\n\t"
2826 "movzbl %cl,%eax");
2827 }
2828
2829 static void
2830 i386_emit_bit_and (void)
2831 {
2832 EMIT_ASM32 (i386_and,
2833 "and (%esp),%eax\n\t"
2834 "and 0x4(%esp),%ebx\n\t"
2835 "lea 0x8(%esp),%esp");
2836 }
2837
2838 static void
2839 i386_emit_bit_or (void)
2840 {
2841 EMIT_ASM32 (i386_or,
2842 "or (%esp),%eax\n\t"
2843 "or 0x4(%esp),%ebx\n\t"
2844 "lea 0x8(%esp),%esp");
2845 }
2846
2847 static void
2848 i386_emit_bit_xor (void)
2849 {
2850 EMIT_ASM32 (i386_xor,
2851 "xor (%esp),%eax\n\t"
2852 "xor 0x4(%esp),%ebx\n\t"
2853 "lea 0x8(%esp),%esp");
2854 }
2855
2856 static void
2857 i386_emit_bit_not (void)
2858 {
2859 EMIT_ASM32 (i386_bit_not,
2860 "xor $0xffffffff,%eax\n\t"
2861 "xor $0xffffffff,%ebx\n\t");
2862 }
2863
2864 static void
2865 i386_emit_equal (void)
2866 {
2867 EMIT_ASM32 (i386_equal,
2868 "cmpl %ebx,4(%esp)\n\t"
2869 "jne .Li386_equal_false\n\t"
2870 "cmpl %eax,(%esp)\n\t"
2871 "je .Li386_equal_true\n\t"
2872 ".Li386_equal_false:\n\t"
2873 "xor %eax,%eax\n\t"
2874 "jmp .Li386_equal_end\n\t"
2875 ".Li386_equal_true:\n\t"
2876 "mov $1,%eax\n\t"
2877 ".Li386_equal_end:\n\t"
2878 "xor %ebx,%ebx\n\t"
2879 "lea 0x8(%esp),%esp");
2880 }
2881
2882 static void
2883 i386_emit_less_signed (void)
2884 {
2885 EMIT_ASM32 (i386_less_signed,
2886 "cmpl %ebx,4(%esp)\n\t"
2887 "jl .Li386_less_signed_true\n\t"
2888 "jne .Li386_less_signed_false\n\t"
2889 "cmpl %eax,(%esp)\n\t"
2890 "jl .Li386_less_signed_true\n\t"
2891 ".Li386_less_signed_false:\n\t"
2892 "xor %eax,%eax\n\t"
2893 "jmp .Li386_less_signed_end\n\t"
2894 ".Li386_less_signed_true:\n\t"
2895 "mov $1,%eax\n\t"
2896 ".Li386_less_signed_end:\n\t"
2897 "xor %ebx,%ebx\n\t"
2898 "lea 0x8(%esp),%esp");
2899 }
2900
2901 static void
2902 i386_emit_less_unsigned (void)
2903 {
2904 EMIT_ASM32 (i386_less_unsigned,
2905 "cmpl %ebx,4(%esp)\n\t"
2906 "jb .Li386_less_unsigned_true\n\t"
2907 "jne .Li386_less_unsigned_false\n\t"
2908 "cmpl %eax,(%esp)\n\t"
2909 "jb .Li386_less_unsigned_true\n\t"
2910 ".Li386_less_unsigned_false:\n\t"
2911 "xor %eax,%eax\n\t"
2912 "jmp .Li386_less_unsigned_end\n\t"
2913 ".Li386_less_unsigned_true:\n\t"
2914 "mov $1,%eax\n\t"
2915 ".Li386_less_unsigned_end:\n\t"
2916 "xor %ebx,%ebx\n\t"
2917 "lea 0x8(%esp),%esp");
2918 }
2919
2920 static void
2921 i386_emit_ref (int size)
2922 {
2923 switch (size)
2924 {
2925 case 1:
2926 EMIT_ASM32 (i386_ref1,
2927 "movb (%eax),%al");
2928 break;
2929 case 2:
2930 EMIT_ASM32 (i386_ref2,
2931 "movw (%eax),%ax");
2932 break;
2933 case 4:
2934 EMIT_ASM32 (i386_ref4,
2935 "movl (%eax),%eax");
2936 break;
2937 case 8:
2938 EMIT_ASM32 (i386_ref8,
2939 "movl 4(%eax),%ebx\n\t"
2940 "movl (%eax),%eax");
2941 break;
2942 }
2943 }
2944
2945 static void
2946 i386_emit_if_goto (int *offset_p, int *size_p)
2947 {
2948 EMIT_ASM32 (i386_if_goto,
2949 "mov %eax,%ecx\n\t"
2950 "or %ebx,%ecx\n\t"
2951 "pop %eax\n\t"
2952 "pop %ebx\n\t"
2953 "cmpl $0,%ecx\n\t"
2954 /* Don't trust the assembler to choose the right jump */
2955 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2956
2957 if (offset_p)
2958 *offset_p = 11; /* be sure that this matches the sequence above */
2959 if (size_p)
2960 *size_p = 4;
2961 }
2962
2963 static void
2964 i386_emit_goto (int *offset_p, int *size_p)
2965 {
2966 EMIT_ASM32 (i386_goto,
2967 /* Don't trust the assembler to choose the right jump */
2968 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2969 if (offset_p)
2970 *offset_p = 1;
2971 if (size_p)
2972 *size_p = 4;
2973 }
2974
2975 static void
2976 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2977 {
2978 int diff = (to - (from + size));
2979 unsigned char buf[sizeof (int)];
2980
2981 /* We're only doing 4-byte sizes at the moment. */
2982 if (size != 4)
2983 {
2984 emit_error = 1;
2985 return;
2986 }
2987
2988 memcpy (buf, &diff, sizeof (int));
2989 write_inferior_memory (from, buf, sizeof (int));
2990 }
2991
2992 static void
2993 i386_emit_const (LONGEST num)
2994 {
2995 unsigned char buf[16];
2996 int i, hi, lo;
2997 CORE_ADDR buildaddr = current_insn_ptr;
2998
2999 i = 0;
3000 buf[i++] = 0xb8; /* mov $<n>,%eax */
3001 lo = num & 0xffffffff;
3002 memcpy (&buf[i], &lo, sizeof (lo));
3003 i += 4;
3004 hi = ((num >> 32) & 0xffffffff);
3005 if (hi)
3006 {
3007 buf[i++] = 0xbb; /* mov $<n>,%ebx */
3008 memcpy (&buf[i], &hi, sizeof (hi));
3009 i += 4;
3010 }
3011 else
3012 {
3013 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
3014 }
3015 append_insns (&buildaddr, i, buf);
3016 current_insn_ptr = buildaddr;
3017 }
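/* E.g. i386_emit_const (0x1122334455667788LL) would emit
"b8 88 77 66 55" (mov $0x55667788,%eax) followed by
"bb 44 33 22 11" (mov $0x11223344,%ebx).  */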
3018
3019 static void
3020 i386_emit_call (CORE_ADDR fn)
3021 {
3022 unsigned char buf[16];
3023 int i, offset;
3024 CORE_ADDR buildaddr;
3025
3026 buildaddr = current_insn_ptr;
3027 i = 0;
3028 buf[i++] = 0xe8; /* call <reladdr> */
3029 offset = ((int) fn) - (buildaddr + 5);
3030 memcpy (buf + 1, &offset, 4);
3031 append_insns (&buildaddr, 5, buf);
3032 current_insn_ptr = buildaddr;
3033 }
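/* In "e8 <rel32>" the displacement is relative to the end of the
5-byte call instruction, hence the (buildaddr + 5) above.  */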
3034
3035 static void
3036 i386_emit_reg (int reg)
3037 {
3038 unsigned char buf[16];
3039 int i;
3040 CORE_ADDR buildaddr;
3041
3042 EMIT_ASM32 (i386_reg_a,
3043 "sub $0x8,%esp");
3044 buildaddr = current_insn_ptr;
3045 i = 0;
3046 buf[i++] = 0xb8; /* mov $<n>,%eax */
3047 memcpy (&buf[i], &reg, sizeof (reg));
3048 i += 4;
3049 append_insns (&buildaddr, i, buf);
3050 current_insn_ptr = buildaddr;
3051 EMIT_ASM32 (i386_reg_b,
3052 "mov %eax,4(%esp)\n\t"
3053 "mov 8(%ebp),%eax\n\t"
3054 "mov %eax,(%esp)");
3055 i386_emit_call (get_raw_reg_func_addr ());
3056 EMIT_ASM32 (i386_reg_c,
3057 "xor %ebx,%ebx\n\t"
3058 "lea 0x8(%esp),%esp");
3059 }
3060
3061 static void
3062 i386_emit_pop (void)
3063 {
3064 EMIT_ASM32 (i386_pop,
3065 "pop %eax\n\t"
3066 "pop %ebx");
3067 }
3068
3069 static void
3070 i386_emit_stack_flush (void)
3071 {
3072 EMIT_ASM32 (i386_stack_flush,
3073 "push %ebx\n\t"
3074 "push %eax");
3075 }
3076
3077 static void
3078 i386_emit_zero_ext (int arg)
3079 {
3080 switch (arg)
3081 {
3082 case 8:
3083 EMIT_ASM32 (i386_zero_ext_8,
3084 "and $0xff,%eax\n\t"
3085 "xor %ebx,%ebx");
3086 break;
3087 case 16:
3088 EMIT_ASM32 (i386_zero_ext_16,
3089 "and $0xffff,%eax\n\t"
3090 "xor %ebx,%ebx");
3091 break;
3092 case 32:
3093 EMIT_ASM32 (i386_zero_ext_32,
3094 "xor %ebx,%ebx");
3095 break;
3096 default:
3097 emit_error = 1;
3098 }
3099 }
3100
3101 static void
3102 i386_emit_swap (void)
3103 {
3104 EMIT_ASM32 (i386_swap,
3105 "mov %eax,%ecx\n\t"
3106 "mov %ebx,%edx\n\t"
3107 "pop %eax\n\t"
3108 "pop %ebx\n\t"
3109 "push %edx\n\t"
3110 "push %ecx");
3111 }
3112
3113 static void
3114 i386_emit_stack_adjust (int n)
3115 {
3116 unsigned char buf[16];
3117 int i;
3118 CORE_ADDR buildaddr = current_insn_ptr;
3119
3120 i = 0;
3121 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
3122 buf[i++] = 0x64;
3123 buf[i++] = 0x24;
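/* As on amd64, each stack slot holds a 64-bit value (here as two
4-byte words), so the adjustment is n * 8; disp8 again limits n
to 15.  */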
3124 buf[i++] = n * 8;
3125 append_insns (&buildaddr, i, buf);
3126 current_insn_ptr = buildaddr;
3127 }
3128
3129 /* FN's prototype is `LONGEST(*fn)(int)'. */
3130
3131 static void
3132 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
3133 {
3134 unsigned char buf[16];
3135 int i;
3136 CORE_ADDR buildaddr;
3137
3138 EMIT_ASM32 (i386_int_call_1_a,
3139 /* Reserve a bit of stack space. */
3140 "sub $0x8,%esp");
3141 /* Put the one argument on the stack. */
3142 buildaddr = current_insn_ptr;
3143 i = 0;
3144 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
3145 buf[i++] = 0x04;
3146 buf[i++] = 0x24;
3147 memcpy (&buf[i], &arg1, sizeof (arg1));
3148 i += 4;
3149 append_insns (&buildaddr, i, buf);
3150 current_insn_ptr = buildaddr;
3151 i386_emit_call (fn);
3152 EMIT_ASM32 (i386_int_call_1_c,
3153 "mov %edx,%ebx\n\t"
3154 "lea 0x8(%esp),%esp");
3155 }
3156
3157 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
3158
3159 static void
3160 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
3161 {
3162 unsigned char buf[16];
3163 int i;
3164 CORE_ADDR buildaddr;
3165
3166 EMIT_ASM32 (i386_void_call_2_a,
3167 /* Preserve %eax only; we don't have to worry about %ebx. */
3168 "push %eax\n\t"
3169 /* Reserve a bit of stack space for arguments. */
3170 "sub $0x10,%esp\n\t"
3171 /* Copy "top" to the second argument position. (Note that
3172 we can't assume the function won't scribble on its
3173 arguments, so don't try to restore from this.) */
3174 "mov %eax,4(%esp)\n\t"
3175 "mov %ebx,8(%esp)");
3176 /* Put the first argument on the stack. */
3177 buildaddr = current_insn_ptr;
3178 i = 0;
3179 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
3180 buf[i++] = 0x04;
3181 buf[i++] = 0x24;
3182 memcpy (&buf[i], &arg1, sizeof (arg1));
3183 i += 4;
3184 append_insns (&buildaddr, i, buf);
3185 current_insn_ptr = buildaddr;
3186 i386_emit_call (fn);
3187 EMIT_ASM32 (i386_void_call_2_b,
3188 "lea 0x10(%esp),%esp\n\t"
3189 /* Restore original stack top. */
3190 "pop %eax");
3191 }
3192
3193
3194 void
3195 i386_emit_eq_goto (int *offset_p, int *size_p)
3196 {
3197 EMIT_ASM32 (eq,
3198 /* Check the low half first; it is more likely to be the decider. */
3199 "cmpl %eax,(%esp)\n\t"
3200 "jne .Leq_fallthru\n\t"
3201 "cmpl %ebx,4(%esp)\n\t"
3202 "jne .Leq_fallthru\n\t"
3203 "lea 0x8(%esp),%esp\n\t"
3204 "pop %eax\n\t"
3205 "pop %ebx\n\t"
3206 /* jmp, but don't trust the assembler to choose the right jump */
3207 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3208 ".Leq_fallthru:\n\t"
3209 "lea 0x8(%esp),%esp\n\t"
3210 "pop %eax\n\t"
3211 "pop %ebx");
3212
3213 if (offset_p)
3214 *offset_p = 18;
3215 if (size_p)
3216 *size_p = 4;
3217 }
3218
3219 void
3220 i386_emit_ne_goto (int *offset_p, int *size_p)
3221 {
3222 EMIT_ASM32 (ne,
3223 /* Check the low half first; it is more likely to be the decider. */
3224 "cmpl %eax,(%esp)\n\t"
3225 "jne .Lne_jump\n\t"
3226 "cmpl %ebx,4(%esp)\n\t"
3227 "je .Lne_fallthru\n\t"
3228 ".Lne_jump:\n\t"
3229 "lea 0x8(%esp),%esp\n\t"
3230 "pop %eax\n\t"
3231 "pop %ebx\n\t"
3232 /* jmp, but don't trust the assembler to choose the right jump */
3233 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3234 ".Lne_fallthru:\n\t"
3235 "lea 0x8(%esp),%esp\n\t"
3236 "pop %eax\n\t"
3237 "pop %ebx");
3238
3239 if (offset_p)
3240 *offset_p = 18;
3241 if (size_p)
3242 *size_p = 4;
3243 }
3244
3245 void
3246 i386_emit_lt_goto (int *offset_p, int *size_p)
3247 {
3248 EMIT_ASM32 (lt,
3249 "cmpl %ebx,4(%esp)\n\t"
3250 "jl .Llt_jump\n\t"
3251 "jne .Llt_fallthru\n\t"
3252 "cmpl %eax,(%esp)\n\t"
3253 "jnl .Llt_fallthru\n\t"
3254 ".Llt_jump:\n\t"
3255 "lea 0x8(%esp),%esp\n\t"
3256 "pop %eax\n\t"
3257 "pop %ebx\n\t"
3258 /* jmp, but don't trust the assembler to choose the right jump */
3259 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3260 ".Llt_fallthru:\n\t"
3261 "lea 0x8(%esp),%esp\n\t"
3262 "pop %eax\n\t"
3263 "pop %ebx");
3264
3265 if (offset_p)
3266 *offset_p = 20;
3267 if (size_p)
3268 *size_p = 4;
3269 }
3270
3271 void
3272 i386_emit_le_goto (int *offset_p, int *size_p)
3273 {
3274 EMIT_ASM32 (le,
3275 "cmpl %ebx,4(%esp)\n\t"
3276 "jle .Lle_jump\n\t"
3277 "jne .Lle_fallthru\n\t"
3278 "cmpl %eax,(%esp)\n\t"
3279 "jnle .Lle_fallthru\n\t"
3280 ".Lle_jump:\n\t"
3281 "lea 0x8(%esp),%esp\n\t"
3282 "pop %eax\n\t"
3283 "pop %ebx\n\t"
3284 /* jmp, but don't trust the assembler to choose the right jump */
3285 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3286 ".Lle_fallthru:\n\t"
3287 "lea 0x8(%esp),%esp\n\t"
3288 "pop %eax\n\t"
3289 "pop %ebx");
3290
3291 if (offset_p)
3292 *offset_p = 20;
3293 if (size_p)
3294 *size_p = 4;
3295 }
3296
3297 void
3298 i386_emit_gt_goto (int *offset_p, int *size_p)
3299 {
3300 EMIT_ASM32 (gt,
3301 "cmpl %ebx,4(%esp)\n\t"
3302 "jg .Lgt_jump\n\t"
3303 "jne .Lgt_fallthru\n\t"
3304 "cmpl %eax,(%esp)\n\t"
3305 "jng .Lgt_fallthru\n\t"
3306 ".Lgt_jump:\n\t"
3307 "lea 0x8(%esp),%esp\n\t"
3308 "pop %eax\n\t"
3309 "pop %ebx\n\t"
3310 /* jmp, but don't trust the assembler to choose the right jump */
3311 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3312 ".Lgt_fallthru:\n\t"
3313 "lea 0x8(%esp),%esp\n\t"
3314 "pop %eax\n\t"
3315 "pop %ebx");
3316
3317 if (offset_p)
3318 *offset_p = 20;
3319 if (size_p)
3320 *size_p = 4;
3321 }
3322
3323 void
3324 i386_emit_ge_goto (int *offset_p, int *size_p)
3325 {
3326 EMIT_ASM32 (ge,
3327 "cmpl %ebx,4(%esp)\n\t"
3328 "jge .Lge_jump\n\t"
3329 "jne .Lge_fallthru\n\t"
3330 "cmpl %eax,(%esp)\n\t"
3331 "jnge .Lge_fallthru\n\t"
3332 ".Lge_jump:\n\t"
3333 "lea 0x8(%esp),%esp\n\t"
3334 "pop %eax\n\t"
3335 "pop %ebx\n\t"
3336 /* jmp, but don't trust the assembler to choose the right jump */
3337 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3338 ".Lge_fallthru:\n\t"
3339 "lea 0x8(%esp),%esp\n\t"
3340 "pop %eax\n\t"
3341 "pop %ebx");
3342
3343 if (offset_p)
3344 *offset_p = 20;
3345 if (size_p)
3346 *size_p = 4;
3347 }
3348
3349 struct emit_ops i386_emit_ops =
3350 {
3351 i386_emit_prologue,
3352 i386_emit_epilogue,
3353 i386_emit_add,
3354 i386_emit_sub,
3355 i386_emit_mul,
3356 i386_emit_lsh,
3357 i386_emit_rsh_signed,
3358 i386_emit_rsh_unsigned,
3359 i386_emit_ext,
3360 i386_emit_log_not,
3361 i386_emit_bit_and,
3362 i386_emit_bit_or,
3363 i386_emit_bit_xor,
3364 i386_emit_bit_not,
3365 i386_emit_equal,
3366 i386_emit_less_signed,
3367 i386_emit_less_unsigned,
3368 i386_emit_ref,
3369 i386_emit_if_goto,
3370 i386_emit_goto,
3371 i386_write_goto_address,
3372 i386_emit_const,
3373 i386_emit_call,
3374 i386_emit_reg,
3375 i386_emit_pop,
3376 i386_emit_stack_flush,
3377 i386_emit_zero_ext,
3378 i386_emit_swap,
3379 i386_emit_stack_adjust,
3380 i386_emit_int_call_1,
3381 i386_emit_void_call_2,
3382 i386_emit_eq_goto,
3383 i386_emit_ne_goto,
3384 i386_emit_lt_goto,
3385 i386_emit_le_goto,
3386 i386_emit_gt_goto,
3387 i386_emit_ge_goto
3388 };
3389
3390
3391 static struct emit_ops *
3392 x86_emit_ops (void)
3393 {
3394 #ifdef __x86_64__
3395 if (is_64bit_tdesc ())
3396 return &amd64_emit_ops;
3397 else
3398 #endif
3399 return &i386_emit_ops;
3400 }
3401
3402 static int
3403 x86_supports_range_stepping (void)
3404 {
3405 return 1;
3406 }
3407
3408 /* This is initialized assuming an amd64 target.
3409 x86_arch_setup will correct it for i386 or amd64 targets. */
3410
3411 struct linux_target_ops the_low_target =
3412 {
3413 x86_arch_setup,
3414 x86_linux_regs_info,
3415 x86_cannot_fetch_register,
3416 x86_cannot_store_register,
3417 NULL, /* fetch_register */
3418 x86_get_pc,
3419 x86_set_pc,
3420 x86_breakpoint,
3421 x86_breakpoint_len,
3422 NULL,
3423 1,
3424 x86_breakpoint_at,
3425 x86_supports_z_point_type,
3426 x86_insert_point,
3427 x86_remove_point,
3428 x86_stopped_by_watchpoint,
3429 x86_stopped_data_address,
3430 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3431 native i386 case (no registers smaller than an xfer unit), and are not
3432 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3433 NULL,
3434 NULL,
3435 /* need to fix up i386 siginfo if host is amd64 */
3436 x86_siginfo_fixup,
3437 x86_linux_new_process,
3438 x86_linux_new_thread,
3439 x86_linux_prepare_to_resume,
3440 x86_linux_process_qsupported,
3441 x86_supports_tracepoints,
3442 x86_get_thread_area,
3443 x86_install_fast_tracepoint_jump_pad,
3444 x86_emit_ops,
3445 x86_get_min_fast_tracepoint_insn_len,
3446 x86_supports_range_stepping,
3447 };
3448
3449 void
3450 initialize_low_arch (void)
3451 {
3452 /* Initialize the Linux target descriptions. */
3453 #ifdef __x86_64__
3454 init_registers_amd64_linux ();
3455 init_registers_amd64_avx_linux ();
3456 init_registers_amd64_avx512_linux ();
3457 init_registers_amd64_mpx_linux ();
3458
3459 init_registers_x32_linux ();
3460 init_registers_x32_avx_linux ();
3461 init_registers_x32_avx512_linux ();
3462
3463 tdesc_amd64_linux_no_xml = xmalloc (sizeof (struct target_desc));
3464 copy_target_description (tdesc_amd64_linux_no_xml, tdesc_amd64_linux);
3465 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
3466 #endif
3467 init_registers_i386_linux ();
3468 init_registers_i386_mmx_linux ();
3469 init_registers_i386_avx_linux ();
3470 init_registers_i386_avx512_linux ();
3471 init_registers_i386_mpx_linux ();
3472
3473 tdesc_i386_linux_no_xml = xmalloc (sizeof (struct target_desc));
3474 copy_target_description (tdesc_i386_linux_no_xml, tdesc_i386_linux);
3475 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
3476
3477 initialize_regsets_info (&x86_regsets_info);
3478 }