/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2014 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include <signal.h>
#include <limits.h>
#include <inttypes.h>
#include "linux-low.h"
#include "i387-fp.h"
#include "x86-low.h"
#include "x86-xstate.h"

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "agent.h"
#include "tdesc.h"
#include "tracepoint.h"
#include "ax.h"

#ifdef __x86_64__
/* Defined in auto-generated file amd64-linux.c.  */
void init_registers_amd64_linux (void);
extern const struct target_desc *tdesc_amd64_linux;

/* Defined in auto-generated file amd64-avx-linux.c.  */
void init_registers_amd64_avx_linux (void);
extern const struct target_desc *tdesc_amd64_avx_linux;

/* Defined in auto-generated file amd64-avx512-linux.c.  */
void init_registers_amd64_avx512_linux (void);
extern const struct target_desc *tdesc_amd64_avx512_linux;

/* Defined in auto-generated file amd64-mpx-linux.c.  */
void init_registers_amd64_mpx_linux (void);
extern const struct target_desc *tdesc_amd64_mpx_linux;

/* Defined in auto-generated file x32-linux.c.  */
void init_registers_x32_linux (void);
extern const struct target_desc *tdesc_x32_linux;

/* Defined in auto-generated file x32-avx-linux.c.  */
void init_registers_x32_avx_linux (void);
extern const struct target_desc *tdesc_x32_avx_linux;

/* Defined in auto-generated file x32-avx512-linux.c.  */
void init_registers_x32_avx512_linux (void);
extern const struct target_desc *tdesc_x32_avx512_linux;

#endif

/* Defined in auto-generated file i386-linux.c.  */
void init_registers_i386_linux (void);
extern const struct target_desc *tdesc_i386_linux;

/* Defined in auto-generated file i386-mmx-linux.c.  */
void init_registers_i386_mmx_linux (void);
extern const struct target_desc *tdesc_i386_mmx_linux;

/* Defined in auto-generated file i386-avx-linux.c.  */
void init_registers_i386_avx_linux (void);
extern const struct target_desc *tdesc_i386_avx_linux;

/* Defined in auto-generated file i386-avx512-linux.c.  */
void init_registers_i386_avx512_linux (void);
extern const struct target_desc *tdesc_i386_avx512_linux;

/* Defined in auto-generated file i386-mpx-linux.c.  */
void init_registers_i386_mpx_linux (void);
extern const struct target_desc *tdesc_i386_mpx_linux;

#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;

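/* Templates for the fast tracepoint jump instructions: a 5-byte
   "jmp rel32" (0xe9) and, for 4-byte instruction sites, a 4-byte
   "jmp rel16" (0x66 operand-size prefix plus 0xe9).  The zeroed
   displacement bytes are patched in when a jump is installed.  */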
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/ptrace.h>
#include <sys/uio.h>

#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET 0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET 0x4205
#endif


#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};

/* Per-thread arch-specific data we want to keep.  */

struct arch_lwp_info
{
  /* Non-zero if our copy differs from what's recorded in the thread.  */
  int debug_registers_changed;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So that the code below doesn't have to care whether it's i386 or amd64.  */
#define ORIG_EAX ORIG_RAX

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
  -1, -1, -1, -1,                 /* MPX registers BND0 ... BND3.  */
  -1, -1,                         /* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#endif

#ifdef __x86_64__

/* Returns true if the current inferior belongs to an x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_inferior, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif

\f
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
        {
        case FS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
            return PS_OK;
          break;
        case GS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
            return PS_OK;
          break;
        default:
          return PS_BADADDR;
        }
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
                (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64 bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
        {
          *addr = (CORE_ADDR) (uintptr_t) base;
          return 0;
        }

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    /* A segment selector's low three bits hold the RPL and the table
       indicator; shifting them off leaves the GDT descriptor index.  */
    const int reg_thread_area = 3;
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
                lwpid_of (thr),
                (void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}


\f
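/* Register accessibility checks used by the usrregs
   (PTRACE_PEEKUSER/POKEUSER) method: only the general-purpose
   registers listed in i386_regmap can be transferred that way; the
   rest go through the regsets defined below.  */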
static int
x86_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static int
x86_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

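/* Transfer the general-purpose registers between REGCACHE and the
   ptrace register buffer BUF, using the regmap tables above to find
   each register's offset.  */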
static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
                            ((char *) buf) + ORIG_EAX * 4);
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
                           ((char *) buf) + ORIG_EAX * 4);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  It would be nice to avoid the duplication in the
   case where it does work.  Maybe the arch_setup routine could check
   whether it works and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  { 0, 0, 0, -1, -1, NULL, NULL }
};

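/* Fetch the program counter from REGCACHE, reading RIP or EIP
   depending on the inferior's word size.  */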
static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      unsigned long pc;
      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      unsigned int pc;
      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

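/* Write PC into REGCACHE, as RIP or EIP as appropriate.  */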
static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      unsigned long newpc = pc;
      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      unsigned int newpc = pc;
      supply_register_by_name (regcache, "eip", &newpc);
    }
}
\f
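/* The single-byte "int3" trap instruction, the software breakpoint
   used on all flavors of x86.  */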
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  (*the_target->read_memory) (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}
\f
/* Support for debug registers.  */

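/* The kernel exposes the eight x86 debug registers through the
   u_debugreg[] array in struct user: DR0-DR3 hold breakpoint
   addresses, DR4 and DR5 are reserved, DR6 is the status register
   and DR7 the control register.  */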
static unsigned long
x86_linux_dr_get (ptid_t ptid, int regnum)
{
  int tid;
  unsigned long value;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  value = ptrace (PTRACE_PEEKUSER, tid,
                  offsetof (struct user, u_debugreg[regnum]), 0);
  if (errno != 0)
    error ("Couldn't read debug register");

  return value;
}

static void
x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
{
  int tid;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  ptrace (PTRACE_POKEUSER, tid,
          offsetof (struct user, u_debugreg[regnum]), value);
  if (errno != 0)
    error ("Couldn't write debug register");
}

static int
update_debug_registers_callback (struct inferior_list_entry *entry,
                                 void *pid_p)
{
  struct thread_info *thr = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thr);
  int pid = *(int *) pid_p;

  /* Only update the threads of this process.  */
  if (pid_of (thr) == pid)
    {
      /* The actual update is done later, just before resuming the
         lwp; here we just mark that the registers need updating.  */
      lwp->arch_private->debug_registers_changed = 1;

      /* If the lwp isn't stopped, force it to momentarily pause, so
         we can update its debug registers.  */
      if (!lwp->stopped)
        linux_stop_lwp (lwp);
    }

  return 0;
}

/* Update the inferior's debug register REGNUM from STATE.  */

static void
x86_dr_low_set_addr (int regnum, CORE_ADDR addr)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (current_inferior);

  gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);

  find_inferior (&all_threads, update_debug_registers_callback, &pid);
}

/* Return the inferior's debug register REGNUM.  */

static CORE_ADDR
x86_dr_low_get_addr (int regnum)
{
  ptid_t ptid = ptid_of (current_inferior);

  gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);

  return x86_linux_dr_get (ptid, regnum);
}

/* Update the inferior's DR7 debug control register from STATE.  */

static void
x86_dr_low_set_control (unsigned long control)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (current_inferior);

  find_inferior (&all_threads, update_debug_registers_callback, &pid);
}

/* Return the inferior's DR7 debug control register.  */

static unsigned long
x86_dr_low_get_control (void)
{
  ptid_t ptid = ptid_of (current_inferior);

  return x86_linux_dr_get (ptid, DR_CONTROL);
}

/* Get the value of the DR6 debug status register from the inferior
   and record it in STATE.  */

static unsigned long
x86_dr_low_get_status (void)
{
  ptid_t ptid = ptid_of (current_inferior);

  return x86_linux_dr_get (ptid, DR_STATUS);
}

/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_dr_low_set_control,
    x86_dr_low_set_addr,
    x86_dr_low_get_addr,
    x86_dr_low_get_status,
    x86_dr_low_get_control,
    sizeof (void *),
  };
\f
/* Breakpoint/Watchpoint support.  */

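/* Answer GDB's Z-packet probes.  Note that read-only watchpoints are
   left unsupported: the x86 debug registers can watch writes or
   read/write accesses, but not reads alone.  */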
static int
x86_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}

static int
x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
                  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_sw:
      return insert_memory_breakpoint (bp);

    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
        enum target_hw_bp_type hw_type
          = raw_bkpt_type_to_target_hw_bp_type (type);
        struct x86_debug_reg_state *state
          = &proc->private->arch_private->debug_reg_state;

        return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
                  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_sw:
      return remove_memory_breakpoint (bp);

    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
        enum target_hw_bp_type hw_type
          = raw_bkpt_type_to_target_hw_bp_type (type);
        struct x86_debug_reg_state *state
          = &proc->private->arch_private->debug_reg_state;

        return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->private->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (x86_dr_stopped_data_address (&proc->private->arch_private->debug_reg_state,
                                   &addr))
    return addr;
  return 0;
}
\f
/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a new thread is detected.  */

static struct arch_lwp_info *
x86_linux_new_thread (void)
{
  struct arch_lwp_info *info = XCNEW (struct arch_lwp_info);

  info->debug_registers_changed = 1;

  return info;
}

/* Called when resuming a thread.
   If the debug regs have changed, update the thread's copies.  */

static void
x86_linux_prepare_to_resume (struct lwp_info *lwp)
{
  ptid_t ptid = ptid_of (get_lwp_thread (lwp));
  int clear_status = 0;

  if (lwp->arch_private->debug_registers_changed)
    {
      int i;
      int pid = ptid_get_pid (ptid);
      struct process_info *proc = find_process_pid (pid);
      struct x86_debug_reg_state *state
        = &proc->private->arch_private->debug_reg_state;

      x86_linux_dr_set (ptid, DR_CONTROL, 0);

      ALL_DEBUG_ADDRESS_REGISTERS (i)
        if (state->dr_ref_count[i] > 0)
          {
            x86_linux_dr_set (ptid, i, state->dr_mirror[i]);

            /* If we're setting a watchpoint, any change the inferior
               had done itself to the debug registers needs to be
               discarded, otherwise, x86_dr_stopped_data_address can
               get confused.  */
            clear_status = 1;
          }

      if (state->dr_control_mirror != 0)
        x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);

      lwp->arch_private->debug_registers_changed = 0;
    }

  if (clear_status || lwp->stopped_by_watchpoint)
    x86_linux_dr_set (ptid, DR_STATUS, 0);
}
\f
/* When GDBSERVER is built as a 64-bit application on Linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* These types below (compat_*) define a siginfo type that is layout
   compatible with the siginfo type exported by the 32-bit userspace
   support.  */

#ifdef __x86_64__

typedef int compat_int_t;
typedef unsigned int compat_uptr_t;

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;

struct compat_timeval
{
  compat_time_t tv_sec;
  int tv_usec;
};

typedef union compat_sigval
{
  compat_int_t sival_int;
  compat_uptr_t sival_ptr;
} compat_sigval_t;

typedef struct compat_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_clock_t _utime;
      compat_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_siginfo_t;

/* For x32, clock_t in _sigchld is 64 bits wide but aligned on only 4
   bytes.  */
typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t;

typedef struct compat_x32_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_x32_clock_t _utime;
      compat_x32_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_x32_siginfo_t __attribute__ ((__aligned__ (8)));

#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

/* glibc, at least up to 2.3.2, doesn't have si_timerid or si_overrun.
   In their place are si_timer1 and si_timer2.  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif

static void
compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_status = from->si_status;
          to->cpt_si_utime = from->si_utime;
          to->cpt_si_stime = from->si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->cpt_si_addr = (intptr_t) from->si_addr;
          break;
        case SIGPOLL:
          to->cpt_si_band = from->si_band;
          to->cpt_si_fd = from->si_fd;
          break;
        default:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_ptr = (intptr_t) from->si_ptr;
          break;
        }
    }
}

static void
siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_status = from->cpt_si_status;
          to->si_utime = from->cpt_si_utime;
          to->si_stime = from->cpt_si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
          break;
        case SIGPOLL:
          to->si_band = from->cpt_si_band;
          to->si_fd = from->cpt_si_fd;
          break;
        default:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
          break;
        }
    }
}

static void
compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t *to,
                                 siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_status = from->si_status;
          to->cpt_si_utime = from->si_utime;
          to->cpt_si_stime = from->si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->cpt_si_addr = (intptr_t) from->si_addr;
          break;
        case SIGPOLL:
          to->cpt_si_band = from->si_band;
          to->cpt_si_fd = from->si_fd;
          break;
        default:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_ptr = (intptr_t) from->si_ptr;
          break;
        }
    }
}

static void
siginfo_from_compat_x32_siginfo (siginfo_t *to,
                                 compat_x32_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_status = from->cpt_si_status;
          to->si_utime = from->cpt_si_utime;
          to->si_stime = from->cpt_si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
          break;
        case SIGPOLL:
          to->si_band = from->cpt_si_band;
          to->si_fd = from->cpt_si_fd;
          break;
        default:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
          break;
        }
    }
}

#endif /* __x86_64__ */

/* Convert a native/host siginfo object into/from the siginfo in the
   layout of the inferior's architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to NATIVE.  If DIRECTION is 0, copy from NATIVE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *native, void *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_inferior);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      gdb_assert (sizeof (siginfo_t) == sizeof (compat_siginfo_t));

      if (direction == 0)
        compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
      else
        siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);

      return 1;
    }
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    {
      gdb_assert (sizeof (siginfo_t) == sizeof (compat_x32_siginfo_t));

      if (direction == 0)
        compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo *) inf,
                                         native);
      else
        siginfo_from_compat_x32_siginfo (native,
                                         (struct compat_x32_siginfo *) inf);

      return 1;
    }
#endif

  return 0;
}
\f
static int use_xml;

/* Format of XSAVE extended state is:
        struct
        {
          fxsave_bytes[0..463]
          sw_usable_bytes[464..511]
          xstate_hdr_bytes[512..575]
          avx_bytes[576..831]
          future_state etc
        };

  Same memory layout will be used for the coredump NT_X86_XSTATE
  representing the XSAVE extended state registers.

  The first 8 bytes of sw_usable_bytes[464..471] hold the OS-enabled
  extended state mask, which is the same as the extended control register
  0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can use this mask
  together with the mask saved in the xstate_hdr_bytes to determine what
  states the processor/OS supports and what state, used or initialized,
  the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Does the current host support PTRACE_GETREGSET?  */
static int have_ptrace_getregset = -1;

/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_inferior);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
        error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
        error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
        {
          have_ptrace_getfpxregs = 0;
          have_ptrace_getregset = 0;
          return tdesc_i386_mmx_linux;
        }
      else
        have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
        return tdesc_amd64_linux_no_xml;
      else
#endif
        return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
                  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
        have_ptrace_getregset = 0;
      else
        {
          have_ptrace_getregset = 1;

          /* Get XCR0 from XSAVE extended state.  */
          xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
                             / sizeof (uint64_t))];

          /* Use PTRACE_GETREGSET if it is available.  */
          for (regset = x86_regsets;
               regset->fill_function != NULL; regset++)
            if (regset->get_request == PTRACE_GETREGSET)
              regset->size = X86_XSTATE_SIZE (xcr0);
            else if (regset->type != GENERAL_REGS)
              regset->size = 0;
        }
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
                   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      if (is_elf64)
        {
          if (xcr0_features)
            {
              switch (xcr0 & X86_XSTATE_ALL_MASK)
                {
                case X86_XSTATE_AVX512_MASK:
                  return tdesc_amd64_avx512_linux;

                case X86_XSTATE_MPX_MASK:
                  return tdesc_amd64_mpx_linux;

                case X86_XSTATE_AVX_MASK:
                  return tdesc_amd64_avx_linux;

                default:
                  return tdesc_amd64_linux;
                }
            }
          else
            return tdesc_amd64_linux;
        }
      else
        {
          if (xcr0_features)
            {
              switch (xcr0 & X86_XSTATE_ALL_MASK)
                {
                case X86_XSTATE_AVX512_MASK:
                  return tdesc_x32_avx512_linux;

                case X86_XSTATE_MPX_MASK: /* No MPX on x32.  */
                case X86_XSTATE_AVX_MASK:
                  return tdesc_x32_avx_linux;

                default:
                  return tdesc_x32_linux;
                }
            }
          else
            return tdesc_x32_linux;
        }
#endif
    }
  else
    {
      if (xcr0_features)
        {
          switch (xcr0 & X86_XSTATE_ALL_MASK)
            {
            case (X86_XSTATE_AVX512_MASK):
              return tdesc_i386_avx512_linux;

            case (X86_XSTATE_MPX_MASK):
              return tdesc_i386_mpx_linux;

            case (X86_XSTATE_AVX_MASK):
              return tdesc_i386_avx_linux;

            default:
              return tdesc_i386_linux;
            }
        }
      else
        return tdesc_i386_linux;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}

/* Callback for find_inferior.  Stops iteration when a thread with a
   given PID is found.  */

static int
same_process_callback (struct inferior_list_entry *entry, void *data)
{
  int pid = *(int *) data;

  return (ptid_get_pid (entry->id) == pid);
}

/* Callback for for_each_inferior.  Calls the arch_setup routine for
   each process.  */

static void
x86_arch_setup_process_callback (struct inferior_list_entry *entry)
{
  int pid = ptid_get_pid (entry->id);

  /* Look up any thread of this process.  */
  current_inferior
    = (struct thread_info *) find_inferior (&all_threads,
                                            same_process_callback, &pid);

  the_low_target.arch_setup ();
}

/* Update the target description of all processes; a new GDB has
   connected, and it may or may not support XML target descriptions.  */

static void
x86_linux_update_xmltarget (void)
{
  struct thread_info *save_inferior = current_inferior;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_inferior (&all_processes, x86_arch_setup_process_callback);

  current_inferior = save_inferior;
}

/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (const char *query)
{
  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in the qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  if (query != NULL && strncmp (query, "xmlRegisters=", 13) == 0)
    {
      char *copy = xstrdup (query + 13);
      char *p;

      for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
        {
          if (strcmp (p, "i386") == 0)
            {
              use_xml = 1;
              break;
            }
        }

      free (copy);
    }

  x86_linux_update_xmltarget ();
}

/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const struct regs_info *
x86_linux_regs_info (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}

/* Initialize the target description for the architecture of the
   inferior.  */

static void
x86_arch_setup (void)
{
  current_process ()->tdesc = x86_linux_read_description ();
}

static int
x86_supports_tracepoints (void)
{
  return 1;
}

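/* Write the LEN bytes at BUF into the inferior at *TO, advancing *TO
   past the written bytes.  */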
static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}

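/* Decode OP, a string of space-separated hex bytes such as
   "48 89 e6", into BUF; returns the number of bytes emitted.  */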
static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
        break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}

#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                        CORE_ADDR collector,
                                        CORE_ADDR lockaddr,
                                        ULONGEST orig_size,
                                        CORE_ADDR *jump_entry,
                                        CORE_ADDR *trampoline,
                                        ULONGEST *trampoline_size,
                                        unsigned char *jjump_pad_insn,
                                        ULONGEST *jjump_pad_insn_size,
                                        CORE_ADDR *adjusted_insn_addr,
                                        CORE_ADDR *adjusted_insn_addr_end,
                                        char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* mov <addr>,%rdi */
  buf[i++] = 0xbf;
  *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
  i += sizeof (unsigned long);
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");    /* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");          /* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");    /* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
                    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");          /* mov <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");       /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");       /* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");          /* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");       /* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");    /* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");          /* mov <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function, being in the shared library, may be more
     than 2^31 bytes away from the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");          /* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");          /* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");          /* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove the stack space that had been used for the collecting_t
     object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");    /* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
               "E.Jump back from jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
               "E.Jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */

/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                       CORE_ADDR collector,
                                       CORE_ADDR lockaddr,
                                       ULONGEST orig_size,
                                       CORE_ADDR *jump_entry,
                                       CORE_ADDR *trampoline,
                                       ULONGEST *trampoline_size,
                                       unsigned char *jjump_pad_insn,
                                       ULONGEST *jjump_pad_insn_size,
                                       CORE_ADDR *adjusted_insn_addr,
                                       CORE_ADDR *adjusted_insn_addr_end,
                                       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");       /* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");             /* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");          /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");       /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this uses cmpxchg, which the original i386
     lacks.  If we cared about that, this could use xchg instead.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
                                                   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");          /* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");          /* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");          /* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");       /* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");    /* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");       /* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");       /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");             /* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove the stack space that had been used for the collecting_t
     object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");       /* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  if (orig_size == 4)
    {
      /* Create a trampoline.  */
      *trampoline_size = sizeof (jump_insn);
      if (!claim_trampoline_space (*trampoline_size, trampoline))
        {
          /* No trampoline space available.  */
          strcpy (err,
                  "E.Cannot allocate trampoline space needed for fast "
                  "tracepoints on 4-byte instructions.");
          return 1;
        }

      offset = *jump_entry - (*trampoline + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      write_inferior_memory (*trampoline, buf, sizeof (jump_insn));

      /* Use a 16-bit relative jump instruction to jump to the trampoline.  */
      offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
      memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
      memcpy (buf + 2, &offset, 2);
      memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
      *jjump_pad_insn_size = sizeof (small_jump_insn);
    }
  else
    {
      /* Else use a 32-bit relative jump instruction.  */
      offset = *jump_entry - (tpaddr + sizeof (jump_insn));
      memcpy (buf, jump_insn, sizeof (jump_insn));
      memcpy (buf + 1, &offset, 4);
      memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
      *jjump_pad_insn_size = sizeof (jump_insn);
    }

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                      CORE_ADDR collector,
                                      CORE_ADDR lockaddr,
                                      ULONGEST orig_size,
                                      CORE_ADDR *jump_entry,
                                      CORE_ADDR *trampoline,
                                      ULONGEST *trampoline_size,
                                      unsigned char *jjump_pad_insn,
                                      ULONGEST *jjump_pad_insn_size,
                                      CORE_ADDR *adjusted_insn_addr,
                                      CORE_ADDR *adjusted_insn_addr_end,
                                      char *err)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                   collector, lockaddr,
                                                   orig_size, jump_entry,
                                                   trampoline, trampoline_size,
                                                   jjump_pad_insn,
                                                   jjump_pad_insn_size,
                                                   adjusted_insn_addr,
                                                   adjusted_insn_addr_end,
                                                   err);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                collector, lockaddr,
                                                orig_size, jump_entry,
                                                trampoline, trampoline_size,
                                                jjump_pad_insn,
                                                jjump_pad_insn_size,
                                                adjusted_insn_addr,
                                                adjusted_insn_addr_end,
                                                err);
}

/* Return the minimum instruction length for fast tracepoints on x86/x86-64
   architectures.  */

static int
x86_get_min_fast_tracepoint_insn_len (void)
{
  static int warned_about_fast_tracepoints = 0;

#ifdef __x86_64__
  /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
     used for fast tracepoints.  */
  if (is_64bit_tdesc ())
    return 5;
#endif

  if (agent_loaded_p ())
    {
      char errbuf[IPA_BUFSIZ];

      errbuf[0] = '\0';

      /* On x86, if trampolines are available, then 4-byte jump instructions
         with a 2-byte offset may be used, otherwise 5-byte jump instructions
         with a 4-byte offset are used instead.  */
      if (have_fast_tracepoint_trampoline_buffer (errbuf))
        return 4;
      else
        {
2061 /* GDB has no channel to explain to the user why a shorter fast
2062 tracepoint is not possible, but at least make GDBserver
2063 mention that something has gone awry. */
2064 if (!warned_about_fast_tracepoints)
2065 {
2066 warning ("4-byte fast tracepoints not available; %s", errbuf);
2067 warned_about_fast_tracepoints = 1;
2068 }
2069 return 5;
2070 }
2071 }
2072 else
2073 {
2074 /* Indicate that the minimum length is currently unknown since the IPA
2075 has not loaded yet. */
2076 return 0;
2077 }
2078 }
2079
2080 static void
2081 add_insns (unsigned char *start, int len)
2082 {
2083 CORE_ADDR buildaddr = current_insn_ptr;
2084
2085 if (debug_threads)
2086 debug_printf ("Adding %d bytes of insn at %s\n",
2087 len, paddress (buildaddr));
2088
2089 append_insns (&buildaddr, len, start);
2090 current_insn_ptr = buildaddr;
2091 }
2092
2093 /* Our general strategy for emitting code is to avoid specifying raw
2094 bytes whenever possible, and instead copy a block of inline asm
2095 that is embedded in the function. This is a little messy, because
2096 we need to keep the compiler from discarding what looks like dead
2097 code, plus suppress various warnings. */
2098
2099 #define EMIT_ASM(NAME, INSNS) \
2100 do \
2101 { \
2102 extern unsigned char start_ ## NAME, end_ ## NAME; \
2103 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2104 __asm__ ("jmp end_" #NAME "\n" \
2105 "\t" "start_" #NAME ":" \
2106 "\t" INSNS "\n" \
2107 "\t" "end_" #NAME ":"); \
2108 } while (0)
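
/* A rough sketch of what EMIT_ASM expands to, for a hypothetical
   EMIT_ASM (sample, "pop %rax"):

     extern unsigned char start_sample, end_sample;
     add_insns (&start_sample, &end_sample - &start_sample);
     __asm__ ("jmp end_sample\n"
	      "\t" "start_sample:" "\t" "pop %rax" "\n"
	      "\t" "end_sample:");

   The inline asm is assembled into gdbserver itself but jumped over
   at run time; only add_insns ever uses the bytes between the two
   labels, copying them into the inferior's jump pad.  */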
2109
2110 #ifdef __x86_64__
2111
2112 #define EMIT_ASM32(NAME,INSNS) \
2113 do \
2114 { \
2115 extern unsigned char start_ ## NAME, end_ ## NAME; \
2116 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2117 __asm__ (".code32\n" \
2118 "\t" "jmp end_" #NAME "\n" \
2119 "\t" "start_" #NAME ":\n" \
2120 "\t" INSNS "\n" \
2121 "\t" "end_" #NAME ":\n" \
2122 ".code64\n"); \
2123 } while (0)
2124
2125 #else
2126
2127 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
2128
2129 #endif
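
/* In a 64-bit gdbserver, EMIT_ASM32 brackets the template with
   ".code32"/".code64" so the assembler emits 32-bit encodings for a
   32-bit inferior.  Those bytes would fault if executed inside the
   64-bit server, but they never are: as with EMIT_ASM, they are only
   copied out by add_insns.  */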
2130
2131 #ifdef __x86_64__
2132
2133 static void
2134 amd64_emit_prologue (void)
2135 {
2136 EMIT_ASM (amd64_prologue,
2137 "pushq %rbp\n\t"
2138 "movq %rsp,%rbp\n\t"
2139 "sub $0x20,%rsp\n\t"
2140 "movq %rdi,-8(%rbp)\n\t"
2141 "movq %rsi,-16(%rbp)");
2142 }
2143
2144
2145 static void
2146 amd64_emit_epilogue (void)
2147 {
2148 EMIT_ASM (amd64_epilogue,
2149 "movq -16(%rbp),%rdi\n\t"
2150 "movq %rax,(%rdi)\n\t"
2151 "xor %rax,%rax\n\t"
2152 "leave\n\t"
2153 "ret");
2154 }
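
/* Conventions assumed by all the amd64_emit_* routines, as set up by
   the prologue above: %rdi holds the raw register base and %rsi the
   result pointer (both also saved at -8(%rbp) and -16(%rbp)); the top
   of the evaluation stack is cached in %rax, with deeper entries kept
   as 8-byte words on the machine stack.  The epilogue stores the
   final %rax through the saved result pointer and returns 0 for
   success.  */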
2155
2156 static void
2157 amd64_emit_add (void)
2158 {
2159 EMIT_ASM (amd64_add,
2160 "add (%rsp),%rax\n\t"
2161 "lea 0x8(%rsp),%rsp");
2162 }
2163
2164 static void
2165 amd64_emit_sub (void)
2166 {
2167 EMIT_ASM (amd64_sub,
2168 "sub %rax,(%rsp)\n\t"
2169 "pop %rax");
2170 }
2171
2172 static void
2173 amd64_emit_mul (void)
2174 {
2175 emit_error = 1;
2176 }
2177
2178 static void
2179 amd64_emit_lsh (void)
2180 {
2181 emit_error = 1;
2182 }
2183
2184 static void
2185 amd64_emit_rsh_signed (void)
2186 {
2187 emit_error = 1;
2188 }
2189
2190 static void
2191 amd64_emit_rsh_unsigned (void)
2192 {
2193 emit_error = 1;
2194 }
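
/* Multiply and the shifts are not implemented here; setting
   emit_error makes bytecode compilation fail, after which the
   tracepoint code falls back to interpreting the agent expression
   instead of running compiled code.  */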
2195
2196 static void
2197 amd64_emit_ext (int arg)
2198 {
2199 switch (arg)
2200 {
2201 case 8:
2202 EMIT_ASM (amd64_ext_8,
2203 "cbtw\n\t"
2204 "cwtl\n\t"
2205 "cltq");
2206 break;
2207 case 16:
2208 EMIT_ASM (amd64_ext_16,
2209 "cwtl\n\t"
2210 "cltq");
2211 break;
2212 case 32:
2213 EMIT_ASM (amd64_ext_32,
2214 "cltq");
2215 break;
2216 default:
2217 emit_error = 1;
2218 }
2219 }
2220
2221 static void
2222 amd64_emit_log_not (void)
2223 {
2224 EMIT_ASM (amd64_log_not,
2225 "test %rax,%rax\n\t"
2226 "sete %cl\n\t"
2227 "movzbq %cl,%rax");
2228 }
2229
2230 static void
2231 amd64_emit_bit_and (void)
2232 {
2233 EMIT_ASM (amd64_and,
2234 "and (%rsp),%rax\n\t"
2235 "lea 0x8(%rsp),%rsp");
2236 }
2237
2238 static void
2239 amd64_emit_bit_or (void)
2240 {
2241 EMIT_ASM (amd64_or,
2242 "or (%rsp),%rax\n\t"
2243 "lea 0x8(%rsp),%rsp");
2244 }
2245
2246 static void
2247 amd64_emit_bit_xor (void)
2248 {
2249 EMIT_ASM (amd64_xor,
2250 "xor (%rsp),%rax\n\t"
2251 "lea 0x8(%rsp),%rsp");
2252 }
2253
2254 static void
2255 amd64_emit_bit_not (void)
2256 {
2257 EMIT_ASM (amd64_bit_not,
2258 "xorq $0xffffffffffffffff,%rax");
2259 }
2260
2261 static void
2262 amd64_emit_equal (void)
2263 {
2264 EMIT_ASM (amd64_equal,
2265 "cmp %rax,(%rsp)\n\t"
2266 "je .Lamd64_equal_true\n\t"
2267 "xor %rax,%rax\n\t"
2268 "jmp .Lamd64_equal_end\n\t"
2269 ".Lamd64_equal_true:\n\t"
2270 "mov $0x1,%rax\n\t"
2271 ".Lamd64_equal_end:\n\t"
2272 "lea 0x8(%rsp),%rsp");
2273 }
2274
2275 static void
2276 amd64_emit_less_signed (void)
2277 {
2278 EMIT_ASM (amd64_less_signed,
2279 "cmp %rax,(%rsp)\n\t"
2280 "jl .Lamd64_less_signed_true\n\t"
2281 "xor %rax,%rax\n\t"
2282 "jmp .Lamd64_less_signed_end\n\t"
2283 ".Lamd64_less_signed_true:\n\t"
2284 "mov $1,%rax\n\t"
2285 ".Lamd64_less_signed_end:\n\t"
2286 "lea 0x8(%rsp),%rsp");
2287 }
2288
2289 static void
2290 amd64_emit_less_unsigned (void)
2291 {
2292 EMIT_ASM (amd64_less_unsigned,
2293 "cmp %rax,(%rsp)\n\t"
2294 "jb .Lamd64_less_unsigned_true\n\t"
2295 "xor %rax,%rax\n\t"
2296 "jmp .Lamd64_less_unsigned_end\n\t"
2297 ".Lamd64_less_unsigned_true:\n\t"
2298 "mov $1,%rax\n\t"
2299 ".Lamd64_less_unsigned_end:\n\t"
2300 "lea 0x8(%rsp),%rsp");
2301 }
2302
2303 static void
2304 amd64_emit_ref (int size)
2305 {
2306 switch (size)
2307 {
2308 case 1:
2309 EMIT_ASM (amd64_ref1,
2310 "movb (%rax),%al");
2311 break;
2312 case 2:
2313 EMIT_ASM (amd64_ref2,
2314 "movw (%rax),%ax");
2315 break;
2316 case 4:
2317 EMIT_ASM (amd64_ref4,
2318 "movl (%rax),%eax");
2319 break;
2320 case 8:
2321 EMIT_ASM (amd64_ref8,
2322 "movq (%rax),%rax");
2323 break;
2324 }
2325 }
2326
2327 static void
2328 amd64_emit_if_goto (int *offset_p, int *size_p)
2329 {
2330 EMIT_ASM (amd64_if_goto,
2331 "mov %rax,%rcx\n\t"
2332 "pop %rax\n\t"
2333 "cmp $0,%rcx\n\t"
2334 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2335 if (offset_p)
2336 *offset_p = 10;
2337 if (size_p)
2338 *size_p = 4;
2339 }
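
/* The offset of 10 counts the emitted bytes before the jne
   displacement, assuming GAS's usual encodings: mov %rax,%rcx (3)
   + pop %rax (1) + cmp $0,%rcx (4) + the two-byte 0f 85 opcode.
   amd64_write_goto_address later patches the 4-byte displacement in
   place.  */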
2340
2341 static void
2342 amd64_emit_goto (int *offset_p, int *size_p)
2343 {
2344 EMIT_ASM (amd64_goto,
2345 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2346 if (offset_p)
2347 *offset_p = 1;
2348 if (size_p)
2349 *size_p = 4;
2350 }
2351
2352 static void
2353 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2354 {
2355 int diff = (to - (from + size));
2356 unsigned char buf[sizeof (int)];
2357
2358 if (size != 4)
2359 {
2360 emit_error = 1;
2361 return;
2362 }
2363
2364 memcpy (buf, &diff, sizeof (int));
2365 write_inferior_memory (from, buf, sizeof (int));
2366 }
2367
2368 static void
2369 amd64_emit_const (LONGEST num)
2370 {
2371 unsigned char buf[16];
2372 int i;
2373 CORE_ADDR buildaddr = current_insn_ptr;
2374
2375 i = 0;
2376 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
2377 memcpy (&buf[i], &num, sizeof (num));
2378 i += 8;
2379 append_insns (&buildaddr, i, buf);
2380 current_insn_ptr = buildaddr;
2381 }
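
/* This always emits the 10-byte movabs form.  For example,
   amd64_emit_const (0x1234) produces
   48 b8 34 12 00 00 00 00 00 00 (little-endian immediate).  */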
2382
2383 static void
2384 amd64_emit_call (CORE_ADDR fn)
2385 {
2386 unsigned char buf[16];
2387 int i;
2388 CORE_ADDR buildaddr;
2389 LONGEST offset64;
2390
2391 /* The destination function, being in the shared library, may be
2392 more than 31 bits away from the compiled code pad. */
2393
2394 buildaddr = current_insn_ptr;
2395
2396 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
2397
2398 i = 0;
2399
2400 if (offset64 > INT_MAX || offset64 < INT_MIN)
2401 {
2402 /* Offset is too large for a direct call. Call through a
2403 register instead; %rdx is call-clobbered and carries no
2404 argument here, so we don't have to push/pop it. */
2405 buf[i++] = 0x48; /* movabs $fn,%rdx */
2406 buf[i++] = 0xba;
2407 memcpy (buf + i, &fn, 8);
2408 i += 8;
2409 buf[i++] = 0xff; /* callq *%rdx */
2410 buf[i++] = 0xd2;
2411 }
2412 else
2413 {
2414 int offset32 = offset64; /* we know we can't overflow here. */
     buf[i++] = 0xe8; /* call <reladdr> */
2415 memcpy (buf + i, &offset32, 4);
2416 i += 4;
2417 }
2418
2419 append_insns (&buildaddr, i, buf);
2420 current_insn_ptr = buildaddr;
2421 }
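
/* In practice the helpers live in the in-process agent's shared
   library while the jump pad is allocated near the executable, so
   offset64 frequently falls outside the +/-2 GiB reach of a rel32
   call and the movabs path above is taken.  */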
2422
2423 static void
2424 amd64_emit_reg (int reg)
2425 {
2426 unsigned char buf[16];
2427 int i;
2428 CORE_ADDR buildaddr;
2429
2430 /* Assume raw_regs is still in %rdi. */
2431 buildaddr = current_insn_ptr;
2432 i = 0;
2433 buf[i++] = 0xbe; /* mov $<n>,%esi */
2434 memcpy (&buf[i], &reg, sizeof (reg));
2435 i += 4;
2436 append_insns (&buildaddr, i, buf);
2437 current_insn_ptr = buildaddr;
2438 amd64_emit_call (get_raw_reg_func_addr ());
2439 }
2440
2441 static void
2442 amd64_emit_pop (void)
2443 {
2444 EMIT_ASM (amd64_pop,
2445 "pop %rax");
2446 }
2447
2448 static void
2449 amd64_emit_stack_flush (void)
2450 {
2451 EMIT_ASM (amd64_stack_flush,
2452 "push %rax");
2453 }
2454
2455 static void
2456 amd64_emit_zero_ext (int arg)
2457 {
2458 switch (arg)
2459 {
2460 case 8:
2461 EMIT_ASM (amd64_zero_ext_8,
2462 "and $0xff,%rax");
2463 break;
2464 case 16:
2465 EMIT_ASM (amd64_zero_ext_16,
2466 "and $0xffff,%rax");
2467 break;
2468 case 32:
2469 EMIT_ASM (amd64_zero_ext_32,
2470 "mov $0xffffffff,%rcx\n\t"
2471 "and %rcx,%rax");
2472 break;
2473 default:
2474 emit_error = 1;
2475 }
2476 }
2477
2478 static void
2479 amd64_emit_swap (void)
2480 {
2481 EMIT_ASM (amd64_swap,
2482 "mov %rax,%rcx\n\t"
2483 "pop %rax\n\t"
2484 "push %rcx");
2485 }
2486
2487 static void
2488 amd64_emit_stack_adjust (int n)
2489 {
2490 unsigned char buf[16];
2491 int i;
2492 CORE_ADDR buildaddr = current_insn_ptr;
2493
2494 i = 0;
2495 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
2496 buf[i++] = 0x8d;
2497 buf[i++] = 0x64;
2498 buf[i++] = 0x24;
2499 /* This only handles adjustments up to 16, but we don't expect any more. */
2500 buf[i++] = n * 8;
2501 append_insns (&buildaddr, i, buf);
2502 current_insn_ptr = buildaddr;
2503 }
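
/* Example: amd64_emit_stack_adjust (2) emits 48 8d 64 24 10,
   i.e. lea 0x10(%rsp),%rsp, discarding two 8-byte stack words
   without touching the flags (which an add would clobber).  */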
2504
2505 /* FN's prototype is `LONGEST(*fn)(int)'. */
2506
2507 static void
2508 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2509 {
2510 unsigned char buf[16];
2511 int i;
2512 CORE_ADDR buildaddr;
2513
2514 buildaddr = current_insn_ptr;
2515 i = 0;
2516 buf[i++] = 0xbf; /* movl $<n>,%edi */
2517 memcpy (&buf[i], &arg1, sizeof (arg1));
2518 i += 4;
2519 append_insns (&buildaddr, i, buf);
2520 current_insn_ptr = buildaddr;
2521 amd64_emit_call (fn);
2522 }
2523
2524 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2525
2526 static void
2527 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2528 {
2529 unsigned char buf[16];
2530 int i;
2531 CORE_ADDR buildaddr;
2532
2533 buildaddr = current_insn_ptr;
2534 i = 0;
2535 buf[i++] = 0xbf; /* movl $<n>,%edi */
2536 memcpy (&buf[i], &arg1, sizeof (arg1));
2537 i += 4;
2538 append_insns (&buildaddr, i, buf);
2539 current_insn_ptr = buildaddr;
2540 EMIT_ASM (amd64_void_call_2_a,
2541 /* Save away a copy of the stack top. */
2542 "push %rax\n\t"
2543 /* Also pass top as the second argument. */
2544 "mov %rax,%rsi");
2545 amd64_emit_call (fn);
2546 EMIT_ASM (amd64_void_call_2_b,
2547 /* Restore the stack top, %rax may have been trashed. */
2548 "pop %rax");
2549 }
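
/* Per the SysV AMD64 calling convention, arg1 travels in %edi
   (stored just above) and the stack-top value in %rsi, matching the
   `void (*fn) (int, LONGEST)' prototype.  */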
2550
2551 void
2552 amd64_emit_eq_goto (int *offset_p, int *size_p)
2553 {
2554 EMIT_ASM (amd64_eq,
2555 "cmp %rax,(%rsp)\n\t"
2556 "jne .Lamd64_eq_fallthru\n\t"
2557 "lea 0x8(%rsp),%rsp\n\t"
2558 "pop %rax\n\t"
2559 /* jmp, but don't trust the assembler to choose the right jump */
2560 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2561 ".Lamd64_eq_fallthru:\n\t"
2562 "lea 0x8(%rsp),%rsp\n\t"
2563 "pop %rax");
2564
2565 if (offset_p)
2566 *offset_p = 13;
2567 if (size_p)
2568 *size_p = 4;
2569 }
2570
2571 void
2572 amd64_emit_ne_goto (int *offset_p, int *size_p)
2573 {
2574 EMIT_ASM (amd64_ne,
2575 "cmp %rax,(%rsp)\n\t"
2576 "je .Lamd64_ne_fallthru\n\t"
2577 "lea 0x8(%rsp),%rsp\n\t"
2578 "pop %rax\n\t"
2579 /* jmp, but don't trust the assembler to choose the right jump */
2580 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2581 ".Lamd64_ne_fallthru:\n\t"
2582 "lea 0x8(%rsp),%rsp\n\t"
2583 "pop %rax");
2584
2585 if (offset_p)
2586 *offset_p = 13;
2587 if (size_p)
2588 *size_p = 4;
2589 }
2590
2591 void
2592 amd64_emit_lt_goto (int *offset_p, int *size_p)
2593 {
2594 EMIT_ASM (amd64_lt,
2595 "cmp %rax,(%rsp)\n\t"
2596 "jnl .Lamd64_lt_fallthru\n\t"
2597 "lea 0x8(%rsp),%rsp\n\t"
2598 "pop %rax\n\t"
2599 /* jmp, but don't trust the assembler to choose the right jump */
2600 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2601 ".Lamd64_lt_fallthru:\n\t"
2602 "lea 0x8(%rsp),%rsp\n\t"
2603 "pop %rax");
2604
2605 if (offset_p)
2606 *offset_p = 13;
2607 if (size_p)
2608 *size_p = 4;
2609 }
2610
2611 void
2612 amd64_emit_le_goto (int *offset_p, int *size_p)
2613 {
2614 EMIT_ASM (amd64_le,
2615 "cmp %rax,(%rsp)\n\t"
2616 "jnle .Lamd64_le_fallthru\n\t"
2617 "lea 0x8(%rsp),%rsp\n\t"
2618 "pop %rax\n\t"
2619 /* jmp, but don't trust the assembler to choose the right jump */
2620 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2621 ".Lamd64_le_fallthru:\n\t"
2622 "lea 0x8(%rsp),%rsp\n\t"
2623 "pop %rax");
2624
2625 if (offset_p)
2626 *offset_p = 13;
2627 if (size_p)
2628 *size_p = 4;
2629 }
2630
2631 void
2632 amd64_emit_gt_goto (int *offset_p, int *size_p)
2633 {
2634 EMIT_ASM (amd64_gt,
2635 "cmp %rax,(%rsp)\n\t"
2636 "jng .Lamd64_gt_fallthru\n\t"
2637 "lea 0x8(%rsp),%rsp\n\t"
2638 "pop %rax\n\t"
2639 /* jmp, but don't trust the assembler to choose the right jump */
2640 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2641 ".Lamd64_gt_fallthru:\n\t"
2642 "lea 0x8(%rsp),%rsp\n\t"
2643 "pop %rax");
2644
2645 if (offset_p)
2646 *offset_p = 13;
2647 if (size_p)
2648 *size_p = 4;
2649 }
2650
2651 void
2652 amd64_emit_ge_goto (int *offset_p, int *size_p)
2653 {
2654 EMIT_ASM (amd64_ge,
2655 "cmp %rax,(%rsp)\n\t"
2656 "jnge .Lamd64_ge_fallthru\n\t"
2657 ".Lamd64_ge_jump:\n\t"
2658 "lea 0x8(%rsp),%rsp\n\t"
2659 "pop %rax\n\t"
2660 /* jmp, but don't trust the assembler to choose the right jump */
2661 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2662 ".Lamd64_ge_fallthru:\n\t"
2663 "lea 0x8(%rsp),%rsp\n\t"
2664 "pop %rax");
2665
2666 if (offset_p)
2667 *offset_p = 13;
2668 if (size_p)
2669 *size_p = 4;
2670 }
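
/* In each of the six comparison gotos above, the offset of 13 is the
   byte position of the e9 displacement, assuming GAS's usual
   encodings: cmp %rax,(%rsp) (4) + Jcc rel8 (2)
   + lea 0x8(%rsp),%rsp (5) + pop %rax (1) + the e9 opcode itself (1).
   Both paths discard the two compared operands; only the taken path
   reaches the patched jump.  */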
2671
2672 struct emit_ops amd64_emit_ops =
2673 {
2674 amd64_emit_prologue,
2675 amd64_emit_epilogue,
2676 amd64_emit_add,
2677 amd64_emit_sub,
2678 amd64_emit_mul,
2679 amd64_emit_lsh,
2680 amd64_emit_rsh_signed,
2681 amd64_emit_rsh_unsigned,
2682 amd64_emit_ext,
2683 amd64_emit_log_not,
2684 amd64_emit_bit_and,
2685 amd64_emit_bit_or,
2686 amd64_emit_bit_xor,
2687 amd64_emit_bit_not,
2688 amd64_emit_equal,
2689 amd64_emit_less_signed,
2690 amd64_emit_less_unsigned,
2691 amd64_emit_ref,
2692 amd64_emit_if_goto,
2693 amd64_emit_goto,
2694 amd64_write_goto_address,
2695 amd64_emit_const,
2696 amd64_emit_call,
2697 amd64_emit_reg,
2698 amd64_emit_pop,
2699 amd64_emit_stack_flush,
2700 amd64_emit_zero_ext,
2701 amd64_emit_swap,
2702 amd64_emit_stack_adjust,
2703 amd64_emit_int_call_1,
2704 amd64_emit_void_call_2,
2705 amd64_emit_eq_goto,
2706 amd64_emit_ne_goto,
2707 amd64_emit_lt_goto,
2708 amd64_emit_le_goto,
2709 amd64_emit_gt_goto,
2710 amd64_emit_ge_goto
2711 };
2712
2713 #endif /* __x86_64__ */
2714
2715 static void
2716 i386_emit_prologue (void)
2717 {
2718 EMIT_ASM32 (i386_prologue,
2719 "push %ebp\n\t"
2720 "mov %esp,%ebp\n\t"
2721 "push %ebx");
2722 /* At this point, the raw regs base address is at 8(%ebp), and the
2723 value pointer is at 12(%ebp). */
2724 }
2725
2726 static void
2727 i386_emit_epilogue (void)
2728 {
2729 EMIT_ASM32 (i386_epilogue,
2730 "mov 12(%ebp),%ecx\n\t"
2731 "mov %eax,(%ecx)\n\t"
2732 "mov %ebx,0x4(%ecx)\n\t"
2733 "xor %eax,%eax\n\t"
2734 "pop %ebx\n\t"
2735 "pop %ebp\n\t"
2736 "ret");
2737 }
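
/* Throughout the i386_emit_* routines, values are 64 bits wide and
   live split across a register pair: low half in %eax, high half in
   %ebx, with deeper stack entries stored as two 4-byte words (low
   word at the lower address).  The add/adc and sub/sbb pairs below
   propagate carries between the halves.  */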
2738
2739 static void
2740 i386_emit_add (void)
2741 {
2742 EMIT_ASM32 (i386_add,
2743 "add (%esp),%eax\n\t"
2744 "adc 0x4(%esp),%ebx\n\t"
2745 "lea 0x8(%esp),%esp");
2746 }
2747
2748 static void
2749 i386_emit_sub (void)
2750 {
2751 EMIT_ASM32 (i386_sub,
2752 "subl %eax,(%esp)\n\t"
2753 "sbbl %ebx,4(%esp)\n\t"
2754 "pop %eax\n\t"
2755 "pop %ebx\n\t");
2756 }
2757
2758 static void
2759 i386_emit_mul (void)
2760 {
2761 emit_error = 1;
2762 }
2763
2764 static void
2765 i386_emit_lsh (void)
2766 {
2767 emit_error = 1;
2768 }
2769
2770 static void
2771 i386_emit_rsh_signed (void)
2772 {
2773 emit_error = 1;
2774 }
2775
2776 static void
2777 i386_emit_rsh_unsigned (void)
2778 {
2779 emit_error = 1;
2780 }
2781
2782 static void
2783 i386_emit_ext (int arg)
2784 {
2785 switch (arg)
2786 {
2787 case 8:
2788 EMIT_ASM32 (i386_ext_8,
2789 "cbtw\n\t"
2790 "cwtl\n\t"
2791 "movl %eax,%ebx\n\t"
2792 "sarl $31,%ebx");
2793 break;
2794 case 16:
2795 EMIT_ASM32 (i386_ext_16,
2796 "cwtl\n\t"
2797 "movl %eax,%ebx\n\t"
2798 "sarl $31,%ebx");
2799 break;
2800 case 32:
2801 EMIT_ASM32 (i386_ext_32,
2802 "movl %eax,%ebx\n\t"
2803 "sarl $31,%ebx");
2804 break;
2805 default:
2806 emit_error = 1;
2807 }
2808 }
2809
2810 static void
2811 i386_emit_log_not (void)
2812 {
2813 EMIT_ASM32 (i386_log_not,
2814 "or %ebx,%eax\n\t"
2815 "test %eax,%eax\n\t"
2816 "sete %cl\n\t"
2817 "xor %ebx,%ebx\n\t"
2818 "movzbl %cl,%eax");
2819 }
2820
2821 static void
2822 i386_emit_bit_and (void)
2823 {
2824 EMIT_ASM32 (i386_and,
2825 "and (%esp),%eax\n\t"
2826 "and 0x4(%esp),%ebx\n\t"
2827 "lea 0x8(%esp),%esp");
2828 }
2829
2830 static void
2831 i386_emit_bit_or (void)
2832 {
2833 EMIT_ASM32 (i386_or,
2834 "or (%esp),%eax\n\t"
2835 "or 0x4(%esp),%ebx\n\t"
2836 "lea 0x8(%esp),%esp");
2837 }
2838
2839 static void
2840 i386_emit_bit_xor (void)
2841 {
2842 EMIT_ASM32 (i386_xor,
2843 "xor (%esp),%eax\n\t"
2844 "xor 0x4(%esp),%ebx\n\t"
2845 "lea 0x8(%esp),%esp");
2846 }
2847
2848 static void
2849 i386_emit_bit_not (void)
2850 {
2851 EMIT_ASM32 (i386_bit_not,
2852 "xor $0xffffffff,%eax\n\t"
2853 "xor $0xffffffff,%ebx\n\t");
2854 }
2855
2856 static void
2857 i386_emit_equal (void)
2858 {
2859 EMIT_ASM32 (i386_equal,
2860 "cmpl %ebx,4(%esp)\n\t"
2861 "jne .Li386_equal_false\n\t"
2862 "cmpl %eax,(%esp)\n\t"
2863 "je .Li386_equal_true\n\t"
2864 ".Li386_equal_false:\n\t"
2865 "xor %eax,%eax\n\t"
2866 "jmp .Li386_equal_end\n\t"
2867 ".Li386_equal_true:\n\t"
2868 "mov $1,%eax\n\t"
2869 ".Li386_equal_end:\n\t"
2870 "xor %ebx,%ebx\n\t"
2871 "lea 0x8(%esp),%esp");
2872 }
2873
2874 static void
2875 i386_emit_less_signed (void)
2876 {
2877 EMIT_ASM32 (i386_less_signed,
2878 "cmpl %ebx,4(%esp)\n\t"
2879 "jl .Li386_less_signed_true\n\t"
2880 "jne .Li386_less_signed_false\n\t"
2881 "cmpl %eax,(%esp)\n\t"
2882 "jl .Li386_less_signed_true\n\t"
2883 ".Li386_less_signed_false:\n\t"
2884 "xor %eax,%eax\n\t"
2885 "jmp .Li386_less_signed_end\n\t"
2886 ".Li386_less_signed_true:\n\t"
2887 "mov $1,%eax\n\t"
2888 ".Li386_less_signed_end:\n\t"
2889 "xor %ebx,%ebx\n\t"
2890 "lea 0x8(%esp),%esp");
2891 }
2892
2893 static void
2894 i386_emit_less_unsigned (void)
2895 {
2896 EMIT_ASM32 (i386_less_unsigned,
2897 "cmpl %ebx,4(%esp)\n\t"
2898 "jb .Li386_less_unsigned_true\n\t"
2899 "jne .Li386_less_unsigned_false\n\t"
2900 "cmpl %eax,(%esp)\n\t"
2901 "jb .Li386_less_unsigned_true\n\t"
2902 ".Li386_less_unsigned_false:\n\t"
2903 "xor %eax,%eax\n\t"
2904 "jmp .Li386_less_unsigned_end\n\t"
2905 ".Li386_less_unsigned_true:\n\t"
2906 "mov $1,%eax\n\t"
2907 ".Li386_less_unsigned_end:\n\t"
2908 "xor %ebx,%ebx\n\t"
2909 "lea 0x8(%esp),%esp");
2910 }
2911
2912 static void
2913 i386_emit_ref (int size)
2914 {
2915 switch (size)
2916 {
2917 case 1:
2918 EMIT_ASM32 (i386_ref1,
2919 "movb (%eax),%al");
2920 break;
2921 case 2:
2922 EMIT_ASM32 (i386_ref2,
2923 "movw (%eax),%ax");
2924 break;
2925 case 4:
2926 EMIT_ASM32 (i386_ref4,
2927 "movl (%eax),%eax");
2928 break;
2929 case 8:
2930 EMIT_ASM32 (i386_ref8,
2931 "movl 4(%eax),%ebx\n\t"
2932 "movl (%eax),%eax");
2933 break;
2934 }
2935 }
2936
2937 static void
2938 i386_emit_if_goto (int *offset_p, int *size_p)
2939 {
2940 EMIT_ASM32 (i386_if_goto,
2941 "mov %eax,%ecx\n\t"
2942 "or %ebx,%ecx\n\t"
2943 "pop %eax\n\t"
2944 "pop %ebx\n\t"
2945 "cmpl $0,%ecx\n\t"
2946 /* Don't trust the assembler to choose the right jump */
2947 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2948
2949 if (offset_p)
2950 *offset_p = 11; /* be sure that this matches the sequence above */
2951 if (size_p)
2952 *size_p = 4;
2953 }
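
/* The offset of 11 counts, assuming GAS's usual encodings:
   mov %eax,%ecx (2) + or %ebx,%ecx (2) + pop %eax (1) + pop %ebx (1)
   + cmpl $0,%ecx (3) + the two-byte 0f 85 opcode; this is what the
   "matches the sequence above" comment is guarding.  */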
2954
2955 static void
2956 i386_emit_goto (int *offset_p, int *size_p)
2957 {
2958 EMIT_ASM32 (i386_goto,
2959 /* Don't trust the assembler to choose the right jump */
2960 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2961 if (offset_p)
2962 *offset_p = 1;
2963 if (size_p)
2964 *size_p = 4;
2965 }
2966
2967 static void
2968 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2969 {
2970 int diff = (to - (from + size));
2971 unsigned char buf[sizeof (int)];
2972
2973 /* We're only doing 4-byte sizes at the moment. */
2974 if (size != 4)
2975 {
2976 emit_error = 1;
2977 return;
2978 }
2979
2980 memcpy (buf, &diff, sizeof (int));
2981 write_inferior_memory (from, buf, sizeof (int));
2982 }
2983
2984 static void
2985 i386_emit_const (LONGEST num)
2986 {
2987 unsigned char buf[16];
2988 int i, hi, lo;
2989 CORE_ADDR buildaddr = current_insn_ptr;
2990
2991 i = 0;
2992 buf[i++] = 0xb8; /* mov $<n>,%eax */
2993 lo = num & 0xffffffff;
2994 memcpy (&buf[i], &lo, sizeof (lo));
2995 i += 4;
2996 hi = ((num >> 32) & 0xffffffff);
2997 if (hi)
2998 {
2999 buf[i++] = 0xbb; /* mov $<n>,%ebx */
3000 memcpy (&buf[i], &hi, sizeof (hi));
3001 i += 4;
3002 }
3003 else
3004 {
3005 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
3006 }
3007 append_insns (&buildaddr, i, buf);
3008 current_insn_ptr = buildaddr;
3009 }
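
/* Example: i386_emit_const (0x100000002) emits
   b8 02 00 00 00 (mov $0x2,%eax) followed by
   bb 01 00 00 00 (mov $0x1,%ebx); a constant whose high half is zero
   gets the shorter xor %ebx,%ebx instead.  */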
3010
3011 static void
3012 i386_emit_call (CORE_ADDR fn)
3013 {
3014 unsigned char buf[16];
3015 int i, offset;
3016 CORE_ADDR buildaddr;
3017
3018 buildaddr = current_insn_ptr;
3019 i = 0;
3020 buf[i++] = 0xe8; /* call <reladdr> */
3021 offset = ((int) fn) - (buildaddr + 5);
3022 memcpy (buf + 1, &offset, 4);
3023 append_insns (&buildaddr, 5, buf);
3024 current_insn_ptr = buildaddr;
3025 }
3026
3027 static void
3028 i386_emit_reg (int reg)
3029 {
3030 unsigned char buf[16];
3031 int i;
3032 CORE_ADDR buildaddr;
3033
3034 EMIT_ASM32 (i386_reg_a,
3035 "sub $0x8,%esp");
3036 buildaddr = current_insn_ptr;
3037 i = 0;
3038 buf[i++] = 0xb8; /* mov $<n>,%eax */
3039 memcpy (&buf[i], &reg, sizeof (reg));
3040 i += 4;
3041 append_insns (&buildaddr, i, buf);
3042 current_insn_ptr = buildaddr;
3043 EMIT_ASM32 (i386_reg_b,
3044 "mov %eax,4(%esp)\n\t"
3045 "mov 8(%ebp),%eax\n\t"
3046 "mov %eax,(%esp)");
3047 i386_emit_call (get_raw_reg_func_addr ());
3048 EMIT_ASM32 (i386_reg_c,
3049 "xor %ebx,%ebx\n\t"
3050 "lea 0x8(%esp),%esp");
3051 }
3052
3053 static void
3054 i386_emit_pop (void)
3055 {
3056 EMIT_ASM32 (i386_pop,
3057 "pop %eax\n\t"
3058 "pop %ebx");
3059 }
3060
3061 static void
3062 i386_emit_stack_flush (void)
3063 {
3064 EMIT_ASM32 (i386_stack_flush,
3065 "push %ebx\n\t"
3066 "push %eax");
3067 }
3068
3069 static void
3070 i386_emit_zero_ext (int arg)
3071 {
3072 switch (arg)
3073 {
3074 case 8:
3075 EMIT_ASM32 (i386_zero_ext_8,
3076 "and $0xff,%eax\n\t"
3077 "xor %ebx,%ebx");
3078 break;
3079 case 16:
3080 EMIT_ASM32 (i386_zero_ext_16,
3081 "and $0xffff,%eax\n\t"
3082 "xor %ebx,%ebx");
3083 break;
3084 case 32:
3085 EMIT_ASM32 (i386_zero_ext_32,
3086 "xor %ebx,%ebx");
3087 break;
3088 default:
3089 emit_error = 1;
3090 }
3091 }
3092
3093 static void
3094 i386_emit_swap (void)
3095 {
3096 EMIT_ASM32 (i386_swap,
3097 "mov %eax,%ecx\n\t"
3098 "mov %ebx,%edx\n\t"
3099 "pop %eax\n\t"
3100 "pop %ebx\n\t"
3101 "push %edx\n\t"
3102 "push %ecx");
3103 }
3104
3105 static void
3106 i386_emit_stack_adjust (int n)
3107 {
3108 unsigned char buf[16];
3109 int i;
3110 CORE_ADDR buildaddr = current_insn_ptr;
3111
3112 i = 0;
3113 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
3114 buf[i++] = 0x64;
3115 buf[i++] = 0x24;
3116 buf[i++] = n * 8;
3117 append_insns (&buildaddr, i, buf);
3118 current_insn_ptr = buildaddr;
3119 }
3120
3121 /* FN's prototype is `LONGEST(*fn)(int)'. */
3122
3123 static void
3124 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
3125 {
3126 unsigned char buf[16];
3127 int i;
3128 CORE_ADDR buildaddr;
3129
3130 EMIT_ASM32 (i386_int_call_1_a,
3131 /* Reserve a bit of stack space. */
3132 "sub $0x8,%esp");
3133 /* Put the one argument on the stack. */
3134 buildaddr = current_insn_ptr;
3135 i = 0;
3136 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
3137 buf[i++] = 0x04;
3138 buf[i++] = 0x24;
3139 memcpy (&buf[i], &arg1, sizeof (arg1));
3140 i += 4;
3141 append_insns (&buildaddr, i, buf);
3142 current_insn_ptr = buildaddr;
3143 i386_emit_call (fn);
3144 EMIT_ASM32 (i386_int_call_1_c,
3145 "mov %edx,%ebx\n\t"
3146 "lea 0x8(%esp),%esp");
3147 }
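
/* A 64-bit return value comes back in %edx:%eax under the i386
   psABI, so the mov %edx,%ebx above moves the high half into the
   bytecode's high register before the argument slot is freed.  */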
3148
3149 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
3150
3151 static void
3152 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
3153 {
3154 unsigned char buf[16];
3155 int i;
3156 CORE_ADDR buildaddr;
3157
3158 EMIT_ASM32 (i386_void_call_2_a,
3159 /* Preserve %eax only; we don't have to worry about %ebx. */
3160 "push %eax\n\t"
3161 /* Reserve a bit of stack space for arguments. */
3162 "sub $0x10,%esp\n\t"
3163 /* Copy "top" to the second argument position. (Note that
3164 we can't assume function won't scribble on its
3165 arguments, so don't try to restore from this.) */
3166 "mov %eax,4(%esp)\n\t"
3167 "mov %ebx,8(%esp)");
3168 /* Put the first argument on the stack. */
3169 buildaddr = current_insn_ptr;
3170 i = 0;
3171 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
3172 buf[i++] = 0x04;
3173 buf[i++] = 0x24;
3174 memcpy (&buf[i], &arg1, sizeof (arg1));
3175 i += 4;
3176 append_insns (&buildaddr, i, buf);
3177 current_insn_ptr = buildaddr;
3178 i386_emit_call (fn);
3179 EMIT_ASM32 (i386_void_call_2_b,
3180 "lea 0x10(%esp),%esp\n\t"
3181 /* Restore original stack top. */
3182 "pop %eax");
3183 }
3184
3185
3186 void
3187 i386_emit_eq_goto (int *offset_p, int *size_p)
3188 {
3189 EMIT_ASM32 (eq,
3190 /* Check low half first, more likely to be decider */
3191 "cmpl %eax,(%esp)\n\t"
3192 "jne .Leq_fallthru\n\t"
3193 "cmpl %ebx,4(%esp)\n\t"
3194 "jne .Leq_fallthru\n\t"
3195 "lea 0x8(%esp),%esp\n\t"
3196 "pop %eax\n\t"
3197 "pop %ebx\n\t"
3198 /* jmp, but don't trust the assembler to choose the right jump */
3199 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3200 ".Leq_fallthru:\n\t"
3201 "lea 0x8(%esp),%esp\n\t"
3202 "pop %eax\n\t"
3203 "pop %ebx");
3204
3205 if (offset_p)
3206 *offset_p = 18;
3207 if (size_p)
3208 *size_p = 4;
3209 }
3210
3211 void
3212 i386_emit_ne_goto (int *offset_p, int *size_p)
3213 {
3214 EMIT_ASM32 (ne,
3215 /* Check low half first, more likely to be decider */
3216 "cmpl %eax,(%esp)\n\t"
3217 "jne .Lne_jump\n\t"
3218 "cmpl %ebx,4(%esp)\n\t"
3219 "je .Lne_fallthru\n\t"
3220 ".Lne_jump:\n\t"
3221 "lea 0x8(%esp),%esp\n\t"
3222 "pop %eax\n\t"
3223 "pop %ebx\n\t"
3224 /* jmp, but don't trust the assembler to choose the right jump */
3225 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3226 ".Lne_fallthru:\n\t"
3227 "lea 0x8(%esp),%esp\n\t"
3228 "pop %eax\n\t"
3229 "pop %ebx");
3230
3231 if (offset_p)
3232 *offset_p = 18;
3233 if (size_p)
3234 *size_p = 4;
3235 }
3236
3237 void
3238 i386_emit_lt_goto (int *offset_p, int *size_p)
3239 {
3240 EMIT_ASM32 (lt,
3241 "cmpl %ebx,4(%esp)\n\t"
3242 "jl .Llt_jump\n\t"
3243 "jne .Llt_fallthru\n\t"
3244 "cmpl %eax,(%esp)\n\t"
3245 "jnl .Llt_fallthru\n\t"
3246 ".Llt_jump:\n\t"
3247 "lea 0x8(%esp),%esp\n\t"
3248 "pop %eax\n\t"
3249 "pop %ebx\n\t"
3250 /* jmp, but don't trust the assembler to choose the right jump */
3251 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3252 ".Llt_fallthru:\n\t"
3253 "lea 0x8(%esp),%esp\n\t"
3254 "pop %eax\n\t"
3255 "pop %ebx");
3256
3257 if (offset_p)
3258 *offset_p = 20;
3259 if (size_p)
3260 *size_p = 4;
3261 }
3262
3263 void
3264 i386_emit_le_goto (int *offset_p, int *size_p)
3265 {
3266 EMIT_ASM32 (le,
3267 "cmpl %ebx,4(%esp)\n\t"
3268 "jle .Lle_jump\n\t"
3269 "jne .Lle_fallthru\n\t"
3270 "cmpl %eax,(%esp)\n\t"
3271 "jnle .Lle_fallthru\n\t"
3272 ".Lle_jump:\n\t"
3273 "lea 0x8(%esp),%esp\n\t"
3274 "pop %eax\n\t"
3275 "pop %ebx\n\t"
3276 /* jmp, but don't trust the assembler to choose the right jump */
3277 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3278 ".Lle_fallthru:\n\t"
3279 "lea 0x8(%esp),%esp\n\t"
3280 "pop %eax\n\t"
3281 "pop %ebx");
3282
3283 if (offset_p)
3284 *offset_p = 20;
3285 if (size_p)
3286 *size_p = 4;
3287 }
3288
3289 void
3290 i386_emit_gt_goto (int *offset_p, int *size_p)
3291 {
3292 EMIT_ASM32 (gt,
3293 "cmpl %ebx,4(%esp)\n\t"
3294 "jg .Lgt_jump\n\t"
3295 "jne .Lgt_fallthru\n\t"
3296 "cmpl %eax,(%esp)\n\t"
3297 "jng .Lgt_fallthru\n\t"
3298 ".Lgt_jump:\n\t"
3299 "lea 0x8(%esp),%esp\n\t"
3300 "pop %eax\n\t"
3301 "pop %ebx\n\t"
3302 /* jmp, but don't trust the assembler to choose the right jump */
3303 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3304 ".Lgt_fallthru:\n\t"
3305 "lea 0x8(%esp),%esp\n\t"
3306 "pop %eax\n\t"
3307 "pop %ebx");
3308
3309 if (offset_p)
3310 *offset_p = 20;
3311 if (size_p)
3312 *size_p = 4;
3313 }
3314
3315 void
3316 i386_emit_ge_goto (int *offset_p, int *size_p)
3317 {
3318 EMIT_ASM32 (ge,
3319 "cmpl %ebx,4(%esp)\n\t"
3320 "jge .Lge_jump\n\t"
3321 "jne .Lge_fallthru\n\t"
3322 "cmpl %eax,(%esp)\n\t"
3323 "jnge .Lge_fallthru\n\t"
3324 ".Lge_jump:\n\t"
3325 "lea 0x8(%esp),%esp\n\t"
3326 "pop %eax\n\t"
3327 "pop %ebx\n\t"
3328 /* jmp, but don't trust the assembler to choose the right jump */
3329 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3330 ".Lge_fallthru:\n\t"
3331 "lea 0x8(%esp),%esp\n\t"
3332 "pop %eax\n\t"
3333 "pop %ebx");
3334
3335 if (offset_p)
3336 *offset_p = 20;
3337 if (size_p)
3338 *size_p = 4;
3339 }
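
/* Offset accounting for the i386 comparison gotos, assuming GAS's
   usual encodings.  eq/ne (18): cmpl %eax,(%esp) (3) + Jcc rel8 (2)
   + cmpl %ebx,4(%esp) (4) + Jcc rel8 (2) + lea (4) + pop + pop (2)
   + the e9 opcode (1).  lt/le/gt/ge (20): the high-half compare
   comes first and needs two Jcc rel8's, i.e.
   4 + 2 + 2 + 3 + 2 + 4 + 2 + 1.  */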
3340
3341 struct emit_ops i386_emit_ops =
3342 {
3343 i386_emit_prologue,
3344 i386_emit_epilogue,
3345 i386_emit_add,
3346 i386_emit_sub,
3347 i386_emit_mul,
3348 i386_emit_lsh,
3349 i386_emit_rsh_signed,
3350 i386_emit_rsh_unsigned,
3351 i386_emit_ext,
3352 i386_emit_log_not,
3353 i386_emit_bit_and,
3354 i386_emit_bit_or,
3355 i386_emit_bit_xor,
3356 i386_emit_bit_not,
3357 i386_emit_equal,
3358 i386_emit_less_signed,
3359 i386_emit_less_unsigned,
3360 i386_emit_ref,
3361 i386_emit_if_goto,
3362 i386_emit_goto,
3363 i386_write_goto_address,
3364 i386_emit_const,
3365 i386_emit_call,
3366 i386_emit_reg,
3367 i386_emit_pop,
3368 i386_emit_stack_flush,
3369 i386_emit_zero_ext,
3370 i386_emit_swap,
3371 i386_emit_stack_adjust,
3372 i386_emit_int_call_1,
3373 i386_emit_void_call_2,
3374 i386_emit_eq_goto,
3375 i386_emit_ne_goto,
3376 i386_emit_lt_goto,
3377 i386_emit_le_goto,
3378 i386_emit_gt_goto,
3379 i386_emit_ge_goto
3380 };
3381
3382
3383 static struct emit_ops *
3384 x86_emit_ops (void)
3385 {
3386 #ifdef __x86_64__
3387 if (is_64bit_tdesc ())
3388 return &amd64_emit_ops;
3389 else
3390 #endif
3391 return &i386_emit_ops;
3392 }
3393
3394 static int
3395 x86_supports_range_stepping (void)
3396 {
3397 return 1;
3398 }
3399
3400 /* This is initialized assuming an amd64 target;
3401 x86_arch_setup adjusts it at runtime to match the actual
i386 or amd64 target. */
3402
3403 struct linux_target_ops the_low_target =
3404 {
3405 x86_arch_setup,
3406 x86_linux_regs_info,
3407 x86_cannot_fetch_register,
3408 x86_cannot_store_register,
3409 NULL, /* fetch_register */
3410 x86_get_pc,
3411 x86_set_pc,
3412 x86_breakpoint,
3413 x86_breakpoint_len,
3414 NULL, /* breakpoint_reinsert_addr */
3415 1, /* decr_pc_after_break */
3416 x86_breakpoint_at,
3417 x86_supports_z_point_type,
3418 x86_insert_point,
3419 x86_remove_point,
3420 x86_stopped_by_watchpoint,
3421 x86_stopped_data_address,
3422 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3423 native i386 case (no registers smaller than an xfer unit), and are not
3424 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3425 NULL,
3426 NULL,
3427 /* need to fix up i386 siginfo if host is amd64 */
3428 x86_siginfo_fixup,
3429 x86_linux_new_process,
3430 x86_linux_new_thread,
3431 x86_linux_prepare_to_resume,
3432 x86_linux_process_qsupported,
3433 x86_supports_tracepoints,
3434 x86_get_thread_area,
3435 x86_install_fast_tracepoint_jump_pad,
3436 x86_emit_ops,
3437 x86_get_min_fast_tracepoint_insn_len,
3438 x86_supports_range_stepping,
3439 };
3440
3441 void
3442 initialize_low_arch (void)
3443 {
3444 /* Initialize the Linux target descriptions. */
3445 #ifdef __x86_64__
3446 init_registers_amd64_linux ();
3447 init_registers_amd64_avx_linux ();
3448 init_registers_amd64_avx512_linux ();
3449 init_registers_amd64_mpx_linux ();
3450
3451 init_registers_x32_linux ();
3452 init_registers_x32_avx_linux ();
3453 init_registers_x32_avx512_linux ();
3454
3455 tdesc_amd64_linux_no_xml = xmalloc (sizeof (struct target_desc));
3456 copy_target_description (tdesc_amd64_linux_no_xml, tdesc_amd64_linux);
3457 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
3458 #endif
3459 init_registers_i386_linux ();
3460 init_registers_i386_mmx_linux ();
3461 init_registers_i386_avx_linux ();
3462 init_registers_i386_avx512_linux ();
3463 init_registers_i386_mpx_linux ();
3464
3465 tdesc_i386_linux_no_xml = xmalloc (sizeof (struct target_desc));
3466 copy_target_description (tdesc_i386_linux_no_xml, tdesc_i386_linux);
3467 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
3468
3469 initialize_regsets_info (&x86_regsets_info);
3470 }