1 /* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
3 Copyright (C) 2002-2015 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "server.h"
21 #include <signal.h>
22 #include <limits.h>
23 #include <inttypes.h>
24 #include "linux-low.h"
25 #include "i387-fp.h"
26 #include "x86-low.h"
27 #include "x86-xstate.h"
28
29 #include "gdb_proc_service.h"
30 /* Don't include elf/common.h if linux/elf.h got included by
31 gdb_proc_service.h. */
32 #ifndef ELFMAG0
33 #include "elf/common.h"
34 #endif
35
36 #include "agent.h"
37 #include "tdesc.h"
38 #include "tracepoint.h"
39 #include "ax.h"
40 #include "nat/linux-nat.h"
41 #include "nat/x86-linux.h"
42
43 #ifdef __x86_64__
44 /* Defined in auto-generated file amd64-linux.c. */
45 void init_registers_amd64_linux (void);
46 extern const struct target_desc *tdesc_amd64_linux;
47
48 /* Defined in auto-generated file amd64-avx-linux.c. */
49 void init_registers_amd64_avx_linux (void);
50 extern const struct target_desc *tdesc_amd64_avx_linux;
51
52 /* Defined in auto-generated file amd64-avx512-linux.c. */
53 void init_registers_amd64_avx512_linux (void);
54 extern const struct target_desc *tdesc_amd64_avx512_linux;
55
56 /* Defined in auto-generated file amd64-mpx-linux.c. */
57 void init_registers_amd64_mpx_linux (void);
58 extern const struct target_desc *tdesc_amd64_mpx_linux;
59
60 /* Defined in auto-generated file x32-linux.c. */
61 void init_registers_x32_linux (void);
62 extern const struct target_desc *tdesc_x32_linux;
63
64 /* Defined in auto-generated file x32-avx-linux.c. */
65 void init_registers_x32_avx_linux (void);
66 extern const struct target_desc *tdesc_x32_avx_linux;
67
68 /* Defined in auto-generated file x32-avx512-linux.c. */
69 void init_registers_x32_avx512_linux (void);
70 extern const struct target_desc *tdesc_x32_avx512_linux;
71
72 #endif
73
74 /* Defined in auto-generated file i386-linux.c. */
75 void init_registers_i386_linux (void);
76 extern const struct target_desc *tdesc_i386_linux;
77
78 /* Defined in auto-generated file i386-mmx-linux.c. */
79 void init_registers_i386_mmx_linux (void);
80 extern const struct target_desc *tdesc_i386_mmx_linux;
81
82 /* Defined in auto-generated file i386-avx-linux.c. */
83 void init_registers_i386_avx_linux (void);
84 extern const struct target_desc *tdesc_i386_avx_linux;
85
86 /* Defined in auto-generated file i386-avx512-linux.c. */
87 void init_registers_i386_avx512_linux (void);
88 extern const struct target_desc *tdesc_i386_avx512_linux;
89
90 /* Defined in auto-generated file i386-mpx-linux.c. */
91 void init_registers_i386_mpx_linux (void);
92 extern const struct target_desc *tdesc_i386_mpx_linux;
93
94 #ifdef __x86_64__
95 static struct target_desc *tdesc_amd64_linux_no_xml;
96 #endif
97 static struct target_desc *tdesc_i386_linux_no_xml;
98
99
100 static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
101 static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
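/* Illustrative note: these are instruction templates.  0xe9 is the
   5-byte "jmp rel32" opcode; 0x66 0xe9 is the 4-byte "jmp rel16" form
   (0x66 being the operand-size override prefix).  The zeroed
   displacement bytes are patched in later by the jump-pad builders
   with "memcpy (buf + 1, &offset, 4)" (or "buf + 2" and 2 bytes for
   the short form), where the offset is relative to the end of the
   jump instruction.  */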
102
103 /* Backward compatibility for gdb without XML support. */
104
105 static const char *xmltarget_i386_linux_no_xml = "@<target>\
106 <architecture>i386</architecture>\
107 <osabi>GNU/Linux</osabi>\
108 </target>";
109
110 #ifdef __x86_64__
111 static const char *xmltarget_amd64_linux_no_xml = "@<target>\
112 <architecture>i386:x86-64</architecture>\
113 <osabi>GNU/Linux</osabi>\
114 </target>";
115 #endif
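/* The leading '@' is, as far as we can tell, gdbserver's marker that
   the string is a literal XML target description rather than the name
   of an annex to look up; the '@' itself is not sent to GDB.  */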
116
117 #include <sys/reg.h>
118 #include <sys/procfs.h>
119 #include <sys/ptrace.h>
120 #include <sys/uio.h>
121
122 #ifndef PTRACE_GETREGSET
123 #define PTRACE_GETREGSET 0x4204
124 #endif
125
126 #ifndef PTRACE_SETREGSET
127 #define PTRACE_SETREGSET 0x4205
128 #endif
129
130
131 #ifndef PTRACE_GET_THREAD_AREA
132 #define PTRACE_GET_THREAD_AREA 25
133 #endif
134
135 /* This definition comes from prctl.h, but some kernels may not have it. */
136 #ifndef PTRACE_ARCH_PRCTL
137 #define PTRACE_ARCH_PRCTL 30
138 #endif
139
140 /* The following definitions come from prctl.h, but may be absent
141 for certain configurations. */
142 #ifndef ARCH_GET_FS
143 #define ARCH_SET_GS 0x1001
144 #define ARCH_SET_FS 0x1002
145 #define ARCH_GET_FS 0x1003
146 #define ARCH_GET_GS 0x1004
147 #endif
148
149 /* Per-process arch-specific data we want to keep. */
150
151 struct arch_process_info
152 {
153 struct x86_debug_reg_state debug_reg_state;
154 };
155
156 #ifdef __x86_64__
157
158 /* Mapping between the general-purpose registers in `struct user'
159 format and GDB's register array layout.
160 Note that the transfer layout uses 64-bit regs. */
161 static /*const*/ int i386_regmap[] =
162 {
163 RAX * 8, RCX * 8, RDX * 8, RBX * 8,
164 RSP * 8, RBP * 8, RSI * 8, RDI * 8,
165 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
166 DS * 8, ES * 8, FS * 8, GS * 8
167 };
168
169 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
170
171	/* So the code below doesn't have to care whether this is i386 or amd64.  */
172 #define ORIG_EAX ORIG_RAX
173 #define REGSIZE 8
174
175 static const int x86_64_regmap[] =
176 {
177 RAX * 8, RBX * 8, RCX * 8, RDX * 8,
178 RSI * 8, RDI * 8, RBP * 8, RSP * 8,
179 R8 * 8, R9 * 8, R10 * 8, R11 * 8,
180 R12 * 8, R13 * 8, R14 * 8, R15 * 8,
181 RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
182 DS * 8, ES * 8, FS * 8, GS * 8,
183 -1, -1, -1, -1, -1, -1, -1, -1,
184 -1, -1, -1, -1, -1, -1, -1, -1,
185 -1, -1, -1, -1, -1, -1, -1, -1,
186 -1,
187 -1, -1, -1, -1, -1, -1, -1, -1,
188 ORIG_RAX * 8,
189 -1, -1, -1, -1, /* MPX registers BND0 ... BND3. */
190 -1, -1, /* MPX registers BNDCFGU, BNDSTATUS. */
191 -1, -1, -1, -1, -1, -1, -1, -1, /* xmm16 ... xmm31 (AVX512) */
192 -1, -1, -1, -1, -1, -1, -1, -1,
193 -1, -1, -1, -1, -1, -1, -1, -1, /* ymm16 ... ymm31 (AVX512) */
194 -1, -1, -1, -1, -1, -1, -1, -1,
195 -1, -1, -1, -1, -1, -1, -1, -1, /* k0 ... k7 (AVX512) */
196 -1, -1, -1, -1, -1, -1, -1, -1, /* zmm0 ... zmm31 (AVX512) */
197 -1, -1, -1, -1, -1, -1, -1, -1,
198 -1, -1, -1, -1, -1, -1, -1, -1,
199 -1, -1, -1, -1, -1, -1, -1, -1
200 };
201
202 #define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
203 #define X86_64_USER_REGS (GS + 1)
204
205 #else /* ! __x86_64__ */
206
207 /* Mapping between the general-purpose registers in `struct user'
208 format and GDB's register array layout. */
209 static /*const*/ int i386_regmap[] =
210 {
211 EAX * 4, ECX * 4, EDX * 4, EBX * 4,
212 UESP * 4, EBP * 4, ESI * 4, EDI * 4,
213 EIP * 4, EFL * 4, CS * 4, SS * 4,
214 DS * 4, ES * 4, FS * 4, GS * 4
215 };
216
217 #define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))
218
219 #define REGSIZE 4
220
221 #endif
222
223 #ifdef __x86_64__
224
225	/* Returns true if the current inferior belongs to an x86-64 process,
226 per the tdesc. */
227
228 static int
229 is_64bit_tdesc (void)
230 {
231 struct regcache *regcache = get_thread_regcache (current_thread, 0);
232
233 return register_size (regcache->tdesc, 0) == 8;
234 }
235
236 #endif
237
238 \f
239 /* Called by libthread_db. */
240
241 ps_err_e
242 ps_get_thread_area (const struct ps_prochandle *ph,
243 lwpid_t lwpid, int idx, void **base)
244 {
245 #ifdef __x86_64__
246 int use_64bit = is_64bit_tdesc ();
247
248 if (use_64bit)
249 {
250 switch (idx)
251 {
252 case FS:
253 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
254 return PS_OK;
255 break;
256 case GS:
257 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
258 return PS_OK;
259 break;
260 default:
261 return PS_BADADDR;
262 }
263 return PS_ERR;
264 }
265 #endif
266
267 {
268 unsigned int desc[4];
269
270 if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
271 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
272 return PS_ERR;
273
274 /* Ensure we properly extend the value to 64-bits for x86_64. */
275 *base = (void *) (uintptr_t) desc[1];
276 return PS_OK;
277 }
278 }
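/* Illustrative note: the four ints filled in by PTRACE_GET_THREAD_AREA
   mirror the kernel's struct user_desc, roughly:

     struct user_desc
     {
       unsigned int entry_number;   (the IDX argument)
       unsigned int base_addr;      (desc[1] above)
       unsigned int limit;
       unsigned int flags;          (several bitfields packed together)
     };

   which is why desc[1] is the thread-area base address.  */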
279
280 /* Get the thread area address. This is used to recognize which
281 thread is which when tracing with the in-process agent library. We
282 don't read anything from the address, and treat it as opaque; it's
283 the address itself that we assume is unique per-thread. */
284
285 static int
286 x86_get_thread_area (int lwpid, CORE_ADDR *addr)
287 {
288 #ifdef __x86_64__
289 int use_64bit = is_64bit_tdesc ();
290
291 if (use_64bit)
292 {
293 void *base;
294 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
295 {
296 *addr = (CORE_ADDR) (uintptr_t) base;
297 return 0;
298 }
299
300 return -1;
301 }
302 #endif
303
304 {
305 struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
306 struct thread_info *thr = get_lwp_thread (lwp);
307 struct regcache *regcache = get_thread_regcache (thr, 1);
308 unsigned int desc[4];
309 ULONGEST gs = 0;
310	    const int reg_thread_area = 3;  /* Bits to shift %gs right to get the GDT index.  */
311 int idx;
312
313 collect_register_by_name (regcache, "gs", &gs);
314
315 idx = gs >> reg_thread_area;
316
317 if (ptrace (PTRACE_GET_THREAD_AREA,
318 lwpid_of (thr),
319 (void *) (long) idx, (unsigned long) &desc) < 0)
320 return -1;
321
322 *addr = desc[1];
323 return 0;
324 }
325 }
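/* Illustrative note: an x86 segment selector packs the descriptor
   index in bits 3..15, the table indicator (GDT/LDT) in bit 2, and
   the requested privilege level in bits 0..1; hence the shift by
   reg_thread_area (3) above.  E.g. a %gs value of 0x33 (binary
   110011) yields descriptor index 6 at RPL 3.  */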
326
327
328 \f
329 static int
330 x86_cannot_store_register (int regno)
331 {
332 #ifdef __x86_64__
333 if (is_64bit_tdesc ())
334 return 0;
335 #endif
336
337 return regno >= I386_NUM_REGS;
338 }
339
340 static int
341 x86_cannot_fetch_register (int regno)
342 {
343 #ifdef __x86_64__
344 if (is_64bit_tdesc ())
345 return 0;
346 #endif
347
348 return regno >= I386_NUM_REGS;
349 }
350
351 static void
352 x86_fill_gregset (struct regcache *regcache, void *buf)
353 {
354 int i;
355
356 #ifdef __x86_64__
357 if (register_size (regcache->tdesc, 0) == 8)
358 {
359 for (i = 0; i < X86_64_NUM_REGS; i++)
360 if (x86_64_regmap[i] != -1)
361 collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
362 return;
363 }
364
365 /* 32-bit inferior registers need to be zero-extended.
366 Callers would read uninitialized memory otherwise. */
367 memset (buf, 0x00, X86_64_USER_REGS * 8);
368 #endif
369
370 for (i = 0; i < I386_NUM_REGS; i++)
371 collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);
372
373 collect_register_by_name (regcache, "orig_eax",
374 ((char *) buf) + ORIG_EAX * REGSIZE);
375 }
376
377 static void
378 x86_store_gregset (struct regcache *regcache, const void *buf)
379 {
380 int i;
381
382 #ifdef __x86_64__
383 if (register_size (regcache->tdesc, 0) == 8)
384 {
385 for (i = 0; i < X86_64_NUM_REGS; i++)
386 if (x86_64_regmap[i] != -1)
387 supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
388 return;
389 }
390 #endif
391
392 for (i = 0; i < I386_NUM_REGS; i++)
393 supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);
394
395 supply_register_by_name (regcache, "orig_eax",
396 ((char *) buf) + ORIG_EAX * REGSIZE);
397 }
398
399 static void
400 x86_fill_fpregset (struct regcache *regcache, void *buf)
401 {
402 #ifdef __x86_64__
403 i387_cache_to_fxsave (regcache, buf);
404 #else
405 i387_cache_to_fsave (regcache, buf);
406 #endif
407 }
408
409 static void
410 x86_store_fpregset (struct regcache *regcache, const void *buf)
411 {
412 #ifdef __x86_64__
413 i387_fxsave_to_cache (regcache, buf);
414 #else
415 i387_fsave_to_cache (regcache, buf);
416 #endif
417 }
418
419 #ifndef __x86_64__
420
421 static void
422 x86_fill_fpxregset (struct regcache *regcache, void *buf)
423 {
424 i387_cache_to_fxsave (regcache, buf);
425 }
426
427 static void
428 x86_store_fpxregset (struct regcache *regcache, const void *buf)
429 {
430 i387_fxsave_to_cache (regcache, buf);
431 }
432
433 #endif
434
435 static void
436 x86_fill_xstateregset (struct regcache *regcache, void *buf)
437 {
438 i387_cache_to_xsave (regcache, buf);
439 }
440
441 static void
442 x86_store_xstateregset (struct regcache *regcache, const void *buf)
443 {
444 i387_xsave_to_cache (regcache, buf);
445 }
446
447 /* ??? The non-biarch i386 case stores all the i387 regs twice.
448 Once in i387_.*fsave.* and once in i387_.*fxsave.*.
449 This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
450	   doesn't work.  It would be nice to avoid the duplication in the case where it
451 does work. Maybe the arch_setup routine could check whether it works
452 and update the supported regsets accordingly. */
453
454 static struct regset_info x86_regsets[] =
455 {
456 #ifdef HAVE_PTRACE_GETREGS
457 { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
458 GENERAL_REGS,
459 x86_fill_gregset, x86_store_gregset },
460 { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
461 EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
462 # ifndef __x86_64__
463 # ifdef HAVE_PTRACE_GETFPXREGS
464 { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
465 EXTENDED_REGS,
466 x86_fill_fpxregset, x86_store_fpxregset },
467 # endif
468 # endif
469 { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
470 FP_REGS,
471 x86_fill_fpregset, x86_store_fpregset },
472 #endif /* HAVE_PTRACE_GETREGS */
473 { 0, 0, 0, -1, -1, NULL, NULL }
474 };
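/* Note that the NT_X86_XSTATE entry above is registered with size 0;
   x86_linux_read_description below patches in the real size,
   X86_XSTATE_SIZE (xcr0), once PTRACE_GETREGSET is known to work.  */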
475
476 static CORE_ADDR
477 x86_get_pc (struct regcache *regcache)
478 {
479 int use_64bit = register_size (regcache->tdesc, 0) == 8;
480
481 if (use_64bit)
482 {
483 unsigned long pc;
484 collect_register_by_name (regcache, "rip", &pc);
485 return (CORE_ADDR) pc;
486 }
487 else
488 {
489 unsigned int pc;
490 collect_register_by_name (regcache, "eip", &pc);
491 return (CORE_ADDR) pc;
492 }
493 }
494
495 static void
496 x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
497 {
498 int use_64bit = register_size (regcache->tdesc, 0) == 8;
499
500 if (use_64bit)
501 {
502 unsigned long newpc = pc;
503 supply_register_by_name (regcache, "rip", &newpc);
504 }
505 else
506 {
507 unsigned int newpc = pc;
508 supply_register_by_name (regcache, "eip", &newpc);
509 }
510 }
511 \f
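/* 0xCC is the single-byte INT3 instruction, the canonical x86
   software breakpoint.  */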
512 static const unsigned char x86_breakpoint[] = { 0xCC };
513 #define x86_breakpoint_len 1
514
515 static int
516 x86_breakpoint_at (CORE_ADDR pc)
517 {
518 unsigned char c;
519
520 (*the_target->read_memory) (pc, &c, 1);
521 if (c == 0xCC)
522 return 1;
523
524 return 0;
525 }
526 \f
527
528 /* Return the offset of REGNUM in the u_debugreg field of struct
529 user. */
530
531 static int
532 u_debugreg_offset (int regnum)
533 {
534 return (offsetof (struct user, u_debugreg)
535 + sizeof (((struct user *) 0)->u_debugreg[0]) * regnum);
536 }
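/* Worked example (illustrative): on 64-bit Linux, where u_debugreg is
   an array of 8-byte longs, the DR7 control register is at

     u_debugreg_offset (7) == offsetof (struct user, u_debugreg) + 7 * 8

   and that offset is what gets handed to PTRACE_PEEKUSER/POKEUSER
   below.  */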
537
538
539 /* Support for debug registers. */
540
541 static unsigned long
542 x86_linux_dr_get (ptid_t ptid, int regnum)
543 {
544 int tid;
545 unsigned long value;
546
547 tid = ptid_get_lwp (ptid);
548
549 errno = 0;
550 value = ptrace (PTRACE_PEEKUSER, tid, u_debugreg_offset (regnum), 0);
551 if (errno != 0)
552 error ("Couldn't read debug register");
553
554 return value;
555 }
556
557 static void
558 x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
559 {
560 int tid;
561
562 tid = ptid_get_lwp (ptid);
563
564 errno = 0;
565 ptrace (PTRACE_POKEUSER, tid, u_debugreg_offset (regnum), value);
566 if (errno != 0)
567 error ("Couldn't write debug register");
568 }
569
570 static int
571 update_debug_registers_callback (struct lwp_info *lwp, void *arg)
572 {
573 /* The actual update is done later just before resuming the lwp,
574 we just mark that the registers need updating. */
575 lwp_set_debug_registers_changed (lwp, 1);
576
577 /* If the lwp isn't stopped, force it to momentarily pause, so
578 we can update its debug registers. */
579 if (!lwp_is_stopped (lwp))
580 linux_stop_lwp (lwp);
581
582 return 0;
583 }
584
585 /* Update the inferior's debug register REGNUM from STATE. */
586
587 static void
588 x86_dr_low_set_addr (int regnum, CORE_ADDR addr)
589 {
590 /* Only update the threads of this process. */
591 ptid_t pid_ptid = pid_to_ptid (ptid_get_pid (current_lwp_ptid ()));
592
593 gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);
594
595 iterate_over_lwps (pid_ptid, update_debug_registers_callback, NULL);
596 }
597
598 /* Return the inferior's debug register REGNUM. */
599
600 static CORE_ADDR
601 x86_dr_low_get_addr (int regnum)
602 {
603 gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);
604
605 return x86_linux_dr_get (current_lwp_ptid (), regnum);
606 }
607
608 /* Update the inferior's DR7 debug control register from STATE. */
609
610 static void
611 x86_dr_low_set_control (unsigned long control)
612 {
613 /* Only update the threads of this process. */
614 ptid_t pid_ptid = pid_to_ptid (ptid_get_pid (current_lwp_ptid ()));
615
616 iterate_over_lwps (pid_ptid, update_debug_registers_callback, NULL);
617 }
618
619 /* Return the inferior's DR7 debug control register. */
620
621 static unsigned long
622 x86_dr_low_get_control (void)
623 {
624 return x86_linux_dr_get (current_lwp_ptid (), DR_CONTROL);
625 }
626
627 /* Get the value of the DR6 debug status register from the inferior
628 and record it in STATE. */
629
630 static unsigned long
631 x86_dr_low_get_status (void)
632 {
633 return x86_linux_dr_get (current_lwp_ptid (), DR_STATUS);
634 }
635
636 /* Low-level function vector. */
637 struct x86_dr_low_type x86_dr_low =
638 {
639 x86_dr_low_set_control,
640 x86_dr_low_set_addr,
641 x86_dr_low_get_addr,
642 x86_dr_low_get_status,
643 x86_dr_low_get_control,
644 sizeof (void *),
645 };
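/* The trailing sizeof (void *) fills the vector's debug-register-length
   slot (our reading of the x86_dr_low_type layout): 8 bytes in a
   64-bit gdbserver, 4 in a 32-bit one.  */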
646 \f
647 /* Breakpoint/Watchpoint support. */
648
649 static int
650 x86_supports_z_point_type (char z_type)
651 {
652 switch (z_type)
653 {
654 case Z_PACKET_SW_BP:
655 case Z_PACKET_HW_BP:
656 case Z_PACKET_WRITE_WP:
657 case Z_PACKET_ACCESS_WP:
658 return 1;
659 default:
660 return 0;
661 }
662 }
663
664 static int
665 x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
666 int size, struct raw_breakpoint *bp)
667 {
668 struct process_info *proc = current_process ();
669
670 switch (type)
671 {
672 case raw_bkpt_type_sw:
673 return insert_memory_breakpoint (bp);
674
675 case raw_bkpt_type_hw:
676 case raw_bkpt_type_write_wp:
677 case raw_bkpt_type_access_wp:
678 {
679 enum target_hw_bp_type hw_type
680 = raw_bkpt_type_to_target_hw_bp_type (type);
681 struct x86_debug_reg_state *state
682 = &proc->priv->arch_private->debug_reg_state;
683
684 return x86_dr_insert_watchpoint (state, hw_type, addr, size);
685 }
686
687 default:
688 /* Unsupported. */
689 return 1;
690 }
691 }
692
693 static int
694 x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
695 int size, struct raw_breakpoint *bp)
696 {
697 struct process_info *proc = current_process ();
698
699 switch (type)
700 {
701 case raw_bkpt_type_sw:
702 return remove_memory_breakpoint (bp);
703
704 case raw_bkpt_type_hw:
705 case raw_bkpt_type_write_wp:
706 case raw_bkpt_type_access_wp:
707 {
708 enum target_hw_bp_type hw_type
709 = raw_bkpt_type_to_target_hw_bp_type (type);
710 struct x86_debug_reg_state *state
711 = &proc->priv->arch_private->debug_reg_state;
712
713 return x86_dr_remove_watchpoint (state, hw_type, addr, size);
714 }
715 default:
716 /* Unsupported. */
717 return 1;
718 }
719 }
720
721 static int
722 x86_stopped_by_watchpoint (void)
723 {
724 struct process_info *proc = current_process ();
725 return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
726 }
727
728 static CORE_ADDR
729 x86_stopped_data_address (void)
730 {
731 struct process_info *proc = current_process ();
732 CORE_ADDR addr;
733 if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
734 &addr))
735 return addr;
736 return 0;
737 }
738 \f
739 /* Called when a new process is created. */
740
741 static struct arch_process_info *
742 x86_linux_new_process (void)
743 {
744 struct arch_process_info *info = XCNEW (struct arch_process_info);
745
746 x86_low_init_dregs (&info->debug_reg_state);
747
748 return info;
749 }
750
751 /* Called when a new thread is detected. */
752
753 static void
754 x86_linux_new_thread (struct lwp_info *lwp)
755 {
756 lwp_set_debug_registers_changed (lwp, 1);
757 }
758
759 /* See nat/x86-dregs.h. */
760
761 struct x86_debug_reg_state *
762 x86_debug_reg_state (pid_t pid)
763 {
764 struct process_info *proc = find_process_pid (pid);
765
766 return &proc->priv->arch_private->debug_reg_state;
767 }
768
769 /* Called when resuming a thread.
770 If the debug regs have changed, update the thread's copies. */
771
772 static void
773 x86_linux_prepare_to_resume (struct lwp_info *lwp)
774 {
775 ptid_t ptid = ptid_of_lwp (lwp);
776 int clear_status = 0;
777
778 if (lwp_debug_registers_changed (lwp))
779 {
780 struct x86_debug_reg_state *state
781 = x86_debug_reg_state (ptid_get_pid (ptid));
782 int i;
783
784 x86_linux_dr_set (ptid, DR_CONTROL, 0);
785
786 ALL_DEBUG_ADDRESS_REGISTERS (i)
787 if (state->dr_ref_count[i] > 0)
788 {
789 x86_linux_dr_set (ptid, i, state->dr_mirror[i]);
790
791 /* If we're setting a watchpoint, any change the inferior
792 had done itself to the debug registers needs to be
793 discarded, otherwise, x86_dr_stopped_data_address can
794 get confused. */
795 clear_status = 1;
796 }
797
798 if (state->dr_control_mirror != 0)
799 x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);
800
801 lwp_set_debug_registers_changed (lwp, 0);
802 }
803
804 if (clear_status
805 || lwp_stop_reason (lwp) == TARGET_STOPPED_BY_WATCHPOINT)
806 x86_linux_dr_set (ptid, DR_STATUS, 0);
807 }
808 \f
809	/* When GDBSERVER is built as a 64-bit application on Linux, the
810 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
811 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
812 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
813 conversion in-place ourselves. */
814
815 /* These types below (compat_*) define a siginfo type that is layout
816 compatible with the siginfo type exported by the 32-bit userspace
817 support. */
818
819 #ifdef __x86_64__
820
821 typedef int compat_int_t;
822 typedef unsigned int compat_uptr_t;
823
824 typedef int compat_time_t;
825 typedef int compat_timer_t;
826 typedef int compat_clock_t;
827
828 struct compat_timeval
829 {
830 compat_time_t tv_sec;
831 int tv_usec;
832 };
833
834 typedef union compat_sigval
835 {
836 compat_int_t sival_int;
837 compat_uptr_t sival_ptr;
838 } compat_sigval_t;
839
840 typedef struct compat_siginfo
841 {
842 int si_signo;
843 int si_errno;
844 int si_code;
845
846 union
847 {
848 int _pad[((128 / sizeof (int)) - 3)];
849
850 /* kill() */
851 struct
852 {
853 unsigned int _pid;
854 unsigned int _uid;
855 } _kill;
856
857 /* POSIX.1b timers */
858 struct
859 {
860 compat_timer_t _tid;
861 int _overrun;
862 compat_sigval_t _sigval;
863 } _timer;
864
865 /* POSIX.1b signals */
866 struct
867 {
868 unsigned int _pid;
869 unsigned int _uid;
870 compat_sigval_t _sigval;
871 } _rt;
872
873 /* SIGCHLD */
874 struct
875 {
876 unsigned int _pid;
877 unsigned int _uid;
878 int _status;
879 compat_clock_t _utime;
880 compat_clock_t _stime;
881 } _sigchld;
882
883 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
884 struct
885 {
886 unsigned int _addr;
887 } _sigfault;
888
889 /* SIGPOLL */
890 struct
891 {
892 int _band;
893 int _fd;
894 } _sigpoll;
895 } _sifields;
896 } compat_siginfo_t;
897
898	/* For x32, clock_t in _sigchld is 64-bit, aligned to 4 bytes.  */
899 typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t;
900
901 typedef struct compat_x32_siginfo
902 {
903 int si_signo;
904 int si_errno;
905 int si_code;
906
907 union
908 {
909 int _pad[((128 / sizeof (int)) - 3)];
910
911 /* kill() */
912 struct
913 {
914 unsigned int _pid;
915 unsigned int _uid;
916 } _kill;
917
918 /* POSIX.1b timers */
919 struct
920 {
921 compat_timer_t _tid;
922 int _overrun;
923 compat_sigval_t _sigval;
924 } _timer;
925
926 /* POSIX.1b signals */
927 struct
928 {
929 unsigned int _pid;
930 unsigned int _uid;
931 compat_sigval_t _sigval;
932 } _rt;
933
934 /* SIGCHLD */
935 struct
936 {
937 unsigned int _pid;
938 unsigned int _uid;
939 int _status;
940 compat_x32_clock_t _utime;
941 compat_x32_clock_t _stime;
942 } _sigchld;
943
944 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
945 struct
946 {
947 unsigned int _addr;
948 } _sigfault;
949
950 /* SIGPOLL */
951 struct
952 {
953 int _band;
954 int _fd;
955 } _sigpoll;
956 } _sifields;
957 } compat_x32_siginfo_t __attribute__ ((__aligned__ (8)));
958
959 #define cpt_si_pid _sifields._kill._pid
960 #define cpt_si_uid _sifields._kill._uid
961 #define cpt_si_timerid _sifields._timer._tid
962 #define cpt_si_overrun _sifields._timer._overrun
963 #define cpt_si_status _sifields._sigchld._status
964 #define cpt_si_utime _sifields._sigchld._utime
965 #define cpt_si_stime _sifields._sigchld._stime
966 #define cpt_si_ptr _sifields._rt._sigval.sival_ptr
967 #define cpt_si_addr _sifields._sigfault._addr
968 #define cpt_si_band _sifields._sigpoll._band
969 #define cpt_si_fd _sifields._sigpoll._fd
970
971 /* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
972	   In their place are si_timer1 and si_timer2.  */
973 #ifndef si_timerid
974 #define si_timerid si_timer1
975 #endif
976 #ifndef si_overrun
977 #define si_overrun si_timer2
978 #endif
979
980 static void
981 compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
982 {
983 memset (to, 0, sizeof (*to));
984
985 to->si_signo = from->si_signo;
986 to->si_errno = from->si_errno;
987 to->si_code = from->si_code;
988
989 if (to->si_code == SI_TIMER)
990 {
991 to->cpt_si_timerid = from->si_timerid;
992 to->cpt_si_overrun = from->si_overrun;
993 to->cpt_si_ptr = (intptr_t) from->si_ptr;
994 }
995 else if (to->si_code == SI_USER)
996 {
997 to->cpt_si_pid = from->si_pid;
998 to->cpt_si_uid = from->si_uid;
999 }
1000 else if (to->si_code < 0)
1001 {
1002 to->cpt_si_pid = from->si_pid;
1003 to->cpt_si_uid = from->si_uid;
1004 to->cpt_si_ptr = (intptr_t) from->si_ptr;
1005 }
1006 else
1007 {
1008 switch (to->si_signo)
1009 {
1010 case SIGCHLD:
1011 to->cpt_si_pid = from->si_pid;
1012 to->cpt_si_uid = from->si_uid;
1013 to->cpt_si_status = from->si_status;
1014 to->cpt_si_utime = from->si_utime;
1015 to->cpt_si_stime = from->si_stime;
1016 break;
1017 case SIGILL:
1018 case SIGFPE:
1019 case SIGSEGV:
1020 case SIGBUS:
1021 to->cpt_si_addr = (intptr_t) from->si_addr;
1022 break;
1023 case SIGPOLL:
1024 to->cpt_si_band = from->si_band;
1025 to->cpt_si_fd = from->si_fd;
1026 break;
1027 default:
1028 to->cpt_si_pid = from->si_pid;
1029 to->cpt_si_uid = from->si_uid;
1030 to->cpt_si_ptr = (intptr_t) from->si_ptr;
1031 break;
1032 }
1033 }
1034 }
1035
1036 static void
1037 siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
1038 {
1039 memset (to, 0, sizeof (*to));
1040
1041 to->si_signo = from->si_signo;
1042 to->si_errno = from->si_errno;
1043 to->si_code = from->si_code;
1044
1045 if (to->si_code == SI_TIMER)
1046 {
1047 to->si_timerid = from->cpt_si_timerid;
1048 to->si_overrun = from->cpt_si_overrun;
1049 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
1050 }
1051 else if (to->si_code == SI_USER)
1052 {
1053 to->si_pid = from->cpt_si_pid;
1054 to->si_uid = from->cpt_si_uid;
1055 }
1056 else if (to->si_code < 0)
1057 {
1058 to->si_pid = from->cpt_si_pid;
1059 to->si_uid = from->cpt_si_uid;
1060 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
1061 }
1062 else
1063 {
1064 switch (to->si_signo)
1065 {
1066 case SIGCHLD:
1067 to->si_pid = from->cpt_si_pid;
1068 to->si_uid = from->cpt_si_uid;
1069 to->si_status = from->cpt_si_status;
1070 to->si_utime = from->cpt_si_utime;
1071 to->si_stime = from->cpt_si_stime;
1072 break;
1073 case SIGILL:
1074 case SIGFPE:
1075 case SIGSEGV:
1076 case SIGBUS:
1077 to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
1078 break;
1079 case SIGPOLL:
1080 to->si_band = from->cpt_si_band;
1081 to->si_fd = from->cpt_si_fd;
1082 break;
1083 default:
1084 to->si_pid = from->cpt_si_pid;
1085 to->si_uid = from->cpt_si_uid;
1086 to->si_ptr = (void* ) (intptr_t) from->cpt_si_ptr;
1087 break;
1088 }
1089 }
1090 }
1091
1092 static void
1093 compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t *to,
1094 siginfo_t *from)
1095 {
1096 memset (to, 0, sizeof (*to));
1097
1098 to->si_signo = from->si_signo;
1099 to->si_errno = from->si_errno;
1100 to->si_code = from->si_code;
1101
1102 if (to->si_code == SI_TIMER)
1103 {
1104 to->cpt_si_timerid = from->si_timerid;
1105 to->cpt_si_overrun = from->si_overrun;
1106 to->cpt_si_ptr = (intptr_t) from->si_ptr;
1107 }
1108 else if (to->si_code == SI_USER)
1109 {
1110 to->cpt_si_pid = from->si_pid;
1111 to->cpt_si_uid = from->si_uid;
1112 }
1113 else if (to->si_code < 0)
1114 {
1115 to->cpt_si_pid = from->si_pid;
1116 to->cpt_si_uid = from->si_uid;
1117 to->cpt_si_ptr = (intptr_t) from->si_ptr;
1118 }
1119 else
1120 {
1121 switch (to->si_signo)
1122 {
1123 case SIGCHLD:
1124 to->cpt_si_pid = from->si_pid;
1125 to->cpt_si_uid = from->si_uid;
1126 to->cpt_si_status = from->si_status;
1127 to->cpt_si_utime = from->si_utime;
1128 to->cpt_si_stime = from->si_stime;
1129 break;
1130 case SIGILL:
1131 case SIGFPE:
1132 case SIGSEGV:
1133 case SIGBUS:
1134 to->cpt_si_addr = (intptr_t) from->si_addr;
1135 break;
1136 case SIGPOLL:
1137 to->cpt_si_band = from->si_band;
1138 to->cpt_si_fd = from->si_fd;
1139 break;
1140 default:
1141 to->cpt_si_pid = from->si_pid;
1142 to->cpt_si_uid = from->si_uid;
1143 to->cpt_si_ptr = (intptr_t) from->si_ptr;
1144 break;
1145 }
1146 }
1147 }
1148
1149 static void
1150 siginfo_from_compat_x32_siginfo (siginfo_t *to,
1151 compat_x32_siginfo_t *from)
1152 {
1153 memset (to, 0, sizeof (*to));
1154
1155 to->si_signo = from->si_signo;
1156 to->si_errno = from->si_errno;
1157 to->si_code = from->si_code;
1158
1159 if (to->si_code == SI_TIMER)
1160 {
1161 to->si_timerid = from->cpt_si_timerid;
1162 to->si_overrun = from->cpt_si_overrun;
1163 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
1164 }
1165 else if (to->si_code == SI_USER)
1166 {
1167 to->si_pid = from->cpt_si_pid;
1168 to->si_uid = from->cpt_si_uid;
1169 }
1170 else if (to->si_code < 0)
1171 {
1172 to->si_pid = from->cpt_si_pid;
1173 to->si_uid = from->cpt_si_uid;
1174 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
1175 }
1176 else
1177 {
1178 switch (to->si_signo)
1179 {
1180 case SIGCHLD:
1181 to->si_pid = from->cpt_si_pid;
1182 to->si_uid = from->cpt_si_uid;
1183 to->si_status = from->cpt_si_status;
1184 to->si_utime = from->cpt_si_utime;
1185 to->si_stime = from->cpt_si_stime;
1186 break;
1187 case SIGILL:
1188 case SIGFPE:
1189 case SIGSEGV:
1190 case SIGBUS:
1191 to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
1192 break;
1193 case SIGPOLL:
1194 to->si_band = from->cpt_si_band;
1195 to->si_fd = from->cpt_si_fd;
1196 break;
1197 default:
1198 to->si_pid = from->cpt_si_pid;
1199 to->si_uid = from->cpt_si_uid;
1200 to->si_ptr = (void* ) (intptr_t) from->cpt_si_ptr;
1201 break;
1202 }
1203 }
1204 }
1205
1206 #endif /* __x86_64__ */
1207
1208	/* Convert a native/host siginfo object into/from the siginfo in the
1209	   layout of the inferior's architecture.  Returns true if any
1210 conversion was done; false otherwise. If DIRECTION is 1, then copy
1211 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
1212 INF. */
1213
1214 static int
1215 x86_siginfo_fixup (siginfo_t *native, void *inf, int direction)
1216 {
1217 #ifdef __x86_64__
1218 unsigned int machine;
1219 int tid = lwpid_of (current_thread);
1220 int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
1221
1222 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
1223 if (!is_64bit_tdesc ())
1224 {
1225 gdb_assert (sizeof (siginfo_t) == sizeof (compat_siginfo_t));
1226
1227 if (direction == 0)
1228 compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
1229 else
1230 siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);
1231
1232 return 1;
1233 }
1234 /* No fixup for native x32 GDB. */
1235 else if (!is_elf64 && sizeof (void *) == 8)
1236 {
1237 gdb_assert (sizeof (siginfo_t) == sizeof (compat_x32_siginfo_t));
1238
1239 if (direction == 0)
1240 compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo *) inf,
1241 native);
1242 else
1243 siginfo_from_compat_x32_siginfo (native,
1244 (struct compat_x32_siginfo *) inf);
1245
1246 return 1;
1247 }
1248 #endif
1249
1250 return 0;
1251 }
1252 \f
1253 static int use_xml;
1254
1255 /* Format of XSAVE extended state is:
1256 struct
1257 {
1258 fxsave_bytes[0..463]
1259 sw_usable_bytes[464..511]
1260 xstate_hdr_bytes[512..575]
1261 avx_bytes[576..831]
1262 future_state etc
1263 };
1264
1265 Same memory layout will be used for the coredump NT_X86_XSTATE
1266 representing the XSAVE extended state registers.
1267
1268	   The first 8 bytes of sw_usable_bytes[464..471] are the OS-enabled
1269 extended state mask, which is the same as the extended control register
1270 0 (the XFEATURE_ENABLED_MASK register), XCR0. We can use this mask
1271 together with the mask saved in the xstate_hdr_bytes to determine what
1272 states the processor/OS supports and what state, used or initialized,
1273 the process/thread is in. */
1274 #define I386_LINUX_XSAVE_XCR0_OFFSET 464
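/* A minimal sketch (assuming xsave_buf was filled via PTRACE_GETREGSET
   with NT_X86_XSTATE, as done below) of pulling XCR0 out of the XSAVE
   block:

     uint64_t xcr0;
     memcpy (&xcr0, xsave_buf + I386_LINUX_XSAVE_XCR0_OFFSET,
             sizeof (xcr0));

   x86_linux_read_description does the equivalent by indexing a
   uint64_t array.  */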
1275
1276 /* Does the current host support the GETFPXREGS request? The header
1277 file may or may not define it, and even if it is defined, the
1278 kernel will return EIO if it's running on a pre-SSE processor. */
1279 int have_ptrace_getfpxregs =
1280 #ifdef HAVE_PTRACE_GETFPXREGS
1281 -1
1282 #else
1283 0
1284 #endif
1285 ;
1286
1287 /* Does the current host support PTRACE_GETREGSET? */
1288 static int have_ptrace_getregset = -1;
1289
1290 /* Get Linux/x86 target description from running target. */
1291
1292 static const struct target_desc *
1293 x86_linux_read_description (void)
1294 {
1295 unsigned int machine;
1296 int is_elf64;
1297 int xcr0_features;
1298 int tid;
1299 static uint64_t xcr0;
1300 struct regset_info *regset;
1301
1302 tid = lwpid_of (current_thread);
1303
1304 is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);
1305
1306 if (sizeof (void *) == 4)
1307 {
1308 if (is_elf64 > 0)
1309 error (_("Can't debug 64-bit process with 32-bit GDBserver"));
1310 #ifndef __x86_64__
1311 else if (machine == EM_X86_64)
1312 error (_("Can't debug x86-64 process with 32-bit GDBserver"));
1313 #endif
1314 }
1315
1316 #if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
1317 if (machine == EM_386 && have_ptrace_getfpxregs == -1)
1318 {
1319 elf_fpxregset_t fpxregs;
1320
1321 if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
1322 {
1323 have_ptrace_getfpxregs = 0;
1324 have_ptrace_getregset = 0;
1325 return tdesc_i386_mmx_linux;
1326 }
1327 else
1328 have_ptrace_getfpxregs = 1;
1329 }
1330 #endif
1331
1332 if (!use_xml)
1333 {
1334 x86_xcr0 = X86_XSTATE_SSE_MASK;
1335
1336 /* Don't use XML. */
1337 #ifdef __x86_64__
1338 if (machine == EM_X86_64)
1339 return tdesc_amd64_linux_no_xml;
1340 else
1341 #endif
1342 return tdesc_i386_linux_no_xml;
1343 }
1344
1345 if (have_ptrace_getregset == -1)
1346 {
1347 uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
1348 struct iovec iov;
1349
1350 iov.iov_base = xstateregs;
1351 iov.iov_len = sizeof (xstateregs);
1352
1353 /* Check if PTRACE_GETREGSET works. */
1354 if (ptrace (PTRACE_GETREGSET, tid,
1355 (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
1356 have_ptrace_getregset = 0;
1357 else
1358 {
1359 have_ptrace_getregset = 1;
1360
1361 /* Get XCR0 from XSAVE extended state. */
1362 xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
1363 / sizeof (uint64_t))];
1364
1365 /* Use PTRACE_GETREGSET if it is available. */
1366 for (regset = x86_regsets;
1367 regset->fill_function != NULL; regset++)
1368 if (regset->get_request == PTRACE_GETREGSET)
1369 regset->size = X86_XSTATE_SIZE (xcr0);
1370 else if (regset->type != GENERAL_REGS)
1371 regset->size = 0;
1372 }
1373 }
1374
1375 /* Check the native XCR0 only if PTRACE_GETREGSET is available. */
1376 xcr0_features = (have_ptrace_getregset
1377 && (xcr0 & X86_XSTATE_ALL_MASK));
1378
1379 if (xcr0_features)
1380 x86_xcr0 = xcr0;
1381
1382 if (machine == EM_X86_64)
1383 {
1384 #ifdef __x86_64__
1385 if (is_elf64)
1386 {
1387 if (xcr0_features)
1388 {
1389 switch (xcr0 & X86_XSTATE_ALL_MASK)
1390 {
1391 case X86_XSTATE_AVX512_MASK:
1392 return tdesc_amd64_avx512_linux;
1393
1394 case X86_XSTATE_MPX_MASK:
1395 return tdesc_amd64_mpx_linux;
1396
1397 case X86_XSTATE_AVX_MASK:
1398 return tdesc_amd64_avx_linux;
1399
1400 default:
1401 return tdesc_amd64_linux;
1402 }
1403 }
1404 else
1405 return tdesc_amd64_linux;
1406 }
1407 else
1408 {
1409 if (xcr0_features)
1410 {
1411 switch (xcr0 & X86_XSTATE_ALL_MASK)
1412 {
1413 case X86_XSTATE_AVX512_MASK:
1414 return tdesc_x32_avx512_linux;
1415
1416 case X86_XSTATE_MPX_MASK: /* No MPX on x32. */
1417 case X86_XSTATE_AVX_MASK:
1418 return tdesc_x32_avx_linux;
1419
1420 default:
1421 return tdesc_x32_linux;
1422 }
1423 }
1424 else
1425 return tdesc_x32_linux;
1426 }
1427 #endif
1428 }
1429 else
1430 {
1431 if (xcr0_features)
1432 {
1433 switch (xcr0 & X86_XSTATE_ALL_MASK)
1434 {
1435 case (X86_XSTATE_AVX512_MASK):
1436 return tdesc_i386_avx512_linux;
1437
1438 case (X86_XSTATE_MPX_MASK):
1439 return tdesc_i386_mpx_linux;
1440
1441 case (X86_XSTATE_AVX_MASK):
1442 return tdesc_i386_avx_linux;
1443
1444 default:
1445 return tdesc_i386_linux;
1446 }
1447 }
1448 else
1449 return tdesc_i386_linux;
1450 }
1451
1452 gdb_assert_not_reached ("failed to return tdesc");
1453 }
1454
1455 /* Callback for find_inferior. Stops iteration when a thread with a
1456 given PID is found. */
1457
1458 static int
1459 same_process_callback (struct inferior_list_entry *entry, void *data)
1460 {
1461 int pid = *(int *) data;
1462
1463 return (ptid_get_pid (entry->id) == pid);
1464 }
1465
1466 /* Callback for for_each_inferior. Calls the arch_setup routine for
1467 each process. */
1468
1469 static void
1470 x86_arch_setup_process_callback (struct inferior_list_entry *entry)
1471 {
1472 int pid = ptid_get_pid (entry->id);
1473
1474	  /* Look up any thread of this process.  */
1475 current_thread
1476 = (struct thread_info *) find_inferior (&all_threads,
1477 same_process_callback, &pid);
1478
1479 the_low_target.arch_setup ();
1480 }
1481
1482	/* Update the target descriptions of all processes; a new GDB has
1483	   connected, and it may or may not support XML target descriptions.  */
1484
1485 static void
1486 x86_linux_update_xmltarget (void)
1487 {
1488 struct thread_info *saved_thread = current_thread;
1489
1490 /* Before changing the register cache's internal layout, flush the
1491 contents of the current valid caches back to the threads, and
1492 release the current regcache objects. */
1493 regcache_release ();
1494
1495 for_each_inferior (&all_processes, x86_arch_setup_process_callback);
1496
1497 current_thread = saved_thread;
1498 }
1499
1500 /* Process qSupported query, "xmlRegisters=". Update the buffer size for
1501 PTRACE_GETREGSET. */
1502
1503 static void
1504 x86_linux_process_qsupported (const char *query)
1505 {
1506	  /* Assume GDB doesn't support XML.  If GDB sends "xmlRegisters="
1507	     with "i386" in the qSupported query, it supports x86 XML target
1508	     descriptions.  */
1509 use_xml = 0;
1510 if (query != NULL && startswith (query, "xmlRegisters="))
1511 {
1512 char *copy = xstrdup (query + 13);
1513 char *p;
1514
1515 for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
1516 {
1517 if (strcmp (p, "i386") == 0)
1518 {
1519 use_xml = 1;
1520 break;
1521 }
1522 }
1523
1524 free (copy);
1525 }
1526
1527 x86_linux_update_xmltarget ();
1528 }
1529
1530 /* Common for x86/x86-64. */
1531
1532 static struct regsets_info x86_regsets_info =
1533 {
1534 x86_regsets, /* regsets */
1535 0, /* num_regsets */
1536 NULL, /* disabled_regsets */
1537 };
1538
1539 #ifdef __x86_64__
1540 static struct regs_info amd64_linux_regs_info =
1541 {
1542 NULL, /* regset_bitmap */
1543 NULL, /* usrregs_info */
1544 &x86_regsets_info
1545 };
1546 #endif
1547 static struct usrregs_info i386_linux_usrregs_info =
1548 {
1549 I386_NUM_REGS,
1550 i386_regmap,
1551 };
1552
1553 static struct regs_info i386_linux_regs_info =
1554 {
1555 NULL, /* regset_bitmap */
1556 &i386_linux_usrregs_info,
1557 &x86_regsets_info
1558 };
1559
1560 const struct regs_info *
1561 x86_linux_regs_info (void)
1562 {
1563 #ifdef __x86_64__
1564 if (is_64bit_tdesc ())
1565 return &amd64_linux_regs_info;
1566 else
1567 #endif
1568 return &i386_linux_regs_info;
1569 }
1570
1571 /* Initialize the target description for the architecture of the
1572 inferior. */
1573
1574 static void
1575 x86_arch_setup (void)
1576 {
1577 current_process ()->tdesc = x86_linux_read_description ();
1578 }
1579
1580 static int
1581 x86_supports_tracepoints (void)
1582 {
1583 return 1;
1584 }
1585
1586 static void
1587 append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
1588 {
1589 write_inferior_memory (*to, buf, len);
1590 *to += len;
1591 }
1592
1593 static int
1594 push_opcode (unsigned char *buf, char *op)
1595 {
1596 unsigned char *buf_org = buf;
1597
1598 while (1)
1599 {
1600 char *endptr;
1601 unsigned long ul = strtoul (op, &endptr, 16);
1602
1603 if (endptr == op)
1604 break;
1605
1606 *buf++ = ul;
1607 op = endptr;
1608 }
1609
1610 return buf - buf_org;
1611 }
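/* Usage sketch: push_opcode parses a string of whitespace-separated
   hex byte values, appends them to BUF and returns the byte count:

     unsigned char buf[16];
     int n = push_opcode (buf, "48 89 e6");   (mov %rsp,%rsi; n == 3)

   This is how the jump-pad builders below assemble fixed opcode
   sequences.  */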
1612
1613 #ifdef __x86_64__
1614
1615 /* Build a jump pad that saves registers and calls a collection
1616 function. Writes a jump instruction to the jump pad to
1617	   JJUMPAD_INSN.  The caller is responsible for writing it in at the
1618 tracepoint address. */
1619
1620 static int
1621 amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1622 CORE_ADDR collector,
1623 CORE_ADDR lockaddr,
1624 ULONGEST orig_size,
1625 CORE_ADDR *jump_entry,
1626 CORE_ADDR *trampoline,
1627 ULONGEST *trampoline_size,
1628 unsigned char *jjump_pad_insn,
1629 ULONGEST *jjump_pad_insn_size,
1630 CORE_ADDR *adjusted_insn_addr,
1631 CORE_ADDR *adjusted_insn_addr_end,
1632 char *err)
1633 {
1634 unsigned char buf[40];
1635 int i, offset;
1636 int64_t loffset;
1637
1638 CORE_ADDR buildaddr = *jump_entry;
1639
1640 /* Build the jump pad. */
1641
1642 /* First, do tracepoint data collection. Save registers. */
1643 i = 0;
1644 /* Need to ensure stack pointer saved first. */
1645 buf[i++] = 0x54; /* push %rsp */
1646 buf[i++] = 0x55; /* push %rbp */
1647 buf[i++] = 0x57; /* push %rdi */
1648 buf[i++] = 0x56; /* push %rsi */
1649 buf[i++] = 0x52; /* push %rdx */
1650 buf[i++] = 0x51; /* push %rcx */
1651 buf[i++] = 0x53; /* push %rbx */
1652 buf[i++] = 0x50; /* push %rax */
1653 buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
1654 buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
1655 buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
1656 buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
1657 buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
1658 buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
1659 buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
1660 buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
1661 buf[i++] = 0x9c; /* pushfq */
1662	  buf[i++] = 0x48;		/* movabs <tpaddr>,%rdi */
1663 buf[i++] = 0xbf;
1664 *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
1665 i += sizeof (unsigned long);
1666 buf[i++] = 0x57; /* push %rdi */
1667 append_insns (&buildaddr, i, buf);
1668
1669 /* Stack space for the collecting_t object. */
1670 i = 0;
1671 i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
1672 i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
1673 memcpy (buf + i, &tpoint, 8);
1674 i += 8;
1675 i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
1676 i += push_opcode (&buf[i],
1677 "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
1678 i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
1679 append_insns (&buildaddr, i, buf);
1680
1681 /* spin-lock. */
1682 i = 0;
1683	  i += push_opcode (&buf[i], "48 be");	  /* movabs <lockaddr>,%rsi */
1684 memcpy (&buf[i], (void *) &lockaddr, 8);
1685 i += 8;
1686 i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
1687 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1688 i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
1689 i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
1690 i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
1691 append_insns (&buildaddr, i, buf);
1692
1693 /* Set up the gdb_collect call. */
1694 /* At this point, (stack pointer + 0x18) is the base of our saved
1695 register block. */
1696
1697 i = 0;
1698 i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
1699 i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */
1700
1701 /* tpoint address may be 64-bit wide. */
1702	  i += push_opcode (&buf[i], "48 bf");	  /* movabs <tpoint>,%rdi */
1703 memcpy (buf + i, &tpoint, 8);
1704 i += 8;
1705 append_insns (&buildaddr, i, buf);
1706
1707	  /* The collector function, being in the shared library, may be
1708	     more than 2^31 bytes away from the jump pad.  */
1709 i = 0;
1710 i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
1711 memcpy (buf + i, &collector, 8);
1712 i += 8;
1713 i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
1714 append_insns (&buildaddr, i, buf);
1715
1716 /* Clear the spin-lock. */
1717 i = 0;
1718 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1719 i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
1720 memcpy (buf + i, &lockaddr, 8);
1721 i += 8;
1722 append_insns (&buildaddr, i, buf);
1723
1724 /* Remove stack that had been used for the collect_t object. */
1725 i = 0;
1726 i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
1727 append_insns (&buildaddr, i, buf);
1728
1729 /* Restore register state. */
1730 i = 0;
1731 buf[i++] = 0x48; /* add $0x8,%rsp */
1732 buf[i++] = 0x83;
1733 buf[i++] = 0xc4;
1734 buf[i++] = 0x08;
1735 buf[i++] = 0x9d; /* popfq */
1736 buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
1737 buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
1738 buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
1739 buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
1740 buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
1741 buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
1742 buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
1743 buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
1744 buf[i++] = 0x58; /* pop %rax */
1745 buf[i++] = 0x5b; /* pop %rbx */
1746 buf[i++] = 0x59; /* pop %rcx */
1747 buf[i++] = 0x5a; /* pop %rdx */
1748 buf[i++] = 0x5e; /* pop %rsi */
1749 buf[i++] = 0x5f; /* pop %rdi */
1750 buf[i++] = 0x5d; /* pop %rbp */
1751 buf[i++] = 0x5c; /* pop %rsp */
1752 append_insns (&buildaddr, i, buf);
1753
1754 /* Now, adjust the original instruction to execute in the jump
1755 pad. */
1756 *adjusted_insn_addr = buildaddr;
1757 relocate_instruction (&buildaddr, tpaddr);
1758 *adjusted_insn_addr_end = buildaddr;
1759
1760 /* Finally, write a jump back to the program. */
1761
1762 loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1763 if (loffset > INT_MAX || loffset < INT_MIN)
1764 {
1765 sprintf (err,
1766 "E.Jump back from jump pad too far from tracepoint "
1767 "(offset 0x%" PRIx64 " > int32).", loffset);
1768 return 1;
1769 }
1770
1771 offset = (int) loffset;
1772 memcpy (buf, jump_insn, sizeof (jump_insn));
1773 memcpy (buf + 1, &offset, 4);
1774 append_insns (&buildaddr, sizeof (jump_insn), buf);
1775
1776 /* The jump pad is now built. Wire in a jump to our jump pad. This
1777 is always done last (by our caller actually), so that we can
1778 install fast tracepoints with threads running. This relies on
1779 the agent's atomic write support. */
1780 loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
1781 if (loffset > INT_MAX || loffset < INT_MIN)
1782 {
1783 sprintf (err,
1784 "E.Jump pad too far from tracepoint "
1785 "(offset 0x%" PRIx64 " > int32).", loffset);
1786 return 1;
1787 }
1788
1789 offset = (int) loffset;
1790
1791 memcpy (buf, jump_insn, sizeof (jump_insn));
1792 memcpy (buf + 1, &offset, 4);
1793 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1794 *jjump_pad_insn_size = sizeof (jump_insn);
1795
1796 /* Return the end address of our pad. */
1797 *jump_entry = buildaddr;
1798
1799 return 0;
1800 }
1801
1802 #endif /* __x86_64__ */
1803
1804 /* Build a jump pad that saves registers and calls a collection
1805 function. Writes a jump instruction to the jump pad to
1806	   JJUMPAD_INSN.  The caller is responsible for writing it in at the
1807 tracepoint address. */
1808
1809 static int
1810 i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1811 CORE_ADDR collector,
1812 CORE_ADDR lockaddr,
1813 ULONGEST orig_size,
1814 CORE_ADDR *jump_entry,
1815 CORE_ADDR *trampoline,
1816 ULONGEST *trampoline_size,
1817 unsigned char *jjump_pad_insn,
1818 ULONGEST *jjump_pad_insn_size,
1819 CORE_ADDR *adjusted_insn_addr,
1820 CORE_ADDR *adjusted_insn_addr_end,
1821 char *err)
1822 {
1823 unsigned char buf[0x100];
1824 int i, offset;
1825 CORE_ADDR buildaddr = *jump_entry;
1826
1827 /* Build the jump pad. */
1828
1829 /* First, do tracepoint data collection. Save registers. */
1830 i = 0;
1831 buf[i++] = 0x60; /* pushad */
1832 buf[i++] = 0x68; /* push tpaddr aka $pc */
1833 *((int *)(buf + i)) = (int) tpaddr;
1834 i += 4;
1835 buf[i++] = 0x9c; /* pushf */
1836 buf[i++] = 0x1e; /* push %ds */
1837 buf[i++] = 0x06; /* push %es */
1838 buf[i++] = 0x0f; /* push %fs */
1839 buf[i++] = 0xa0;
1840 buf[i++] = 0x0f; /* push %gs */
1841 buf[i++] = 0xa8;
1842 buf[i++] = 0x16; /* push %ss */
1843 buf[i++] = 0x0e; /* push %cs */
1844 append_insns (&buildaddr, i, buf);
1845
1846 /* Stack space for the collecting_t object. */
1847 i = 0;
1848 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1849
1850 /* Build the object. */
1851 i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
1852 memcpy (buf + i, &tpoint, 4);
1853 i += 4;
1854 i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */
1855
1856 i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
1857 i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
1858 append_insns (&buildaddr, i, buf);
1859
1860	  /* spin-lock.  Note this uses cmpxchg, which is not available on the
1861	     original i386.  If we cared about that, it could use xchg instead.  */
1862
1863 i = 0;
1864 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1865 i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
1866 %esp,<lockaddr> */
1867 memcpy (&buf[i], (void *) &lockaddr, 4);
1868 i += 4;
1869 i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
1870 i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
1871 append_insns (&buildaddr, i, buf);
1872
1873
1874 /* Set up arguments to the gdb_collect call. */
1875 i = 0;
1876 i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
1877 i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
1878 i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1879 append_insns (&buildaddr, i, buf);
1880
1881 i = 0;
1882 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1883 append_insns (&buildaddr, i, buf);
1884
1885 i = 0;
1886 i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
1887 memcpy (&buf[i], (void *) &tpoint, 4);
1888 i += 4;
1889 append_insns (&buildaddr, i, buf);
1890
1891 buf[0] = 0xe8; /* call <reladdr> */
1892 offset = collector - (buildaddr + sizeof (jump_insn));
1893 memcpy (buf + 1, &offset, 4);
1894 append_insns (&buildaddr, 5, buf);
1895 /* Clean up after the call. */
1896 buf[0] = 0x83; /* add $0x8,%esp */
1897 buf[1] = 0xc4;
1898 buf[2] = 0x08;
1899 append_insns (&buildaddr, 3, buf);
1900
1901
1902 /* Clear the spin-lock. This would need the LOCK prefix on older
1903 broken archs. */
1904 i = 0;
1905 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1906 i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
1907 memcpy (buf + i, &lockaddr, 4);
1908 i += 4;
1909 append_insns (&buildaddr, i, buf);
1910
1911
1912 /* Remove stack that had been used for the collect_t object. */
1913 i = 0;
1914 i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
1915 append_insns (&buildaddr, i, buf);
1916
1917 i = 0;
1918 buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1919 buf[i++] = 0xc4;
1920 buf[i++] = 0x04;
1921 buf[i++] = 0x17; /* pop %ss */
1922 buf[i++] = 0x0f; /* pop %gs */
1923 buf[i++] = 0xa9;
1924 buf[i++] = 0x0f; /* pop %fs */
1925 buf[i++] = 0xa1;
1926 buf[i++] = 0x07; /* pop %es */
1927 buf[i++] = 0x1f; /* pop %ds */
1928 buf[i++] = 0x9d; /* popf */
1929 buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1930 buf[i++] = 0xc4;
1931 buf[i++] = 0x04;
1932 buf[i++] = 0x61; /* popad */
1933 append_insns (&buildaddr, i, buf);
1934
1935 /* Now, adjust the original instruction to execute in the jump
1936 pad. */
1937 *adjusted_insn_addr = buildaddr;
1938 relocate_instruction (&buildaddr, tpaddr);
1939 *adjusted_insn_addr_end = buildaddr;
1940
1941 /* Write the jump back to the program. */
1942 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1943 memcpy (buf, jump_insn, sizeof (jump_insn));
1944 memcpy (buf + 1, &offset, 4);
1945 append_insns (&buildaddr, sizeof (jump_insn), buf);
1946
1947 /* The jump pad is now built. Wire in a jump to our jump pad. This
1948 is always done last (by our caller actually), so that we can
1949 install fast tracepoints with threads running. This relies on
1950 the agent's atomic write support. */
1951 if (orig_size == 4)
1952 {
1953 /* Create a trampoline. */
1954 *trampoline_size = sizeof (jump_insn);
1955 if (!claim_trampoline_space (*trampoline_size, trampoline))
1956 {
1957 /* No trampoline space available. */
1958 strcpy (err,
1959 "E.Cannot allocate trampoline space needed for fast "
1960 "tracepoints on 4-byte instructions.");
1961 return 1;
1962 }
1963
1964 offset = *jump_entry - (*trampoline + sizeof (jump_insn));
1965 memcpy (buf, jump_insn, sizeof (jump_insn));
1966 memcpy (buf + 1, &offset, 4);
1967 write_inferior_memory (*trampoline, buf, sizeof (jump_insn));
1968
1969 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1970 offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
1971 memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
1972 memcpy (buf + 2, &offset, 2);
1973 memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
1974 *jjump_pad_insn_size = sizeof (small_jump_insn);
1975 }
1976 else
1977 {
1978 /* Else use a 32-bit relative jump instruction. */
1979 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1980 memcpy (buf, jump_insn, sizeof (jump_insn));
1981 memcpy (buf + 1, &offset, 4);
1982 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1983 *jjump_pad_insn_size = sizeof (jump_insn);
1984 }
1985
1986 /* Return the end address of our pad. */
1987 *jump_entry = buildaddr;
1988
1989 return 0;
1990 }
1991
1992 static int
1993 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
1994 CORE_ADDR collector,
1995 CORE_ADDR lockaddr,
1996 ULONGEST orig_size,
1997 CORE_ADDR *jump_entry,
1998 CORE_ADDR *trampoline,
1999 ULONGEST *trampoline_size,
2000 unsigned char *jjump_pad_insn,
2001 ULONGEST *jjump_pad_insn_size,
2002 CORE_ADDR *adjusted_insn_addr,
2003 CORE_ADDR *adjusted_insn_addr_end,
2004 char *err)
2005 {
2006 #ifdef __x86_64__
2007 if (is_64bit_tdesc ())
2008 return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
2009 collector, lockaddr,
2010 orig_size, jump_entry,
2011 trampoline, trampoline_size,
2012 jjump_pad_insn,
2013 jjump_pad_insn_size,
2014 adjusted_insn_addr,
2015 adjusted_insn_addr_end,
2016 err);
2017 #endif
2018
2019 return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
2020 collector, lockaddr,
2021 orig_size, jump_entry,
2022 trampoline, trampoline_size,
2023 jjump_pad_insn,
2024 jjump_pad_insn_size,
2025 adjusted_insn_addr,
2026 adjusted_insn_addr_end,
2027 err);
2028 }
2029
2030 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
2031 architectures. */
2032
2033 static int
2034 x86_get_min_fast_tracepoint_insn_len (void)
2035 {
2036 static int warned_about_fast_tracepoints = 0;
2037
2038 #ifdef __x86_64__
2039 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
2040 used for fast tracepoints. */
2041 if (is_64bit_tdesc ())
2042 return 5;
2043 #endif
2044
2045 if (agent_loaded_p ())
2046 {
2047 char errbuf[IPA_BUFSIZ];
2048
2049 errbuf[0] = '\0';
2050
2051 /* On x86, if trampolines are available, then 4-byte jump instructions
2052 with a 2-byte offset may be used, otherwise 5-byte jump instructions
2053 with a 4-byte offset are used instead. */
2054 if (have_fast_tracepoint_trampoline_buffer (errbuf))
2055 return 4;
2056 else
2057 {
2058 /* GDB has no channel to explain to the user why a shorter fast
2059 tracepoint is not possible, but at least make GDBserver
2060 mention that something has gone awry. */
2061 if (!warned_about_fast_tracepoints)
2062 {
2063 warning ("4-byte fast tracepoints not available; %s\n", errbuf);
2064 warned_about_fast_tracepoints = 1;
2065 }
2066 return 5;
2067 }
2068 }
2069 else
2070 {
2071 /* Indicate that the minimum length is currently unknown since the IPA
2072 has not loaded yet. */
2073 return 0;
2074 }
2075 }
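     /* For illustration only, a hypothetical caller in the tracepoint
        layer would gate placement on this value, roughly:

          int min_len = x86_get_min_fast_tracepoint_insn_len ();
          if (min_len == 0)
            defer ();    -- IPA not loaded yet; re-query later
          else if (insn_len_at_tpaddr < min_len)
            reject ();   -- instruction too short to overwrite safely

        where defer, reject and insn_len_at_tpaddr are assumed names for
        this sketch, not gdbserver's.  */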
2076
2077 static void
2078 add_insns (unsigned char *start, int len)
2079 {
2080 CORE_ADDR buildaddr = current_insn_ptr;
2081
2082 if (debug_threads)
2083 debug_printf ("Adding %d bytes of insn at %s\n",
2084 len, paddress (buildaddr));
2085
2086 append_insns (&buildaddr, len, start);
2087 current_insn_ptr = buildaddr;
2088 }
2089
2090 /* Our general strategy for emitting code is to avoid specifying raw
2091 bytes whenever possible, and instead copy a block of inline asm
2092 that is embedded in the function. This is a little messy, because
2093 we need to keep the compiler from discarding what looks like dead
2094 code, plus suppress various warnings. */
2095
2096 #define EMIT_ASM(NAME, INSNS) \
2097 do \
2098 { \
2099 extern unsigned char start_ ## NAME, end_ ## NAME; \
2100 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2101 __asm__ ("jmp end_" #NAME "\n" \
2102 "\t" "start_" #NAME ":" \
2103 "\t" INSNS "\n" \
2104 "\t" "end_" #NAME ":"); \
2105 } while (0)
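     /* For example, EMIT_ASM (amd64_pop, "pop %rax") below expands
        (roughly) to:

          extern unsigned char start_amd64_pop, end_amd64_pop;
          add_insns (&start_amd64_pop, &end_amd64_pop - &start_amd64_pop);
          __asm__ ("jmp end_amd64_pop\n"
                   "\t" "start_amd64_pop:" "\t" "pop %rax\n"
                   "\t" "end_amd64_pop:");

        so the template is assembled into this object file, jumped over at
        run time, and its bytes copied into the inferior by add_insns.  */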
2106
2107 #ifdef __x86_64__
2108
2109 #define EMIT_ASM32(NAME,INSNS) \
2110 do \
2111 { \
2112 extern unsigned char start_ ## NAME, end_ ## NAME; \
2113 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2114 __asm__ (".code32\n" \
2115 "\t" "jmp end_" #NAME "\n" \
2116 "\t" "start_" #NAME ":\n" \
2117 "\t" INSNS "\n" \
2118 "\t" "end_" #NAME ":\n" \
2119 ".code64\n"); \
2120 } while (0)
2121
2122 #else
2123
2124 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
2125
2126 #endif
2127
2128 #ifdef __x86_64__
2129
2130 static void
2131 amd64_emit_prologue (void)
2132 {
2133 EMIT_ASM (amd64_prologue,
2134 "pushq %rbp\n\t"
2135 "movq %rsp,%rbp\n\t"
2136 "sub $0x20,%rsp\n\t"
2137 "movq %rdi,-8(%rbp)\n\t"
2138 "movq %rsi,-16(%rbp)");
2139 }
2140
2141
2142 static void
2143 amd64_emit_epilogue (void)
2144 {
2145 EMIT_ASM (amd64_epilogue,
2146 "movq -16(%rbp),%rdi\n\t"
2147 "movq %rax,(%rdi)\n\t"
2148 "xor %rax,%rax\n\t"
2149 "leave\n\t"
2150 "ret");
2151 }
2152
2153 static void
2154 amd64_emit_add (void)
2155 {
2156 EMIT_ASM (amd64_add,
2157 "add (%rsp),%rax\n\t"
2158 "lea 0x8(%rsp),%rsp");
2159 }
2160
2161 static void
2162 amd64_emit_sub (void)
2163 {
2164 EMIT_ASM (amd64_sub,
2165 "sub %rax,(%rsp)\n\t"
2166 "pop %rax");
2167 }
2168
2169 static void
2170 amd64_emit_mul (void)
2171 {
2172 emit_error = 1;
2173 }
2174
2175 static void
2176 amd64_emit_lsh (void)
2177 {
2178 emit_error = 1;
2179 }
2180
2181 static void
2182 amd64_emit_rsh_signed (void)
2183 {
2184 emit_error = 1;
2185 }
2186
2187 static void
2188 amd64_emit_rsh_unsigned (void)
2189 {
2190 emit_error = 1;
2191 }
2192
2193 static void
2194 amd64_emit_ext (int arg)
2195 {
2196 switch (arg)
2197 {
2198 case 8:
2199 EMIT_ASM (amd64_ext_8,
2200 "cbtw\n\t"
2201 "cwtl\n\t"
2202 "cltq");
2203 break;
2204 case 16:
2205 EMIT_ASM (amd64_ext_16,
2206 "cwtl\n\t"
2207 "cltq");
2208 break;
2209 case 32:
2210 EMIT_ASM (amd64_ext_32,
2211 "cltq");
2212 break;
2213 default:
2214 emit_error = 1;
2215 }
2216 }
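     /* Worked example: for ARG == 8 with 0xff in %al, cbtw/cwtl/cltq widen
        %al -> %ax -> %eax -> %rax in turn, leaving 0xffffffffffffffff, the
        correct 64-bit sign extension of an 8-bit -1.  */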
2217
2218 static void
2219 amd64_emit_log_not (void)
2220 {
2221 EMIT_ASM (amd64_log_not,
2222 "test %rax,%rax\n\t"
2223 "sete %cl\n\t"
2224 "movzbq %cl,%rax");
2225 }
2226
2227 static void
2228 amd64_emit_bit_and (void)
2229 {
2230 EMIT_ASM (amd64_and,
2231 "and (%rsp),%rax\n\t"
2232 "lea 0x8(%rsp),%rsp");
2233 }
2234
2235 static void
2236 amd64_emit_bit_or (void)
2237 {
2238 EMIT_ASM (amd64_or,
2239 "or (%rsp),%rax\n\t"
2240 "lea 0x8(%rsp),%rsp");
2241 }
2242
2243 static void
2244 amd64_emit_bit_xor (void)
2245 {
2246 EMIT_ASM (amd64_xor,
2247 "xor (%rsp),%rax\n\t"
2248 "lea 0x8(%rsp),%rsp");
2249 }
2250
2251 static void
2252 amd64_emit_bit_not (void)
2253 {
2254 EMIT_ASM (amd64_bit_not,
2255 "xorq $0xffffffffffffffff,%rax");
2256 }
2257
2258 static void
2259 amd64_emit_equal (void)
2260 {
2261 EMIT_ASM (amd64_equal,
2262 "cmp %rax,(%rsp)\n\t"
2263 "je .Lamd64_equal_true\n\t"
2264 "xor %rax,%rax\n\t"
2265 "jmp .Lamd64_equal_end\n\t"
2266 ".Lamd64_equal_true:\n\t"
2267 "mov $0x1,%rax\n\t"
2268 ".Lamd64_equal_end:\n\t"
2269 "lea 0x8(%rsp),%rsp");
2270 }
2271
2272 static void
2273 amd64_emit_less_signed (void)
2274 {
2275 EMIT_ASM (amd64_less_signed,
2276 "cmp %rax,(%rsp)\n\t"
2277 "jl .Lamd64_less_signed_true\n\t"
2278 "xor %rax,%rax\n\t"
2279 "jmp .Lamd64_less_signed_end\n\t"
2280 ".Lamd64_less_signed_true:\n\t"
2281 "mov $1,%rax\n\t"
2282 ".Lamd64_less_signed_end:\n\t"
2283 "lea 0x8(%rsp),%rsp");
2284 }
2285
2286 static void
2287 amd64_emit_less_unsigned (void)
2288 {
2289 EMIT_ASM (amd64_less_unsigned,
2290 "cmp %rax,(%rsp)\n\t"
2291 "jb .Lamd64_less_unsigned_true\n\t"
2292 "xor %rax,%rax\n\t"
2293 "jmp .Lamd64_less_unsigned_end\n\t"
2294 ".Lamd64_less_unsigned_true:\n\t"
2295 "mov $1,%rax\n\t"
2296 ".Lamd64_less_unsigned_end:\n\t"
2297 "lea 0x8(%rsp),%rsp");
2298 }
2299
2300 static void
2301 amd64_emit_ref (int size)
2302 {
2303 switch (size)
2304 {
2305 case 1:
2306 EMIT_ASM (amd64_ref1,
2307 "movb (%rax),%al");
2308 break;
2309 case 2:
2310 EMIT_ASM (amd64_ref2,
2311 "movw (%rax),%ax");
2312 break;
2313 case 4:
2314 EMIT_ASM (amd64_ref4,
2315 "movl (%rax),%eax");
2316 break;
2317 case 8:
2318 EMIT_ASM (amd64_ref8,
2319 "movq (%rax),%rax");
2320 break;
2321 }
2322 }
2323
2324 static void
2325 amd64_emit_if_goto (int *offset_p, int *size_p)
2326 {
2327 EMIT_ASM (amd64_if_goto,
2328 "mov %rax,%rcx\n\t"
2329 "pop %rax\n\t"
2330 "cmp $0,%rcx\n\t"
2331 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2332 if (offset_p)
2333 *offset_p = 10;
2334 if (size_p)
2335 *size_p = 4;
2336 }
2337
2338 static void
2339 amd64_emit_goto (int *offset_p, int *size_p)
2340 {
2341 EMIT_ASM (amd64_goto,
2342 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2343 if (offset_p)
2344 *offset_p = 1;
2345 if (size_p)
2346 *size_p = 4;
2347 }
2348
2349 static void
2350 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2351 {
2352 int diff = (to - (from + size));
2353 unsigned char buf[sizeof (int)];
2354
2355 if (size != 4)
2356 {
2357 emit_error = 1;
2358 return;
2359 }
2360
2361 memcpy (buf, &diff, sizeof (int));
2362 write_inferior_memory (from, buf, sizeof (int));
2363 }
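     /* Sketch of how the two halves fit together (hypothetical caller):

          CORE_ADDR start = current_insn_ptr;
          int off, sz;
          amd64_emit_if_goto (&off, &sz);    -- displacement at start + off
          ...once the branch target TO is known...
          amd64_write_goto_address (start + off, to, sz);

        The stored rel32 is TO - (FROM + 4): counted from the end of the
        displacement field, which is also the end of the jump insn.  */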
2364
2365 static void
2366 amd64_emit_const (LONGEST num)
2367 {
2368 unsigned char buf[16];
2369 int i;
2370 CORE_ADDR buildaddr = current_insn_ptr;
2371
2372 i = 0;
2373 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
2374 memcpy (&buf[i], &num, sizeof (num));
2375 i += 8;
2376 append_insns (&buildaddr, i, buf);
2377 current_insn_ptr = buildaddr;
2378 }
2379
2380 static void
2381 amd64_emit_call (CORE_ADDR fn)
2382 {
2383 unsigned char buf[16];
2384 int i;
2385 CORE_ADDR buildaddr;
2386 LONGEST offset64;
2387
2388 /* The destination function, living in the IPA shared library, may be
2389 more than 31 bits away from the compiled code pad. */
2390
2391 buildaddr = current_insn_ptr;
2392
2393 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
2394
2395 i = 0;
2396
2397 if (offset64 > INT_MAX || offset64 < INT_MIN)
2398 {
2399 /* Offset is too large for a direct 32-bit call. Load the address
2400 into a register and call through it. %rdx is call-clobbered and
2401 carries none of our arguments, so we need not push/pop it. */
2402 buf[i++] = 0x48; /* movabs $fn,%rdx */
2403 buf[i++] = 0xba;
2404 memcpy (buf + i, &fn, 8);
2405 i += 8;
2406 buf[i++] = 0xff; /* callq *%rdx */
2407 buf[i++] = 0xd2;
2408 }
2409 else
2410 {
2411 int offset32 = offset64; /* We know it fits in 32 bits here. */
     buf[i++] = 0xe8; /* call <reladdr> */
2412 memcpy (buf + i, &offset32, 4);
2413 i += 4;
2414 }
2415
2416 append_insns (&buildaddr, i, buf);
2417 current_insn_ptr = buildaddr;
2418 }
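     /* Worked example: with the pad at 0x7ffff7f00000 and FN at
        0x7ffff7f10000, offset64 = 0xfffb fits in 32 bits, so the 5-byte
        e8 <rel32> call is emitted; were FN mapped more than 2GB away
        (common for the IPA shared library), the movabs/indirect form
        above is used instead.  */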
2419
2420 static void
2421 amd64_emit_reg (int reg)
2422 {
2423 unsigned char buf[16];
2424 int i;
2425 CORE_ADDR buildaddr;
2426
2427 /* Assume raw_regs is still in %rdi. */
2428 buildaddr = current_insn_ptr;
2429 i = 0;
2430 buf[i++] = 0xbe; /* mov $<n>,%esi */
2431 memcpy (&buf[i], &reg, sizeof (reg));
2432 i += 4;
2433 append_insns (&buildaddr, i, buf);
2434 current_insn_ptr = buildaddr;
2435 amd64_emit_call (get_raw_reg_func_addr ());
2436 }
2437
2438 static void
2439 amd64_emit_pop (void)
2440 {
2441 EMIT_ASM (amd64_pop,
2442 "pop %rax");
2443 }
2444
2445 static void
2446 amd64_emit_stack_flush (void)
2447 {
2448 EMIT_ASM (amd64_stack_flush,
2449 "push %rax");
2450 }
2451
2452 static void
2453 amd64_emit_zero_ext (int arg)
2454 {
2455 switch (arg)
2456 {
2457 case 8:
2458 EMIT_ASM (amd64_zero_ext_8,
2459 "and $0xff,%rax");
2460 break;
2461 case 16:
2462 EMIT_ASM (amd64_zero_ext_16,
2463 "and $0xffff,%rax");
2464 break;
2465 case 32:
2466 EMIT_ASM (amd64_zero_ext_32,
2467 "mov $0xffffffff,%rcx\n\t"
2468 "and %rcx,%rax");
2469 break;
2470 default:
2471 emit_error = 1;
2472 }
2473 }
2474
2475 static void
2476 amd64_emit_swap (void)
2477 {
2478 EMIT_ASM (amd64_swap,
2479 "mov %rax,%rcx\n\t"
2480 "pop %rax\n\t"
2481 "push %rcx");
2482 }
2483
2484 static void
2485 amd64_emit_stack_adjust (int n)
2486 {
2487 unsigned char buf[16];
2488 int i;
2489 CORE_ADDR buildaddr = current_insn_ptr;
2490
2491 i = 0;
2492 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
2493 buf[i++] = 0x8d;
2494 buf[i++] = 0x64;
2495 buf[i++] = 0x24;
2496 /* The single-byte displacement handles adjustments of at most 15 slots, but we don't expect any more. */
2497 buf[i++] = n * 8;
2498 append_insns (&buildaddr, i, buf);
2499 current_insn_ptr = buildaddr;
2500 }
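     /* Worked example: N == 2 emits 48 8d 64 24 10, i.e.
        lea 0x10(%rsp),%rsp, discarding two 8-byte stack slots at once.  */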
2501
2502 /* FN's prototype is `LONGEST(*fn)(int)'. */
2503
2504 static void
2505 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2506 {
2507 unsigned char buf[16];
2508 int i;
2509 CORE_ADDR buildaddr;
2510
2511 buildaddr = current_insn_ptr;
2512 i = 0;
2513 buf[i++] = 0xbf; /* movl $<n>,%edi */
2514 memcpy (&buf[i], &arg1, sizeof (arg1));
2515 i += 4;
2516 append_insns (&buildaddr, i, buf);
2517 current_insn_ptr = buildaddr;
2518 amd64_emit_call (fn);
2519 }
2520
2521 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2522
2523 static void
2524 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2525 {
2526 unsigned char buf[16];
2527 int i;
2528 CORE_ADDR buildaddr;
2529
2530 buildaddr = current_insn_ptr;
2531 i = 0;
2532 buf[i++] = 0xbf; /* movl $<n>,%edi */
2533 memcpy (&buf[i], &arg1, sizeof (arg1));
2534 i += 4;
2535 append_insns (&buildaddr, i, buf);
2536 current_insn_ptr = buildaddr;
2537 EMIT_ASM (amd64_void_call_2_a,
2538 /* Save away a copy of the stack top. */
2539 "push %rax\n\t"
2540 /* Also pass top as the second argument. */
2541 "mov %rax,%rsi");
2542 amd64_emit_call (fn);
2543 EMIT_ASM (amd64_void_call_2_b,
2544 /* Restore the stack top, %rax may have been trashed. */
2545 "pop %rax");
2546 }
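     /* At the emitted call, %edi carries ARG1 and %rsi the top-of-stack
        value, matching `void (*fn) (int, LONGEST)' under the SysV AMD64
        convention; the pushed copy survives FN clobbering %rax.  */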
2547
2548 void
2549 amd64_emit_eq_goto (int *offset_p, int *size_p)
2550 {
2551 EMIT_ASM (amd64_eq,
2552 "cmp %rax,(%rsp)\n\t"
2553 "jne .Lamd64_eq_fallthru\n\t"
2554 "lea 0x8(%rsp),%rsp\n\t"
2555 "pop %rax\n\t"
2556 /* jmp, but don't trust the assembler to choose the right jump */
2557 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2558 ".Lamd64_eq_fallthru:\n\t"
2559 "lea 0x8(%rsp),%rsp\n\t"
2560 "pop %rax");
2561
2562 if (offset_p)
2563 *offset_p = 13;
2564 if (size_p)
2565 *size_p = 4;
2566 }
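     /* Byte accounting for *offset_p above: cmp %rax,(%rsp) is 4 bytes,
        jne rel8 is 2, lea 0x8(%rsp),%rsp is 5, pop %rax is 1 and the e9
        opcode 1 more, so the rel32 starts 13 bytes in.  The same layout
        holds for the ne/lt/le/gt/ge variants below.  */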
2567
2568 void
2569 amd64_emit_ne_goto (int *offset_p, int *size_p)
2570 {
2571 EMIT_ASM (amd64_ne,
2572 "cmp %rax,(%rsp)\n\t"
2573 "je .Lamd64_ne_fallthru\n\t"
2574 "lea 0x8(%rsp),%rsp\n\t"
2575 "pop %rax\n\t"
2576 /* jmp, but don't trust the assembler to choose the right jump */
2577 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2578 ".Lamd64_ne_fallthru:\n\t"
2579 "lea 0x8(%rsp),%rsp\n\t"
2580 "pop %rax");
2581
2582 if (offset_p)
2583 *offset_p = 13;
2584 if (size_p)
2585 *size_p = 4;
2586 }
2587
2588 void
2589 amd64_emit_lt_goto (int *offset_p, int *size_p)
2590 {
2591 EMIT_ASM (amd64_lt,
2592 "cmp %rax,(%rsp)\n\t"
2593 "jnl .Lamd64_lt_fallthru\n\t"
2594 "lea 0x8(%rsp),%rsp\n\t"
2595 "pop %rax\n\t"
2596 /* jmp, but don't trust the assembler to choose the right jump */
2597 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2598 ".Lamd64_lt_fallthru:\n\t"
2599 "lea 0x8(%rsp),%rsp\n\t"
2600 "pop %rax");
2601
2602 if (offset_p)
2603 *offset_p = 13;
2604 if (size_p)
2605 *size_p = 4;
2606 }
2607
2608 void
2609 amd64_emit_le_goto (int *offset_p, int *size_p)
2610 {
2611 EMIT_ASM (amd64_le,
2612 "cmp %rax,(%rsp)\n\t"
2613 "jnle .Lamd64_le_fallthru\n\t"
2614 "lea 0x8(%rsp),%rsp\n\t"
2615 "pop %rax\n\t"
2616 /* jmp, but don't trust the assembler to choose the right jump */
2617 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2618 ".Lamd64_le_fallthru:\n\t"
2619 "lea 0x8(%rsp),%rsp\n\t"
2620 "pop %rax");
2621
2622 if (offset_p)
2623 *offset_p = 13;
2624 if (size_p)
2625 *size_p = 4;
2626 }
2627
2628 void
2629 amd64_emit_gt_goto (int *offset_p, int *size_p)
2630 {
2631 EMIT_ASM (amd64_gt,
2632 "cmp %rax,(%rsp)\n\t"
2633 "jng .Lamd64_gt_fallthru\n\t"
2634 "lea 0x8(%rsp),%rsp\n\t"
2635 "pop %rax\n\t"
2636 /* jmp, but don't trust the assembler to choose the right jump */
2637 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2638 ".Lamd64_gt_fallthru:\n\t"
2639 "lea 0x8(%rsp),%rsp\n\t"
2640 "pop %rax");
2641
2642 if (offset_p)
2643 *offset_p = 13;
2644 if (size_p)
2645 *size_p = 4;
2646 }
2647
2648 void
2649 amd64_emit_ge_goto (int *offset_p, int *size_p)
2650 {
2651 EMIT_ASM (amd64_ge,
2652 "cmp %rax,(%rsp)\n\t"
2653 "jnge .Lamd64_ge_fallthru\n\t"
2654 ".Lamd64_ge_jump:\n\t"
2655 "lea 0x8(%rsp),%rsp\n\t"
2656 "pop %rax\n\t"
2657 /* jmp, but don't trust the assembler to choose the right jump */
2658 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2659 ".Lamd64_ge_fallthru:\n\t"
2660 "lea 0x8(%rsp),%rsp\n\t"
2661 "pop %rax");
2662
2663 if (offset_p)
2664 *offset_p = 13;
2665 if (size_p)
2666 *size_p = 4;
2667 }
2668
2669 struct emit_ops amd64_emit_ops =
2670 {
2671 amd64_emit_prologue,
2672 amd64_emit_epilogue,
2673 amd64_emit_add,
2674 amd64_emit_sub,
2675 amd64_emit_mul,
2676 amd64_emit_lsh,
2677 amd64_emit_rsh_signed,
2678 amd64_emit_rsh_unsigned,
2679 amd64_emit_ext,
2680 amd64_emit_log_not,
2681 amd64_emit_bit_and,
2682 amd64_emit_bit_or,
2683 amd64_emit_bit_xor,
2684 amd64_emit_bit_not,
2685 amd64_emit_equal,
2686 amd64_emit_less_signed,
2687 amd64_emit_less_unsigned,
2688 amd64_emit_ref,
2689 amd64_emit_if_goto,
2690 amd64_emit_goto,
2691 amd64_write_goto_address,
2692 amd64_emit_const,
2693 amd64_emit_call,
2694 amd64_emit_reg,
2695 amd64_emit_pop,
2696 amd64_emit_stack_flush,
2697 amd64_emit_zero_ext,
2698 amd64_emit_swap,
2699 amd64_emit_stack_adjust,
2700 amd64_emit_int_call_1,
2701 amd64_emit_void_call_2,
2702 amd64_emit_eq_goto,
2703 amd64_emit_ne_goto,
2704 amd64_emit_lt_goto,
2705 amd64_emit_le_goto,
2706 amd64_emit_gt_goto,
2707 amd64_emit_ge_goto
2708 };
2709
2710 #endif /* __x86_64__ */
2711
2712 static void
2713 i386_emit_prologue (void)
2714 {
2715 EMIT_ASM32 (i386_prologue,
2716 "push %ebp\n\t"
2717 "mov %esp,%ebp\n\t"
2718 "push %ebx");
2719 /* At this point, the raw regs base address is at 8(%ebp), and the
2720 value pointer is at 12(%ebp). */
2721 }
2722
2723 static void
2724 i386_emit_epilogue (void)
2725 {
2726 EMIT_ASM32 (i386_epilogue,
2727 "mov 12(%ebp),%ecx\n\t"
2728 "mov %eax,(%ecx)\n\t"
2729 "mov %ebx,0x4(%ecx)\n\t"
2730 "xor %eax,%eax\n\t"
2731 "pop %ebx\n\t"
2732 "pop %ebp\n\t"
2733 "ret");
2734 }
2735
2736 static void
2737 i386_emit_add (void)
2738 {
2739 EMIT_ASM32 (i386_add,
2740 "add (%esp),%eax\n\t"
2741 "adc 0x4(%esp),%ebx\n\t"
2742 "lea 0x8(%esp),%esp");
2743 }
2744
2745 static void
2746 i386_emit_sub (void)
2747 {
2748 EMIT_ASM32 (i386_sub,
2749 "subl %eax,(%esp)\n\t"
2750 "sbbl %ebx,4(%esp)\n\t"
2751 "pop %eax\n\t"
2752 "pop %ebx\n\t");
2753 }
2754
2755 static void
2756 i386_emit_mul (void)
2757 {
2758 emit_error = 1;
2759 }
2760
2761 static void
2762 i386_emit_lsh (void)
2763 {
2764 emit_error = 1;
2765 }
2766
2767 static void
2768 i386_emit_rsh_signed (void)
2769 {
2770 emit_error = 1;
2771 }
2772
2773 static void
2774 i386_emit_rsh_unsigned (void)
2775 {
2776 emit_error = 1;
2777 }
2778
2779 static void
2780 i386_emit_ext (int arg)
2781 {
2782 switch (arg)
2783 {
2784 case 8:
2785 EMIT_ASM32 (i386_ext_8,
2786 "cbtw\n\t"
2787 "cwtl\n\t"
2788 "movl %eax,%ebx\n\t"
2789 "sarl $31,%ebx");
2790 break;
2791 case 16:
2792 EMIT_ASM32 (i386_ext_16,
2793 "cwtl\n\t"
2794 "movl %eax,%ebx\n\t"
2795 "sarl $31,%ebx");
2796 break;
2797 case 32:
2798 EMIT_ASM32 (i386_ext_32,
2799 "movl %eax,%ebx\n\t"
2800 "sarl $31,%ebx");
2801 break;
2802 default:
2803 emit_error = 1;
2804 }
2805 }
2806
2807 static void
2808 i386_emit_log_not (void)
2809 {
2810 EMIT_ASM32 (i386_log_not,
2811 "or %ebx,%eax\n\t"
2812 "test %eax,%eax\n\t"
2813 "sete %cl\n\t"
2814 "xor %ebx,%ebx\n\t"
2815 "movzbl %cl,%eax");
2816 }
2817
2818 static void
2819 i386_emit_bit_and (void)
2820 {
2821 EMIT_ASM32 (i386_and,
2822 "and (%esp),%eax\n\t"
2823 "and 0x4(%esp),%ebx\n\t"
2824 "lea 0x8(%esp),%esp");
2825 }
2826
2827 static void
2828 i386_emit_bit_or (void)
2829 {
2830 EMIT_ASM32 (i386_or,
2831 "or (%esp),%eax\n\t"
2832 "or 0x4(%esp),%ebx\n\t"
2833 "lea 0x8(%esp),%esp");
2834 }
2835
2836 static void
2837 i386_emit_bit_xor (void)
2838 {
2839 EMIT_ASM32 (i386_xor,
2840 "xor (%esp),%eax\n\t"
2841 "xor 0x4(%esp),%ebx\n\t"
2842 "lea 0x8(%esp),%esp");
2843 }
2844
2845 static void
2846 i386_emit_bit_not (void)
2847 {
2848 EMIT_ASM32 (i386_bit_not,
2849 "xor $0xffffffff,%eax\n\t"
2850 "xor $0xffffffff,%ebx\n\t");
2851 }
2852
2853 static void
2854 i386_emit_equal (void)
2855 {
2856 EMIT_ASM32 (i386_equal,
2857 "cmpl %ebx,4(%esp)\n\t"
2858 "jne .Li386_equal_false\n\t"
2859 "cmpl %eax,(%esp)\n\t"
2860 "je .Li386_equal_true\n\t"
2861 ".Li386_equal_false:\n\t"
2862 "xor %eax,%eax\n\t"
2863 "jmp .Li386_equal_end\n\t"
2864 ".Li386_equal_true:\n\t"
2865 "mov $1,%eax\n\t"
2866 ".Li386_equal_end:\n\t"
2867 "xor %ebx,%ebx\n\t"
2868 "lea 0x8(%esp),%esp");
2869 }
2870
2871 static void
2872 i386_emit_less_signed (void)
2873 {
2874 EMIT_ASM32 (i386_less_signed,
2875 "cmpl %ebx,4(%esp)\n\t"
2876 "jl .Li386_less_signed_true\n\t"
2877 "jne .Li386_less_signed_false\n\t"
2878 "cmpl %eax,(%esp)\n\t"
2879 "jl .Li386_less_signed_true\n\t"
2880 ".Li386_less_signed_false:\n\t"
2881 "xor %eax,%eax\n\t"
2882 "jmp .Li386_less_signed_end\n\t"
2883 ".Li386_less_signed_true:\n\t"
2884 "mov $1,%eax\n\t"
2885 ".Li386_less_signed_end:\n\t"
2886 "xor %ebx,%ebx\n\t"
2887 "lea 0x8(%esp),%esp");
2888 }
2889
2890 static void
2891 i386_emit_less_unsigned (void)
2892 {
2893 EMIT_ASM32 (i386_less_unsigned,
2894 "cmpl %ebx,4(%esp)\n\t"
2895 "jb .Li386_less_unsigned_true\n\t"
2896 "jne .Li386_less_unsigned_false\n\t"
2897 "cmpl %eax,(%esp)\n\t"
2898 "jb .Li386_less_unsigned_true\n\t"
2899 ".Li386_less_unsigned_false:\n\t"
2900 "xor %eax,%eax\n\t"
2901 "jmp .Li386_less_unsigned_end\n\t"
2902 ".Li386_less_unsigned_true:\n\t"
2903 "mov $1,%eax\n\t"
2904 ".Li386_less_unsigned_end:\n\t"
2905 "xor %ebx,%ebx\n\t"
2906 "lea 0x8(%esp),%esp");
2907 }
2908
2909 static void
2910 i386_emit_ref (int size)
2911 {
2912 switch (size)
2913 {
2914 case 1:
2915 EMIT_ASM32 (i386_ref1,
2916 "movb (%eax),%al");
2917 break;
2918 case 2:
2919 EMIT_ASM32 (i386_ref2,
2920 "movw (%eax),%ax");
2921 break;
2922 case 4:
2923 EMIT_ASM32 (i386_ref4,
2924 "movl (%eax),%eax");
2925 break;
2926 case 8:
2927 EMIT_ASM32 (i386_ref8,
2928 "movl 4(%eax),%ebx\n\t"
2929 "movl (%eax),%eax");
2930 break;
2931 }
2932 }
2933
2934 static void
2935 i386_emit_if_goto (int *offset_p, int *size_p)
2936 {
2937 EMIT_ASM32 (i386_if_goto,
2938 "mov %eax,%ecx\n\t"
2939 "or %ebx,%ecx\n\t"
2940 "pop %eax\n\t"
2941 "pop %ebx\n\t"
2942 "cmpl $0,%ecx\n\t"
2943 /* Don't trust the assembler to choose the right jump */
2944 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2945
2946 if (offset_p)
2947 *offset_p = 11; /* be sure that this matches the sequence above */
2948 if (size_p)
2949 *size_p = 4;
2950 }
2951
2952 static void
2953 i386_emit_goto (int *offset_p, int *size_p)
2954 {
2955 EMIT_ASM32 (i386_goto,
2956 /* Don't trust the assembler to choose the right jump */
2957 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2958 if (offset_p)
2959 *offset_p = 1;
2960 if (size_p)
2961 *size_p = 4;
2962 }
2963
2964 static void
2965 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2966 {
2967 int diff = (to - (from + size));
2968 unsigned char buf[sizeof (int)];
2969
2970 /* We're only doing 4-byte sizes at the moment. */
2971 if (size != 4)
2972 {
2973 emit_error = 1;
2974 return;
2975 }
2976
2977 memcpy (buf, &diff, sizeof (int));
2978 write_inferior_memory (from, buf, sizeof (int));
2979 }
2980
2981 static void
2982 i386_emit_const (LONGEST num)
2983 {
2984 unsigned char buf[16];
2985 int i, hi, lo;
2986 CORE_ADDR buildaddr = current_insn_ptr;
2987
2988 i = 0;
2989 buf[i++] = 0xb8; /* mov $<n>,%eax */
2990 lo = num & 0xffffffff;
2991 memcpy (&buf[i], &lo, sizeof (lo));
2992 i += 4;
2993 hi = ((num >> 32) & 0xffffffff);
2994 if (hi)
2995 {
2996 buf[i++] = 0xbb; /* mov $<n>,%ebx */
2997 memcpy (&buf[i], &hi, sizeof (hi));
2998 i += 4;
2999 }
3000 else
3001 {
3002 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
3003 }
3004 append_insns (&buildaddr, i, buf);
3005 current_insn_ptr = buildaddr;
3006 }
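     /* Worked example: NUM == 5 emits b8 05 00 00 00 / 31 db
        (mov $5,%eax; xor %ebx,%ebx), while NUM == -1 loads 0xffffffff
        into both halves, since the 64-bit value lives in %ebx:%eax.  */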
3007
3008 static void
3009 i386_emit_call (CORE_ADDR fn)
3010 {
3011 unsigned char buf[16];
3012 int i, offset;
3013 CORE_ADDR buildaddr;
3014
3015 buildaddr = current_insn_ptr;
3016 i = 0;
3017 buf[i++] = 0xe8; /* call <reladdr> */
3018 offset = ((int) fn) - (buildaddr + 5);
3019 memcpy (buf + 1, &offset, 4);
3020 append_insns (&buildaddr, 5, buf);
3021 current_insn_ptr = buildaddr;
3022 }
3023
3024 static void
3025 i386_emit_reg (int reg)
3026 {
3027 unsigned char buf[16];
3028 int i;
3029 CORE_ADDR buildaddr;
3030
3031 EMIT_ASM32 (i386_reg_a,
3032 "sub $0x8,%esp");
3033 buildaddr = current_insn_ptr;
3034 i = 0;
3035 buf[i++] = 0xb8; /* mov $<n>,%eax */
3036 memcpy (&buf[i], &reg, sizeof (reg));
3037 i += 4;
3038 append_insns (&buildaddr, i, buf);
3039 current_insn_ptr = buildaddr;
3040 EMIT_ASM32 (i386_reg_b,
3041 "mov %eax,4(%esp)\n\t"
3042 "mov 8(%ebp),%eax\n\t"
3043 "mov %eax,(%esp)");
3044 i386_emit_call (get_raw_reg_func_addr ());
3045 EMIT_ASM32 (i386_reg_c,
3046 "xor %ebx,%ebx\n\t"
3047 "lea 0x8(%esp),%esp");
3048 }
3049
3050 static void
3051 i386_emit_pop (void)
3052 {
3053 EMIT_ASM32 (i386_pop,
3054 "pop %eax\n\t"
3055 "pop %ebx");
3056 }
3057
3058 static void
3059 i386_emit_stack_flush (void)
3060 {
3061 EMIT_ASM32 (i386_stack_flush,
3062 "push %ebx\n\t"
3063 "push %eax");
3064 }
3065
3066 static void
3067 i386_emit_zero_ext (int arg)
3068 {
3069 switch (arg)
3070 {
3071 case 8:
3072 EMIT_ASM32 (i386_zero_ext_8,
3073 "and $0xff,%eax\n\t"
3074 "xor %ebx,%ebx");
3075 break;
3076 case 16:
3077 EMIT_ASM32 (i386_zero_ext_16,
3078 "and $0xffff,%eax\n\t"
3079 "xor %ebx,%ebx");
3080 break;
3081 case 32:
3082 EMIT_ASM32 (i386_zero_ext_32,
3083 "xor %ebx,%ebx");
3084 break;
3085 default:
3086 emit_error = 1;
3087 }
3088 }
3089
3090 static void
3091 i386_emit_swap (void)
3092 {
3093 EMIT_ASM32 (i386_swap,
3094 "mov %eax,%ecx\n\t"
3095 "mov %ebx,%edx\n\t"
3096 "pop %eax\n\t"
3097 "pop %ebx\n\t"
3098 "push %edx\n\t"
3099 "push %ecx");
3100 }
3101
3102 static void
3103 i386_emit_stack_adjust (int n)
3104 {
3105 unsigned char buf[16];
3106 int i;
3107 CORE_ADDR buildaddr = current_insn_ptr;
3108
3109 i = 0;
3110 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
3111 buf[i++] = 0x64;
3112 buf[i++] = 0x24;
3113 buf[i++] = n * 8; /* Single-byte displacement: at most 15 slots, as above. */
3114 append_insns (&buildaddr, i, buf);
3115 current_insn_ptr = buildaddr;
3116 }
3117
3118 /* FN's prototype is `LONGEST(*fn)(int)'. */
3119
3120 static void
3121 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
3122 {
3123 unsigned char buf[16];
3124 int i;
3125 CORE_ADDR buildaddr;
3126
3127 EMIT_ASM32 (i386_int_call_1_a,
3128 /* Reserve a bit of stack space. */
3129 "sub $0x8,%esp");
3130 /* Put the one argument on the stack. */
3131 buildaddr = current_insn_ptr;
3132 i = 0;
3133 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
3134 buf[i++] = 0x04;
3135 buf[i++] = 0x24;
3136 memcpy (&buf[i], &arg1, sizeof (arg1));
3137 i += 4;
3138 append_insns (&buildaddr, i, buf);
3139 current_insn_ptr = buildaddr;
3140 i386_emit_call (fn);
3141 EMIT_ASM32 (i386_int_call_1_c,
3142 "mov %edx,%ebx\n\t"
3143 "lea 0x8(%esp),%esp");
3144 }
3145
3146 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
3147
3148 static void
3149 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
3150 {
3151 unsigned char buf[16];
3152 int i;
3153 CORE_ADDR buildaddr;
3154
3155 EMIT_ASM32 (i386_void_call_2_a,
3156 /* Preserve %eax only; %ebx is callee-saved, so FN restores it. */
3157 "push %eax\n\t"
3158 /* Reserve a bit of stack space for arguments. */
3159 "sub $0x10,%esp\n\t"
3160 /* Copy "top" to the second argument position. (Note that
3161 we can't assume the function won't scribble on its
3162 arguments, so don't try to restore from this.) */
3163 "mov %eax,4(%esp)\n\t"
3164 "mov %ebx,8(%esp)");
3165 /* Put the first argument on the stack. */
3166 buildaddr = current_insn_ptr;
3167 i = 0;
3168 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
3169 buf[i++] = 0x04;
3170 buf[i++] = 0x24;
3171 memcpy (&buf[i], &arg1, sizeof (arg1));
3172 i += 4;
3173 append_insns (&buildaddr, i, buf);
3174 current_insn_ptr = buildaddr;
3175 i386_emit_call (fn);
3176 EMIT_ASM32 (i386_void_call_2_b,
3177 "lea 0x10(%esp),%esp\n\t"
3178 /* Restore original stack top. */
3179 "pop %eax");
3180 }
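     /* For illustration, the stack at the emitted call site:

          (%esp)    arg1 (the int argument)
          4(%esp)   low half of the top-of-stack value (from %eax)
          8(%esp)   high half (from %ebx)
          12(%esp)  unused padding
          16(%esp)  saved %eax

        matching `void (*fn) (int, LONGEST)', where the LONGEST occupies
        two 4-byte argument slots.  */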
3181
3182
3183 void
3184 i386_emit_eq_goto (int *offset_p, int *size_p)
3185 {
3186 EMIT_ASM32 (eq,
3187 /* Check the low half first; it is more likely to be the decider. */
3188 "cmpl %eax,(%esp)\n\t"
3189 "jne .Leq_fallthru\n\t"
3190 "cmpl %ebx,4(%esp)\n\t"
3191 "jne .Leq_fallthru\n\t"
3192 "lea 0x8(%esp),%esp\n\t"
3193 "pop %eax\n\t"
3194 "pop %ebx\n\t"
3195 /* jmp, but don't trust the assembler to choose the right jump */
3196 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3197 ".Leq_fallthru:\n\t"
3198 "lea 0x8(%esp),%esp\n\t"
3199 "pop %eax\n\t"
3200 "pop %ebx");
3201
3202 if (offset_p)
3203 *offset_p = 18;
3204 if (size_p)
3205 *size_p = 4;
3206 }
3207
3208 void
3209 i386_emit_ne_goto (int *offset_p, int *size_p)
3210 {
3211 EMIT_ASM32 (ne,
3212 /* Check the low half first; it is more likely to be the decider. */
3213 "cmpl %eax,(%esp)\n\t"
3214 "jne .Lne_jump\n\t"
3215 "cmpl %ebx,4(%esp)\n\t"
3216 "je .Lne_fallthru\n\t"
3217 ".Lne_jump:\n\t"
3218 "lea 0x8(%esp),%esp\n\t"
3219 "pop %eax\n\t"
3220 "pop %ebx\n\t"
3221 /* jmp, but don't trust the assembler to choose the right jump */
3222 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3223 ".Lne_fallthru:\n\t"
3224 "lea 0x8(%esp),%esp\n\t"
3225 "pop %eax\n\t"
3226 "pop %ebx");
3227
3228 if (offset_p)
3229 *offset_p = 18;
3230 if (size_p)
3231 *size_p = 4;
3232 }
3233
3234 void
3235 i386_emit_lt_goto (int *offset_p, int *size_p)
3236 {
3237 EMIT_ASM32 (lt,
3238 "cmpl %ebx,4(%esp)\n\t"
3239 "jl .Llt_jump\n\t"
3240 "jne .Llt_fallthru\n\t"
3241 "cmpl %eax,(%esp)\n\t"
3242 "jnl .Llt_fallthru\n\t"
3243 ".Llt_jump:\n\t"
3244 "lea 0x8(%esp),%esp\n\t"
3245 "pop %eax\n\t"
3246 "pop %ebx\n\t"
3247 /* jmp, but don't trust the assembler to choose the right jump */
3248 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3249 ".Llt_fallthru:\n\t"
3250 "lea 0x8(%esp),%esp\n\t"
3251 "pop %eax\n\t"
3252 "pop %ebx");
3253
3254 if (offset_p)
3255 *offset_p = 20;
3256 if (size_p)
3257 *size_p = 4;
3258 }
3259
3260 void
3261 i386_emit_le_goto (int *offset_p, int *size_p)
3262 {
3263 EMIT_ASM32 (le,
3264 "cmpl %ebx,4(%esp)\n\t"
3265 "jle .Lle_jump\n\t"
3266 "jne .Lle_fallthru\n\t"
3267 "cmpl %eax,(%esp)\n\t"
3268 "jnle .Lle_fallthru\n\t"
3269 ".Lle_jump:\n\t"
3270 "lea 0x8(%esp),%esp\n\t"
3271 "pop %eax\n\t"
3272 "pop %ebx\n\t"
3273 /* jmp, but don't trust the assembler to choose the right jump */
3274 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3275 ".Lle_fallthru:\n\t"
3276 "lea 0x8(%esp),%esp\n\t"
3277 "pop %eax\n\t"
3278 "pop %ebx");
3279
3280 if (offset_p)
3281 *offset_p = 20;
3282 if (size_p)
3283 *size_p = 4;
3284 }
3285
3286 void
3287 i386_emit_gt_goto (int *offset_p, int *size_p)
3288 {
3289 EMIT_ASM32 (gt,
3290 "cmpl %ebx,4(%esp)\n\t"
3291 "jg .Lgt_jump\n\t"
3292 "jne .Lgt_fallthru\n\t"
3293 "cmpl %eax,(%esp)\n\t"
3294 "jng .Lgt_fallthru\n\t"
3295 ".Lgt_jump:\n\t"
3296 "lea 0x8(%esp),%esp\n\t"
3297 "pop %eax\n\t"
3298 "pop %ebx\n\t"
3299 /* jmp, but don't trust the assembler to choose the right jump */
3300 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3301 ".Lgt_fallthru:\n\t"
3302 "lea 0x8(%esp),%esp\n\t"
3303 "pop %eax\n\t"
3304 "pop %ebx");
3305
3306 if (offset_p)
3307 *offset_p = 20;
3308 if (size_p)
3309 *size_p = 4;
3310 }
3311
3312 void
3313 i386_emit_ge_goto (int *offset_p, int *size_p)
3314 {
3315 EMIT_ASM32 (ge,
3316 "cmpl %ebx,4(%esp)\n\t"
3317 "jge .Lge_jump\n\t"
3318 "jne .Lge_fallthru\n\t"
3319 "cmpl %eax,(%esp)\n\t"
3320 "jnge .Lge_fallthru\n\t"
3321 ".Lge_jump:\n\t"
3322 "lea 0x8(%esp),%esp\n\t"
3323 "pop %eax\n\t"
3324 "pop %ebx\n\t"
3325 /* jmp, but don't trust the assembler to choose the right jump */
3326 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3327 ".Lge_fallthru:\n\t"
3328 "lea 0x8(%esp),%esp\n\t"
3329 "pop %eax\n\t"
3330 "pop %ebx");
3331
3332 if (offset_p)
3333 *offset_p = 20;
3334 if (size_p)
3335 *size_p = 4;
3336 }
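     /* Byte accounting for the i386 conditional gotos above: eq/ne emit
        cmpl %eax,(%esp) (3 bytes), jcc rel8 (2), cmpl %ebx,4(%esp) (4),
        jcc rel8 (2), lea (4), two pops (2) and the e9 opcode (1), putting
        the rel32 at byte 18; lt/le/gt/ge add a third short jump, hence 20.  */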
3337
3338 struct emit_ops i386_emit_ops =
3339 {
3340 i386_emit_prologue,
3341 i386_emit_epilogue,
3342 i386_emit_add,
3343 i386_emit_sub,
3344 i386_emit_mul,
3345 i386_emit_lsh,
3346 i386_emit_rsh_signed,
3347 i386_emit_rsh_unsigned,
3348 i386_emit_ext,
3349 i386_emit_log_not,
3350 i386_emit_bit_and,
3351 i386_emit_bit_or,
3352 i386_emit_bit_xor,
3353 i386_emit_bit_not,
3354 i386_emit_equal,
3355 i386_emit_less_signed,
3356 i386_emit_less_unsigned,
3357 i386_emit_ref,
3358 i386_emit_if_goto,
3359 i386_emit_goto,
3360 i386_write_goto_address,
3361 i386_emit_const,
3362 i386_emit_call,
3363 i386_emit_reg,
3364 i386_emit_pop,
3365 i386_emit_stack_flush,
3366 i386_emit_zero_ext,
3367 i386_emit_swap,
3368 i386_emit_stack_adjust,
3369 i386_emit_int_call_1,
3370 i386_emit_void_call_2,
3371 i386_emit_eq_goto,
3372 i386_emit_ne_goto,
3373 i386_emit_lt_goto,
3374 i386_emit_le_goto,
3375 i386_emit_gt_goto,
3376 i386_emit_ge_goto
3377 };
3378
3379
3380 static struct emit_ops *
3381 x86_emit_ops (void)
3382 {
3383 #ifdef __x86_64__
3384 if (is_64bit_tdesc ())
3385 return &amd64_emit_ops;
3386 else
3387 #endif
3388 return &i386_emit_ops;
3389 }
3390
3391 static int
3392 x86_supports_range_stepping (void)
3393 {
3394 return 1;
3395 }
3396
3397 /* This is initialized assuming an amd64 target.
3398 x86_arch_setup will correct it for i386 or amd64 targets. */
3399
3400 struct linux_target_ops the_low_target =
3401 {
3402 x86_arch_setup,
3403 x86_linux_regs_info,
3404 x86_cannot_fetch_register,
3405 x86_cannot_store_register,
3406 NULL, /* fetch_register */
3407 x86_get_pc,
3408 x86_set_pc,
3409 x86_breakpoint,
3410 x86_breakpoint_len,
3411 NULL, /* breakpoint_reinsert_addr */
3412 1, /* decr_pc_after_break */
3413 x86_breakpoint_at,
3414 x86_supports_z_point_type,
3415 x86_insert_point,
3416 x86_remove_point,
3417 x86_stopped_by_watchpoint,
3418 x86_stopped_data_address,
3419 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3420 native i386 case (no registers smaller than an xfer unit), and are not
3421 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3422 NULL,
3423 NULL,
3424 /* need to fix up i386 siginfo if host is amd64 */
3425 x86_siginfo_fixup,
3426 x86_linux_new_process,
3427 x86_linux_new_thread,
3428 x86_linux_prepare_to_resume,
3429 x86_linux_process_qsupported,
3430 x86_supports_tracepoints,
3431 x86_get_thread_area,
3432 x86_install_fast_tracepoint_jump_pad,
3433 x86_emit_ops,
3434 x86_get_min_fast_tracepoint_insn_len,
3435 x86_supports_range_stepping,
3436 };
3437
3438 void
3439 initialize_low_arch (void)
3440 {
3441 /* Initialize the Linux target descriptions. */
3442 #ifdef __x86_64__
3443 init_registers_amd64_linux ();
3444 init_registers_amd64_avx_linux ();
3445 init_registers_amd64_avx512_linux ();
3446 init_registers_amd64_mpx_linux ();
3447
3448 init_registers_x32_linux ();
3449 init_registers_x32_avx_linux ();
3450 init_registers_x32_avx512_linux ();
3451
3452 tdesc_amd64_linux_no_xml = xmalloc (sizeof (struct target_desc));
3453 copy_target_description (tdesc_amd64_linux_no_xml, tdesc_amd64_linux);
3454 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
3455 #endif
3456 init_registers_i386_linux ();
3457 init_registers_i386_mmx_linux ();
3458 init_registers_i386_avx_linux ();
3459 init_registers_i386_avx512_linux ();
3460 init_registers_i386_mpx_linux ();
3461
3462 tdesc_i386_linux_no_xml = xmalloc (sizeof (struct target_desc));
3463 copy_target_description (tdesc_i386_linux_no_xml, tdesc_i386_linux);
3464 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
3465
3466 initialize_regsets_info (&x86_regsets_info);
3467 }