/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002-2015 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include <signal.h>
#include <limits.h>
#include <inttypes.h>
#include "linux-low.h"
#include "i387-fp.h"
#include "x86-low.h"
#include "x86-xstate.h"

#include "gdb_proc_service.h"
/* Don't include elf/common.h if linux/elf.h got included by
   gdb_proc_service.h.  */
#ifndef ELFMAG0
#include "elf/common.h"
#endif

#include "agent.h"
#include "tdesc.h"
#include "tracepoint.h"
#include "ax.h"

#ifdef __x86_64__
/* Defined in auto-generated file amd64-linux.c.  */
void init_registers_amd64_linux (void);
extern const struct target_desc *tdesc_amd64_linux;

/* Defined in auto-generated file amd64-avx-linux.c.  */
void init_registers_amd64_avx_linux (void);
extern const struct target_desc *tdesc_amd64_avx_linux;

/* Defined in auto-generated file amd64-avx512-linux.c.  */
void init_registers_amd64_avx512_linux (void);
extern const struct target_desc *tdesc_amd64_avx512_linux;

/* Defined in auto-generated file amd64-mpx-linux.c.  */
void init_registers_amd64_mpx_linux (void);
extern const struct target_desc *tdesc_amd64_mpx_linux;

/* Defined in auto-generated file x32-linux.c.  */
void init_registers_x32_linux (void);
extern const struct target_desc *tdesc_x32_linux;

/* Defined in auto-generated file x32-avx-linux.c.  */
void init_registers_x32_avx_linux (void);
extern const struct target_desc *tdesc_x32_avx_linux;

/* Defined in auto-generated file x32-avx512-linux.c.  */
void init_registers_x32_avx512_linux (void);
extern const struct target_desc *tdesc_x32_avx512_linux;

#endif

/* Defined in auto-generated file i386-linux.c.  */
void init_registers_i386_linux (void);
extern const struct target_desc *tdesc_i386_linux;

/* Defined in auto-generated file i386-mmx-linux.c.  */
void init_registers_i386_mmx_linux (void);
extern const struct target_desc *tdesc_i386_mmx_linux;

/* Defined in auto-generated file i386-avx-linux.c.  */
void init_registers_i386_avx_linux (void);
extern const struct target_desc *tdesc_i386_avx_linux;

/* Defined in auto-generated file i386-avx512-linux.c.  */
void init_registers_i386_avx512_linux (void);
extern const struct target_desc *tdesc_i386_avx512_linux;

/* Defined in auto-generated file i386-mpx-linux.c.  */
void init_registers_i386_mpx_linux (void);
extern const struct target_desc *tdesc_i386_mpx_linux;

#ifdef __x86_64__
static struct target_desc *tdesc_amd64_linux_no_xml;
#endif
static struct target_desc *tdesc_i386_linux_no_xml;


static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
static unsigned char small_jump_insn[] = { 0x66, 0xe9, 0, 0 };
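
/* A note on the templates above: jump_insn is a 5-byte "jmp rel32"
   (opcode 0xe9 followed by a 4-byte displacement), and small_jump_insn
   is a 4-byte "jmp rel16" (0xe9 with a 0x66 operand-size prefix and a
   2-byte displacement).  The zeroed displacement bytes get patched in
   when the jump is actually emitted.  */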

/* Backward compatibility for gdb without XML support.  */

static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/ptrace.h>
#include <sys/uio.h>

#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET 0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET 0x4205
#endif


#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct x86_debug_reg_state debug_reg_state;
};

/* Per-thread arch-specific data we want to keep.  */

struct arch_lwp_info
{
  /* Non-zero if our copy differs from what's recorded in the thread.  */
  int debug_registers_changed;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So the code below doesn't have to care whether it's i386 or amd64.  */
#define ORIG_EAX ORIG_RAX
#define REGSIZE 8

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8,
  -1, -1, -1, -1,                  /* MPX registers BND0 ... BND3.  */
  -1, -1,                          /* MPX registers BNDCFGU, BNDSTATUS.  */
  -1, -1, -1, -1, -1, -1, -1, -1,  /* xmm16 ... xmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,  /* ymm16 ... ymm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,  /* k0 ... k7 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,  /* zmm0 ... zmm31 (AVX512)  */
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))
#define X86_64_USER_REGS (GS + 1)

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#define REGSIZE 4

#endif

#ifdef __x86_64__

/* Returns true if the current inferior belongs to an x86-64 process,
   per the tdesc.  */

static int
is_64bit_tdesc (void)
{
  struct regcache *regcache = get_thread_regcache (current_thread, 0);

  return register_size (regcache->tdesc, 0) == 8;
}

#endif

\f
/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      switch (idx)
        {
        case FS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
            return PS_OK;
          break;
        case GS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
            return PS_OK;
          break;
        default:
          return PS_BADADDR;
        }
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
                (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* Ensure we properly extend the value to 64-bits for x86_64.  */
    *base = (void *) (uintptr_t) desc[1];
    return PS_OK;
  }
}

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = is_64bit_tdesc ();

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
        {
          *addr = (CORE_ADDR) (uintptr_t) base;
          return 0;
        }

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct thread_info *thr = get_lwp_thread (lwp);
    struct regcache *regcache = get_thread_regcache (thr, 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* Shift count: drop the selector's
                                      low 3 bits (RPL and TI) to get
                                      the GDT entry index.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
                lwpid_of (thr),
                (void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}


\f
static int
x86_cannot_store_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static int
x86_cannot_fetch_register (int regno)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return 0;
#endif

  return regno >= I386_NUM_REGS;
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }

  /* 32-bit inferior registers need to be zero-extended.
     Callers would read uninitialized memory otherwise.  */
  memset (buf, 0x00, X86_64_USER_REGS * 8);
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  collect_register_by_name (regcache, "orig_eax",
                            ((char *) buf) + ORIG_EAX * REGSIZE);
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (regcache->tdesc, 0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
                           ((char *) buf) + ORIG_EAX * REGSIZE);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update the supported regsets accordingly.  */

static struct regset_info x86_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  { 0, 0, 0, -1, -1, NULL, NULL }
};

static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      unsigned long pc;
      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      unsigned int pc;
      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (regcache->tdesc, 0) == 8;

  if (use_64bit)
    {
      unsigned long newpc = pc;
      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      unsigned int newpc = pc;
      supply_register_by_name (regcache, "eip", &newpc);
    }
}
\f
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  (*the_target->read_memory) (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}
\f

/* Return the offset of REGNUM in the u_debugreg field of struct
   user.  */

static int
u_debugreg_offset (int regnum)
{
  return (offsetof (struct user, u_debugreg)
          + sizeof (((struct user *) 0)->u_debugreg[0]) * regnum);
}
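
/* For illustration: on x86-64, where u_debugreg is an array of eight
   unsigned longs, u_debugreg_offset (7) evaluates to
   offsetof (struct user, u_debugreg) + 7 * 8, i.e. the
   PTRACE_PEEKUSER/POKEUSER offset of DR7.  */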


/* Support for debug registers.  */

static unsigned long
x86_linux_dr_get (ptid_t ptid, int regnum)
{
  int tid;
  unsigned long value;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  value = ptrace (PTRACE_PEEKUSER, tid, u_debugreg_offset (regnum), 0);
  if (errno != 0)
    error ("Couldn't read debug register");

  return value;
}

static void
x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
{
  int tid;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  ptrace (PTRACE_POKEUSER, tid, u_debugreg_offset (regnum), value);
  if (errno != 0)
    error ("Couldn't write debug register");
}

static int
update_debug_registers_callback (struct inferior_list_entry *entry,
                                 void *pid_p)
{
  struct thread_info *thr = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thr);
  int pid = *(int *) pid_p;

  /* Only update the threads of this process.  */
  if (pid_of (thr) == pid)
    {
      /* The actual update is done later, just before resuming the lwp;
         here we just mark that the registers need updating.  */
      lwp->arch_private->debug_registers_changed = 1;

      /* If the lwp isn't stopped, force it to momentarily pause, so
         we can update its debug registers.  */
      if (!lwp->stopped)
        linux_stop_lwp (lwp);
    }

  return 0;
}

/* Update the inferior's debug register REGNUM from STATE.  The actual
   write is deferred until each thread is next resumed.  */

static void
x86_dr_low_set_addr (int regnum, CORE_ADDR addr)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (current_thread);

  gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);

  find_inferior (&all_threads, update_debug_registers_callback, &pid);
}

/* Return the inferior's debug register REGNUM.  */

static CORE_ADDR
x86_dr_low_get_addr (int regnum)
{
  ptid_t ptid = ptid_of (current_thread);

  gdb_assert (DR_FIRSTADDR <= regnum && regnum <= DR_LASTADDR);

  return x86_linux_dr_get (ptid, regnum);
}

/* Update the inferior's DR7 debug control register from STATE.  */

static void
x86_dr_low_set_control (unsigned long control)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (current_thread);

  find_inferior (&all_threads, update_debug_registers_callback, &pid);
}

/* Return the inferior's DR7 debug control register.  */

static unsigned long
x86_dr_low_get_control (void)
{
  ptid_t ptid = ptid_of (current_thread);

  return x86_linux_dr_get (ptid, DR_CONTROL);
}

/* Return the value of the DR6 debug status register from the
   inferior.  */

static unsigned long
x86_dr_low_get_status (void)
{
  ptid_t ptid = ptid_of (current_thread);

  return x86_linux_dr_get (ptid, DR_STATUS);
}

/* Low-level function vector.  */
struct x86_dr_low_type x86_dr_low =
  {
    x86_dr_low_set_control,
    x86_dr_low_set_addr,
    x86_dr_low_get_addr,
    x86_dr_low_get_status,
    x86_dr_low_get_control,
    sizeof (void *),
  };
\f
/* Breakpoint/Watchpoint support.  */

static int
x86_supports_z_point_type (char z_type)
{
  switch (z_type)
    {
    case Z_PACKET_SW_BP:
    case Z_PACKET_HW_BP:
    case Z_PACKET_WRITE_WP:
    case Z_PACKET_ACCESS_WP:
      return 1;
    default:
      return 0;
    }
}

static int
x86_insert_point (enum raw_bkpt_type type, CORE_ADDR addr,
                  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_sw:
      return insert_memory_breakpoint (bp);

    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
        enum target_hw_bp_type hw_type
          = raw_bkpt_type_to_target_hw_bp_type (type);
        struct x86_debug_reg_state *state
          = &proc->priv->arch_private->debug_reg_state;

        return x86_dr_insert_watchpoint (state, hw_type, addr, size);
      }

    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_remove_point (enum raw_bkpt_type type, CORE_ADDR addr,
                  int size, struct raw_breakpoint *bp)
{
  struct process_info *proc = current_process ();

  switch (type)
    {
    case raw_bkpt_type_sw:
      return remove_memory_breakpoint (bp);

    case raw_bkpt_type_hw:
    case raw_bkpt_type_write_wp:
    case raw_bkpt_type_access_wp:
      {
        enum target_hw_bp_type hw_type
          = raw_bkpt_type_to_target_hw_bp_type (type);
        struct x86_debug_reg_state *state
          = &proc->priv->arch_private->debug_reg_state;

        return x86_dr_remove_watchpoint (state, hw_type, addr, size);
      }
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return x86_dr_stopped_by_watchpoint (&proc->priv->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (x86_dr_stopped_data_address (&proc->priv->arch_private->debug_reg_state,
                                   &addr))
    return addr;
  return 0;
}
\f
/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = XCNEW (struct arch_process_info);

  x86_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a new thread is detected.  */

static struct arch_lwp_info *
x86_linux_new_thread (void)
{
  struct arch_lwp_info *info = XCNEW (struct arch_lwp_info);

  info->debug_registers_changed = 1;

  return info;
}

/* Called when resuming a thread.
   If the debug regs have changed, update the thread's copies.  */

static void
x86_linux_prepare_to_resume (struct lwp_info *lwp)
{
  ptid_t ptid = ptid_of (get_lwp_thread (lwp));
  int clear_status = 0;

  if (lwp->arch_private->debug_registers_changed)
    {
      int i;
      int pid = ptid_get_pid (ptid);
      struct process_info *proc = find_process_pid (pid);
      struct x86_debug_reg_state *state
        = &proc->priv->arch_private->debug_reg_state;

      x86_linux_dr_set (ptid, DR_CONTROL, 0);

      ALL_DEBUG_ADDRESS_REGISTERS (i)
        if (state->dr_ref_count[i] > 0)
          {
            x86_linux_dr_set (ptid, i, state->dr_mirror[i]);

            /* If we're setting a watchpoint, any change the inferior
               had done itself to the debug registers needs to be
               discarded, otherwise, x86_dr_stopped_data_address can
               get confused.  */
            clear_status = 1;
          }

      if (state->dr_control_mirror != 0)
        x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);

      lwp->arch_private->debug_registers_changed = 0;
    }

  if (clear_status || lwp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    x86_linux_dr_set (ptid, DR_STATUS, 0);
}
\f
/* When GDBSERVER is built as a 64-bit application on Linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* These types below (compat_*) define a siginfo type that is layout
   compatible with the siginfo type exported by the 32-bit userspace
   support.  */

#ifdef __x86_64__

typedef int compat_int_t;
typedef unsigned int compat_uptr_t;

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;

struct compat_timeval
{
  compat_time_t tv_sec;
  int tv_usec;
};

typedef union compat_sigval
{
  compat_int_t sival_int;
  compat_uptr_t sival_ptr;
} compat_sigval_t;

typedef struct compat_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_clock_t _utime;
      compat_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_siginfo_t;

/* For x32, clock_t in _sigchld is a 64-bit type aligned at 4 bytes.  */
typedef long __attribute__ ((__aligned__ (4))) compat_x32_clock_t;

typedef struct compat_x32_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_x32_clock_t _utime;
      compat_x32_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_x32_siginfo_t __attribute__ ((__aligned__ (8)));

#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

/* glibc, at least up to 2.3.2, doesn't have si_timerid and si_overrun.
   In their place are si_timer1 and si_timer2.  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif

static void
compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_status = from->si_status;
          to->cpt_si_utime = from->si_utime;
          to->cpt_si_stime = from->si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->cpt_si_addr = (intptr_t) from->si_addr;
          break;
        case SIGPOLL:
          to->cpt_si_band = from->si_band;
          to->cpt_si_fd = from->si_fd;
          break;
        default:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_ptr = (intptr_t) from->si_ptr;
          break;
        }
    }
}

static void
siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_status = from->cpt_si_status;
          to->si_utime = from->cpt_si_utime;
          to->si_stime = from->cpt_si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
          break;
        case SIGPOLL:
          to->si_band = from->cpt_si_band;
          to->si_fd = from->cpt_si_fd;
          break;
        default:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
          break;
        }
    }
}

static void
compat_x32_siginfo_from_siginfo (compat_x32_siginfo_t *to,
                                 siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code < 0)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_status = from->si_status;
          to->cpt_si_utime = from->si_utime;
          to->cpt_si_stime = from->si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->cpt_si_addr = (intptr_t) from->si_addr;
          break;
        case SIGPOLL:
          to->cpt_si_band = from->si_band;
          to->cpt_si_fd = from->si_fd;
          break;
        default:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_ptr = (intptr_t) from->si_ptr;
          break;
        }
    }
}

static void
siginfo_from_compat_x32_siginfo (siginfo_t *to,
                                 compat_x32_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code < 0)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_status = from->cpt_si_status;
          to->si_utime = from->cpt_si_utime;
          to->si_stime = from->cpt_si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
          break;
        case SIGPOLL:
          to->si_band = from->cpt_si_band;
          to->si_fd = from->cpt_si_fd;
          break;
        default:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
          break;
        }
    }
}

#endif /* __x86_64__ */

/* Convert a native/host siginfo object into/from the siginfo in the
   layout of the inferior's architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to NATIVE.  If DIRECTION is 0, copy from NATIVE to
   INF.  */

static int
x86_siginfo_fixup (siginfo_t *native, void *inf, int direction)
{
#ifdef __x86_64__
  unsigned int machine;
  int tid = lwpid_of (current_thread);
  int is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (!is_64bit_tdesc ())
    {
      gdb_assert (sizeof (siginfo_t) == sizeof (compat_siginfo_t));

      if (direction == 0)
        compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
      else
        siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);

      return 1;
    }
  /* No fixup for native x32 GDB.  */
  else if (!is_elf64 && sizeof (void *) == 8)
    {
      gdb_assert (sizeof (siginfo_t) == sizeof (compat_x32_siginfo_t));

      if (direction == 0)
        compat_x32_siginfo_from_siginfo ((struct compat_x32_siginfo *) inf,
                                         native);
      else
        siginfo_from_compat_x32_siginfo (native,
                                         (struct compat_x32_siginfo *) inf);

      return 1;
    }
#endif

  return 0;
}
\f
static int use_xml;

/* Format of XSAVE extended state is:
     struct
     {
       fxsave_bytes[0..463]
       sw_usable_bytes[464..511]
       xstate_hdr_bytes[512..575]
       avx_bytes[576..831]
       future_state etc
     };

  The same memory layout is used for the coredump NT_X86_XSTATE note
  representing the XSAVE extended state registers.

  The first 8 bytes of sw_usable_bytes, at offsets [464..471], hold the
  OS-enabled extended state mask, which is the same as the extended
  control register 0 (the XFEATURE_ENABLED_MASK register), XCR0.  We can
  use this mask together with the mask saved in the xstate_hdr_bytes to
  determine what states the processor/OS supports and what state, used
  or initialized, the process/thread is in.  */
#define I386_LINUX_XSAVE_XCR0_OFFSET 464
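
/* For example (a sketch; XSAVE_RAW is a hypothetical name for a raw
   XSAVE block read via PTRACE_GETREGSET with NT_X86_XSTATE):

     uint64_t *words = (uint64_t *) xsave_raw;
     uint64_t xcr0 = words[I386_LINUX_XSAVE_XCR0_OFFSET / sizeof (uint64_t)];

   i.e. XCR0 is the 58th 64-bit word of the block; this is exactly how
   x86_linux_read_description below extracts it.  */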

/* Does the current host support the GETFPXREGS request?  The header
   file may or may not define it, and even if it is defined, the
   kernel will return EIO if it's running on a pre-SSE processor.  */
int have_ptrace_getfpxregs =
#ifdef HAVE_PTRACE_GETFPXREGS
  -1
#else
  0
#endif
;

/* Does the current host support PTRACE_GETREGSET?  */
static int have_ptrace_getregset = -1;

/* Get Linux/x86 target description from running target.  */

static const struct target_desc *
x86_linux_read_description (void)
{
  unsigned int machine;
  int is_elf64;
  int xcr0_features;
  int tid;
  static uint64_t xcr0;
  struct regset_info *regset;

  tid = lwpid_of (current_thread);

  is_elf64 = linux_pid_exe_is_elf_64_file (tid, &machine);

  if (sizeof (void *) == 4)
    {
      if (is_elf64 > 0)
        error (_("Can't debug 64-bit process with 32-bit GDBserver"));
#ifndef __x86_64__
      else if (machine == EM_X86_64)
        error (_("Can't debug x86-64 process with 32-bit GDBserver"));
#endif
    }

#if !defined __x86_64__ && defined HAVE_PTRACE_GETFPXREGS
  if (machine == EM_386 && have_ptrace_getfpxregs == -1)
    {
      elf_fpxregset_t fpxregs;

      if (ptrace (PTRACE_GETFPXREGS, tid, 0, (long) &fpxregs) < 0)
        {
          have_ptrace_getfpxregs = 0;
          have_ptrace_getregset = 0;
          return tdesc_i386_mmx_linux;
        }
      else
        have_ptrace_getfpxregs = 1;
    }
#endif

  if (!use_xml)
    {
      x86_xcr0 = X86_XSTATE_SSE_MASK;

      /* Don't use XML.  */
#ifdef __x86_64__
      if (machine == EM_X86_64)
        return tdesc_amd64_linux_no_xml;
      else
#endif
        return tdesc_i386_linux_no_xml;
    }

  if (have_ptrace_getregset == -1)
    {
      uint64_t xstateregs[(X86_XSTATE_SSE_SIZE / sizeof (uint64_t))];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, tid,
                  (unsigned int) NT_X86_XSTATE, (long) &iov) < 0)
        have_ptrace_getregset = 0;
      else
        {
          have_ptrace_getregset = 1;

          /* Get XCR0 from XSAVE extended state.  */
          xcr0 = xstateregs[(I386_LINUX_XSAVE_XCR0_OFFSET
                             / sizeof (uint64_t))];

          /* Use PTRACE_GETREGSET if it is available.  */
          for (regset = x86_regsets;
               regset->fill_function != NULL; regset++)
            if (regset->get_request == PTRACE_GETREGSET)
              regset->size = X86_XSTATE_SIZE (xcr0);
            else if (regset->type != GENERAL_REGS)
              regset->size = 0;
        }
    }

  /* Check the native XCR0 only if PTRACE_GETREGSET is available.  */
  xcr0_features = (have_ptrace_getregset
                   && (xcr0 & X86_XSTATE_ALL_MASK));

  if (xcr0_features)
    x86_xcr0 = xcr0;

  if (machine == EM_X86_64)
    {
#ifdef __x86_64__
      if (is_elf64)
        {
          if (xcr0_features)
            {
              switch (xcr0 & X86_XSTATE_ALL_MASK)
                {
                case X86_XSTATE_AVX512_MASK:
                  return tdesc_amd64_avx512_linux;

                case X86_XSTATE_MPX_MASK:
                  return tdesc_amd64_mpx_linux;

                case X86_XSTATE_AVX_MASK:
                  return tdesc_amd64_avx_linux;

                default:
                  return tdesc_amd64_linux;
                }
            }
          else
            return tdesc_amd64_linux;
        }
      else
        {
          if (xcr0_features)
            {
              switch (xcr0 & X86_XSTATE_ALL_MASK)
                {
                case X86_XSTATE_AVX512_MASK:
                  return tdesc_x32_avx512_linux;

                case X86_XSTATE_MPX_MASK: /* No MPX on x32.  */
                case X86_XSTATE_AVX_MASK:
                  return tdesc_x32_avx_linux;

                default:
                  return tdesc_x32_linux;
                }
            }
          else
            return tdesc_x32_linux;
        }
#endif
    }
  else
    {
      if (xcr0_features)
        {
          switch (xcr0 & X86_XSTATE_ALL_MASK)
            {
            case (X86_XSTATE_AVX512_MASK):
              return tdesc_i386_avx512_linux;

            case (X86_XSTATE_MPX_MASK):
              return tdesc_i386_mpx_linux;

            case (X86_XSTATE_AVX_MASK):
              return tdesc_i386_avx_linux;

            default:
              return tdesc_i386_linux;
            }
        }
      else
        return tdesc_i386_linux;
    }

  gdb_assert_not_reached ("failed to return tdesc");
}

/* Callback for find_inferior.  Stops iteration when a thread with a
   given PID is found.  */

static int
same_process_callback (struct inferior_list_entry *entry, void *data)
{
  int pid = *(int *) data;

  return (ptid_get_pid (entry->id) == pid);
}

/* Callback for for_each_inferior.  Calls the arch_setup routine for
   each process.  */

static void
x86_arch_setup_process_callback (struct inferior_list_entry *entry)
{
  int pid = ptid_get_pid (entry->id);

  /* Look up any thread of this process.  */
  current_thread
    = (struct thread_info *) find_inferior (&all_threads,
                                            same_process_callback, &pid);

  the_low_target.arch_setup ();
}

/* Update the target description of all processes; a new GDB has
   connected, and it may or may not support XML target descriptions.  */

static void
x86_linux_update_xmltarget (void)
{
  struct thread_info *saved_thread = current_thread;

  /* Before changing the register cache's internal layout, flush the
     contents of the current valid caches back to the threads, and
     release the current regcache objects.  */
  regcache_release ();

  for_each_inferior (&all_processes, x86_arch_setup_process_callback);

  current_thread = saved_thread;
}

/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (const char *query)
{
  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  if (query != NULL && strncmp (query, "xmlRegisters=", 13) == 0)
    {
      char *copy = xstrdup (query + 13);
      char *p;

      for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
        {
          if (strcmp (p, "i386") == 0)
            {
              use_xml = 1;
              break;
            }
        }

      free (copy);
    }

  x86_linux_update_xmltarget ();
}
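
/* For example, if GDB's qSupported packet includes
   "xmlRegisters=i386,arm" (an illustrative value), the loop above finds
   "i386" in the comma-separated list and sets use_xml, so
   x86_linux_read_description will hand back an XML-based tdesc.  */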

/* Common for x86/x86-64.  */

static struct regsets_info x86_regsets_info =
  {
    x86_regsets, /* regsets */
    0, /* num_regsets */
    NULL, /* disabled_regsets */
  };

#ifdef __x86_64__
static struct regs_info amd64_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    NULL, /* usrregs_info */
    &x86_regsets_info
  };
#endif
static struct usrregs_info i386_linux_usrregs_info =
  {
    I386_NUM_REGS,
    i386_regmap,
  };

static struct regs_info i386_linux_regs_info =
  {
    NULL, /* regset_bitmap */
    &i386_linux_usrregs_info,
    &x86_regsets_info
  };

const struct regs_info *
x86_linux_regs_info (void)
{
#ifdef __x86_64__
  if (is_64bit_tdesc ())
    return &amd64_linux_regs_info;
  else
#endif
    return &i386_linux_regs_info;
}

/* Initialize the target description for the architecture of the
   inferior.  */

static void
x86_arch_setup (void)
{
  current_process ()->tdesc = x86_linux_read_description ();
}

static int
x86_supports_tracepoints (void)
{
  return 1;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
        break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
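
/* For example, push_opcode (buf, "48 89 e6") parses the hex string and
   writes the three bytes 0x48 0x89 0xe6 (the encoding of
   "mov %rsp,%rsi") into BUF, returning 3.  */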

#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Writes the jump instruction for the jump pad to
   JJUMP_PAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                        CORE_ADDR collector,
                                        CORE_ADDR lockaddr,
                                        ULONGEST orig_size,
                                        CORE_ADDR *jump_entry,
                                        CORE_ADDR *trampoline,
                                        ULONGEST *trampoline_size,
                                        unsigned char *jjump_pad_insn,
                                        ULONGEST *jjump_pad_insn_size,
                                        CORE_ADDR *adjusted_insn_addr,
                                        CORE_ADDR *adjusted_insn_addr_end,
                                        char *err)
{
  unsigned char buf[40];
  int i, offset;
  int64_t loffset;

  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs <tpaddr>,%rdi */
  buf[i++] = 0xbf;
  *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
  i += sizeof (unsigned long);
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
                    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
1699 i += push_opcode (&buf[i], "48 be"); /* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */

  /* The tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf"); /* movabs <tpoint>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function, being in the shared library, may be more
     than 2^31 bytes away from the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3"); /* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */

  loffset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
               "E.Jump back from jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  loffset = *jump_entry - (tpaddr + sizeof (jump_insn));
  if (loffset > INT_MAX || loffset < INT_MIN)
    {
      sprintf (err,
               "E.Jump pad too far from tracepoint "
               "(offset 0x%" PRIx64 " > int32).", loffset);
      return 1;
    }

  offset = (int) loffset;

  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */

/* Build a jump pad that saves registers and calls a collection
   function.  Writes the jump instruction for the jump pad to
   JJUMP_PAD_INSN.  The caller is responsible for writing it in at the
   tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                       CORE_ADDR collector,
                                       CORE_ADDR lockaddr,
                                       ULONGEST orig_size,
                                       CORE_ADDR *jump_entry,
                                       CORE_ADDR *trampoline,
                                       ULONGEST *trampoline_size,
                                       unsigned char *jjump_pad_insn,
                                       ULONGEST *jjump_pad_insn_size,
                                       CORE_ADDR *adjusted_insn_addr,
                                       CORE_ADDR *adjusted_insn_addr_end,
                                       char *err)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this uses cmpxchg, which is not available on the
     original i386.  If we cared about that, this could use xchg
     instead.  */
1878
1879 i = 0;
1880 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1881 i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
1882 %esp,<lockaddr> */
1883 memcpy (&buf[i], (void *) &lockaddr, 4);
1884 i += 4;
1885 i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
1886 i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
1887 append_insns (&buildaddr, i, buf);
1888
1889
1890 /* Set up arguments to the gdb_collect call. */
1891 i = 0;
1892 i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
1893 i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
1894 i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
1895 append_insns (&buildaddr, i, buf);
1896
1897 i = 0;
1898 i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
1899 append_insns (&buildaddr, i, buf);
1900
1901 i = 0;
1902 i += push_opcode (&buf[i], "c7 04 24"); /* movl <addr>,(%esp) */
1903 memcpy (&buf[i], (void *) &tpoint, 4);
1904 i += 4;
1905 append_insns (&buildaddr, i, buf);
1906
1907 buf[0] = 0xe8; /* call <reladdr> */
1908 offset = collector - (buildaddr + sizeof (jump_insn));
1909 memcpy (buf + 1, &offset, 4);
1910 append_insns (&buildaddr, 5, buf);
1911 /* Clean up after the call. */
1912 buf[0] = 0x83; /* add $0x8,%esp */
1913 buf[1] = 0xc4;
1914 buf[2] = 0x08;
1915 append_insns (&buildaddr, 3, buf);
1916
1917
1918 /* Clear the spin-lock. This would need the LOCK prefix on older
1919 broken archs. */
1920 i = 0;
1921 i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
1922 i += push_opcode (&buf[i], "a3"); /* mov %eax, lockaddr */
1923 memcpy (buf + i, &lockaddr, 4);
1924 i += 4;
1925 append_insns (&buildaddr, i, buf);
1926
1927
1928 /* Remove stack that had been used for the collect_t object. */
1929 i = 0;
1930 i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
1931 append_insns (&buildaddr, i, buf);
1932
1933 i = 0;
1934 buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
1935 buf[i++] = 0xc4;
1936 buf[i++] = 0x04;
1937 buf[i++] = 0x17; /* pop %ss */
1938 buf[i++] = 0x0f; /* pop %gs */
1939 buf[i++] = 0xa9;
1940 buf[i++] = 0x0f; /* pop %fs */
1941 buf[i++] = 0xa1;
1942 buf[i++] = 0x07; /* pop %es */
1943 buf[i++] = 0x1f; /* pop %ds */
1944 buf[i++] = 0x9d; /* popf */
1945 buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
1946 buf[i++] = 0xc4;
1947 buf[i++] = 0x04;
1948 buf[i++] = 0x61; /* popad */
1949 append_insns (&buildaddr, i, buf);
1950
1951 /* Now, adjust the original instruction to execute in the jump
1952 pad. */
1953 *adjusted_insn_addr = buildaddr;
1954 relocate_instruction (&buildaddr, tpaddr);
1955 *adjusted_insn_addr_end = buildaddr;
1956
1957 /* Write the jump back to the program. */
1958 offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
1959 memcpy (buf, jump_insn, sizeof (jump_insn));
1960 memcpy (buf + 1, &offset, 4);
1961 append_insns (&buildaddr, sizeof (jump_insn), buf);
1962
1963 /* The jump pad is now built. Wire in a jump to our jump pad. This
1964 is always done last (by our caller actually), so that we can
1965 install fast tracepoints with threads running. This relies on
1966 the agent's atomic write support. */
1967 if (orig_size == 4)
1968 {
1969 /* Create a trampoline. */
1970 *trampoline_size = sizeof (jump_insn);
1971 if (!claim_trampoline_space (*trampoline_size, trampoline))
1972 {
1973 /* No trampoline space available. */
1974 strcpy (err,
1975 "E.Cannot allocate trampoline space needed for fast "
1976 "tracepoints on 4-byte instructions.");
1977 return 1;
1978 }
1979
1980 offset = *jump_entry - (*trampoline + sizeof (jump_insn));
1981 memcpy (buf, jump_insn, sizeof (jump_insn));
1982 memcpy (buf + 1, &offset, 4);
1983 write_inferior_memory (*trampoline, buf, sizeof (jump_insn));
1984
1985 /* Use a 16-bit relative jump instruction to jump to the trampoline. */
1986 offset = (*trampoline - (tpaddr + sizeof (small_jump_insn))) & 0xffff;
1987 memcpy (buf, small_jump_insn, sizeof (small_jump_insn));
1988 memcpy (buf + 2, &offset, 2);
1989 memcpy (jjump_pad_insn, buf, sizeof (small_jump_insn));
1990 *jjump_pad_insn_size = sizeof (small_jump_insn);
1991 }
1992 else
1993 {
1994 /* Else use a 32-bit relative jump instruction. */
1995 offset = *jump_entry - (tpaddr + sizeof (jump_insn));
1996 memcpy (buf, jump_insn, sizeof (jump_insn));
1997 memcpy (buf + 1, &offset, 4);
1998 memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
1999 *jjump_pad_insn_size = sizeof (jump_insn);
2000 }
2001
2002 /* Return the end address of our pad. */
2003 *jump_entry = buildaddr;
2004
2005 return 0;
2006 }
2007
2008 static int
2009 x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
2010 CORE_ADDR collector,
2011 CORE_ADDR lockaddr,
2012 ULONGEST orig_size,
2013 CORE_ADDR *jump_entry,
2014 CORE_ADDR *trampoline,
2015 ULONGEST *trampoline_size,
2016 unsigned char *jjump_pad_insn,
2017 ULONGEST *jjump_pad_insn_size,
2018 CORE_ADDR *adjusted_insn_addr,
2019 CORE_ADDR *adjusted_insn_addr_end,
2020 char *err)
2021 {
2022 #ifdef __x86_64__
2023 if (is_64bit_tdesc ())
2024 return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
2025 collector, lockaddr,
2026 orig_size, jump_entry,
2027 trampoline, trampoline_size,
2028 jjump_pad_insn,
2029 jjump_pad_insn_size,
2030 adjusted_insn_addr,
2031 adjusted_insn_addr_end,
2032 err);
2033 #endif
2034
2035 return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
2036 collector, lockaddr,
2037 orig_size, jump_entry,
2038 trampoline, trampoline_size,
2039 jjump_pad_insn,
2040 jjump_pad_insn_size,
2041 adjusted_insn_addr,
2042 adjusted_insn_addr_end,
2043 err);
2044 }
2045
2046 /* Return the minimum instruction length for fast tracepoints on x86/x86-64
2047 architectures. */
2048
2049 static int
2050 x86_get_min_fast_tracepoint_insn_len (void)
2051 {
2052 static int warned_about_fast_tracepoints = 0;
2053
2054 #ifdef __x86_64__
2055 /* On x86-64, 5-byte jump instructions with a 4-byte offset are always
2056 used for fast tracepoints. */
2057 if (is_64bit_tdesc ())
2058 return 5;
2059 #endif
2060
2061 if (agent_loaded_p ())
2062 {
2063 char errbuf[IPA_BUFSIZ];
2064
2065 errbuf[0] = '\0';
2066
2067 /* On x86, if trampolines are available, then 4-byte jump instructions
2068 with a 2-byte offset may be used, otherwise 5-byte jump instructions
2069 with a 4-byte offset are used instead. */
2070 if (have_fast_tracepoint_trampoline_buffer (errbuf))
2071 return 4;
2072 else
2073 {
2074 	  /* GDB has no channel through which to explain to the user why
2075 	     a shorter fast tracepoint is not possible, but at least make
2076 	     GDBserver mention that something has gone awry. */
2077 if (!warned_about_fast_tracepoints)
2078 {
2079 warning ("4-byte fast tracepoints not available; %s\n", errbuf);
2080 warned_about_fast_tracepoints = 1;
2081 }
2082 return 5;
2083 }
2084 }
2085 else
2086 {
2087 /* Indicate that the minimum length is currently unknown since the IPA
2088 has not loaded yet. */
2089 return 0;
2090 }
2091 }
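/* For reference, the two encodings discussed above, sketched from
   how the jump_insn/small_jump_insn templates are filled in earlier
   in this file:

     e9 xx xx xx xx    jmp rel32  (5 bytes, 4-byte offset)
     66 e9 xx xx       jmp rel16  (4 bytes, 2-byte offset)

   The operand-size prefix on the second form makes the jump truncate
   %eip to 16 bits, which is why it is only usable when the trampoline
   buffer sits in the lowest 64 KiB of the inferior's address space.  */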
2092
2093 static void
2094 add_insns (unsigned char *start, int len)
2095 {
2096 CORE_ADDR buildaddr = current_insn_ptr;
2097
2098 if (debug_threads)
2099 debug_printf ("Adding %d bytes of insn at %s\n",
2100 len, paddress (buildaddr));
2101
2102 append_insns (&buildaddr, len, start);
2103 current_insn_ptr = buildaddr;
2104 }
2105
2106 /* Our general strategy for emitting code is to avoid specifying raw
2107 bytes whenever possible, and instead copy a block of inline asm
2108 that is embedded in the function. This is a little messy, because
2109 we need to keep the compiler from discarding what looks like dead
2110 code, plus suppress various warnings. */
2111
2112 #define EMIT_ASM(NAME, INSNS) \
2113 do \
2114 { \
2115 extern unsigned char start_ ## NAME, end_ ## NAME; \
2116 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2117 __asm__ ("jmp end_" #NAME "\n" \
2118 "\t" "start_" #NAME ":" \
2119 "\t" INSNS "\n" \
2120 "\t" "end_" #NAME ":"); \
2121 } while (0)
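/* As an illustration (this invocation appears nowhere in the file),
   EMIT_ASM (sketch, "nop") would expand to roughly

     extern unsigned char start_sketch, end_sketch;
     add_insns (&start_sketch, &end_sketch - &start_sketch);
     __asm__ ("jmp end_sketch\n"
	      "\t" "start_sketch:" "\t" "nop\n"
	      "\t" "end_sketch:");

   That is, the assembled bytes for INSNS land in gdbserver's own text
   section bracketed by two labels, add_insns copies that byte range
   into the jump pad, and the leading jmp keeps gdbserver itself from
   ever executing the bytes.  */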
2122
2123 #ifdef __x86_64__
2124
2125 #define EMIT_ASM32(NAME,INSNS) \
2126 do \
2127 { \
2128 extern unsigned char start_ ## NAME, end_ ## NAME; \
2129 add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
2130 __asm__ (".code32\n" \
2131 "\t" "jmp end_" #NAME "\n" \
2132 "\t" "start_" #NAME ":\n" \
2133 "\t" INSNS "\n" \
2134 "\t" "end_" #NAME ":\n" \
2135 ".code64\n"); \
2136 } while (0)
2137
2138 #else
2139
2140 #define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)
2141
2142 #endif
2143
2144 #ifdef __x86_64__
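/* A note on conventions in the amd64 emitters below: the compiled
   agent expression keeps the top of its evaluation stack in %rax and
   spills everything deeper onto the CPU stack, so a typical binary
   operation is "op (%rsp),%rax" followed by "lea 0x8(%rsp),%rsp" to
   discard the spilled operand, as in amd64_emit_add.  */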
2145
2146 static void
2147 amd64_emit_prologue (void)
2148 {
2149 EMIT_ASM (amd64_prologue,
2150 "pushq %rbp\n\t"
2151 "movq %rsp,%rbp\n\t"
2152 "sub $0x20,%rsp\n\t"
2153 "movq %rdi,-8(%rbp)\n\t"
2154 "movq %rsi,-16(%rbp)");
2155 }
2156
2158 static void
2159 amd64_emit_epilogue (void)
2160 {
2161 EMIT_ASM (amd64_epilogue,
2162 "movq -16(%rbp),%rdi\n\t"
2163 "movq %rax,(%rdi)\n\t"
2164 "xor %rax,%rax\n\t"
2165 "leave\n\t"
2166 "ret");
2167 }
2168
2169 static void
2170 amd64_emit_add (void)
2171 {
2172 EMIT_ASM (amd64_add,
2173 "add (%rsp),%rax\n\t"
2174 "lea 0x8(%rsp),%rsp");
2175 }
2176
2177 static void
2178 amd64_emit_sub (void)
2179 {
2180 EMIT_ASM (amd64_sub,
2181 "sub %rax,(%rsp)\n\t"
2182 "pop %rax");
2183 }
2184
2185 static void
2186 amd64_emit_mul (void)
2187 {
2188 emit_error = 1;
2189 }
2190
2191 static void
2192 amd64_emit_lsh (void)
2193 {
2194 emit_error = 1;
2195 }
2196
2197 static void
2198 amd64_emit_rsh_signed (void)
2199 {
2200 emit_error = 1;
2201 }
2202
2203 static void
2204 amd64_emit_rsh_unsigned (void)
2205 {
2206 emit_error = 1;
2207 }
2208
2209 static void
2210 amd64_emit_ext (int arg)
2211 {
2212 switch (arg)
2213 {
2214 case 8:
2215 EMIT_ASM (amd64_ext_8,
2216 "cbtw\n\t"
2217 "cwtl\n\t"
2218 "cltq");
2219 break;
2220 case 16:
2221 EMIT_ASM (amd64_ext_16,
2222 "cwtl\n\t"
2223 "cltq");
2224 break;
2225 case 32:
2226 EMIT_ASM (amd64_ext_32,
2227 "cltq");
2228 break;
2229 default:
2230 emit_error = 1;
2231 }
2232 }
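/* The AT&T mnemonics in amd64_emit_ext chain sign extensions: cbtw
   widens %al to %ax, cwtl widens %ax to %eax, and cltq widens %eax
   to %rax, so the 8/16/32 cases each extend the top of the stack to
   the full 64 bits.  */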
2233
2234 static void
2235 amd64_emit_log_not (void)
2236 {
2237 EMIT_ASM (amd64_log_not,
2238 "test %rax,%rax\n\t"
2239 "sete %cl\n\t"
2240 "movzbq %cl,%rax");
2241 }
2242
2243 static void
2244 amd64_emit_bit_and (void)
2245 {
2246 EMIT_ASM (amd64_and,
2247 "and (%rsp),%rax\n\t"
2248 "lea 0x8(%rsp),%rsp");
2249 }
2250
2251 static void
2252 amd64_emit_bit_or (void)
2253 {
2254 EMIT_ASM (amd64_or,
2255 "or (%rsp),%rax\n\t"
2256 "lea 0x8(%rsp),%rsp");
2257 }
2258
2259 static void
2260 amd64_emit_bit_xor (void)
2261 {
2262 EMIT_ASM (amd64_xor,
2263 "xor (%rsp),%rax\n\t"
2264 "lea 0x8(%rsp),%rsp");
2265 }
2266
2267 static void
2268 amd64_emit_bit_not (void)
2269 {
2270 EMIT_ASM (amd64_bit_not,
2271 "xorq $0xffffffffffffffff,%rax");
2272 }
2273
2274 static void
2275 amd64_emit_equal (void)
2276 {
2277 EMIT_ASM (amd64_equal,
2278 "cmp %rax,(%rsp)\n\t"
2279 "je .Lamd64_equal_true\n\t"
2280 "xor %rax,%rax\n\t"
2281 "jmp .Lamd64_equal_end\n\t"
2282 ".Lamd64_equal_true:\n\t"
2283 "mov $0x1,%rax\n\t"
2284 ".Lamd64_equal_end:\n\t"
2285 "lea 0x8(%rsp),%rsp");
2286 }
2287
2288 static void
2289 amd64_emit_less_signed (void)
2290 {
2291 EMIT_ASM (amd64_less_signed,
2292 "cmp %rax,(%rsp)\n\t"
2293 "jl .Lamd64_less_signed_true\n\t"
2294 "xor %rax,%rax\n\t"
2295 "jmp .Lamd64_less_signed_end\n\t"
2296 ".Lamd64_less_signed_true:\n\t"
2297 "mov $1,%rax\n\t"
2298 ".Lamd64_less_signed_end:\n\t"
2299 "lea 0x8(%rsp),%rsp");
2300 }
2301
2302 static void
2303 amd64_emit_less_unsigned (void)
2304 {
2305 EMIT_ASM (amd64_less_unsigned,
2306 "cmp %rax,(%rsp)\n\t"
2307 "jb .Lamd64_less_unsigned_true\n\t"
2308 "xor %rax,%rax\n\t"
2309 "jmp .Lamd64_less_unsigned_end\n\t"
2310 ".Lamd64_less_unsigned_true:\n\t"
2311 "mov $1,%rax\n\t"
2312 ".Lamd64_less_unsigned_end:\n\t"
2313 "lea 0x8(%rsp),%rsp");
2314 }
2315
2316 static void
2317 amd64_emit_ref (int size)
2318 {
2319 switch (size)
2320 {
2321 case 1:
2322 EMIT_ASM (amd64_ref1,
2323 "movb (%rax),%al");
2324 break;
2325 case 2:
2326 EMIT_ASM (amd64_ref2,
2327 "movw (%rax),%ax");
2328 break;
2329 case 4:
2330 EMIT_ASM (amd64_ref4,
2331 "movl (%rax),%eax");
2332 break;
2333 case 8:
2334 EMIT_ASM (amd64_ref8,
2335 "movq (%rax),%rax");
2336 break;
2337 }
2338 }
2339
2340 static void
2341 amd64_emit_if_goto (int *offset_p, int *size_p)
2342 {
2343 EMIT_ASM (amd64_if_goto,
2344 "mov %rax,%rcx\n\t"
2345 "pop %rax\n\t"
2346 "cmp $0,%rcx\n\t"
2347 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2348 if (offset_p)
2349 *offset_p = 10;
2350 if (size_p)
2351 *size_p = 4;
2352 }
2353
2354 static void
2355 amd64_emit_goto (int *offset_p, int *size_p)
2356 {
2357 EMIT_ASM (amd64_goto,
2358 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2359 if (offset_p)
2360 *offset_p = 1;
2361 if (size_p)
2362 *size_p = 4;
2363 }
2364
2365 static void
2366 amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2367 {
2368 int diff = (to - (from + size));
2369 unsigned char buf[sizeof (int)];
2370
2371 if (size != 4)
2372 {
2373 emit_error = 1;
2374 return;
2375 }
2376
2377 memcpy (buf, &diff, sizeof (int));
2378 write_inferior_memory (from, buf, sizeof (int));
2379 }
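/* For example (made-up addresses): if the 0xe9 opcode emitted by
   amd64_emit_goto sits at 0x7f0000001000, its rel32 operand starts at
   from = 0x7f0000001001 (hence the *offset_p of 1 above), and a later
   amd64_write_goto_address (0x7f0000001001, to, 4) patches the four
   placeholder bytes with to - 0x7f0000001005.  */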
2380
2381 static void
2382 amd64_emit_const (LONGEST num)
2383 {
2384 unsigned char buf[16];
2385 int i;
2386 CORE_ADDR buildaddr = current_insn_ptr;
2387
2388 i = 0;
2389 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
2390 memcpy (&buf[i], &num, sizeof (num));
2391 i += 8;
2392 append_insns (&buildaddr, i, buf);
2393 current_insn_ptr = buildaddr;
2394 }
2395
2396 static void
2397 amd64_emit_call (CORE_ADDR fn)
2398 {
2399 unsigned char buf[16];
2400 int i;
2401 CORE_ADDR buildaddr;
2402 LONGEST offset64;
2403
2404   /* The destination function, being in the shared library, may be
2405      more than 31 bits away from the compiled code pad. */
2406
2407 buildaddr = current_insn_ptr;
2408
2409 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
2410
2411 i = 0;
2412
2413 if (offset64 > INT_MAX || offset64 < INT_MIN)
2414 {
2415       /* Offset is too large for a relative call.  Load the address
2416 	 into %rdx and use an indirect callq; %rdx is call-clobbered
2417 	 and carries no argument here, so we needn't push/pop it. */
2418       buf[i++] = 0x48; /* movabs $fn,%rdx */
2419       buf[i++] = 0xba;
2420       memcpy (buf + i, &fn, 8);
2421       i += 8;
2422       buf[i++] = 0xff; /* callq *%rdx */
2423       buf[i++] = 0xd2;
2424 }
2425 else
2426 {
2427       int offset32 = offset64; /* we know we can't overflow here. */
      buf[i++] = 0xe8; /* call <reladdr> */
2428       memcpy (buf + i, &offset32, 4);
2429       i += 4;
2430 }
2431
2432 append_insns (&buildaddr, i, buf);
2433 current_insn_ptr = buildaddr;
2434 }
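/* An illustration with made-up addresses: with the next pad byte at
   buildaddr = 0x7ffff0000000 and fn = 0x7ffff0100000, offset64 is
   0xffffb, which fits in 32 bits, so the 5-byte sequence
   e8 fb ff 0f 00 is emitted; were fn several GiB away, the 12-byte
   movabs/indirect-call sequence would be used instead.  */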
2435
2436 static void
2437 amd64_emit_reg (int reg)
2438 {
2439 unsigned char buf[16];
2440 int i;
2441 CORE_ADDR buildaddr;
2442
2443 /* Assume raw_regs is still in %rdi. */
2444 buildaddr = current_insn_ptr;
2445 i = 0;
2446 buf[i++] = 0xbe; /* mov $<n>,%esi */
2447 memcpy (&buf[i], &reg, sizeof (reg));
2448 i += 4;
2449 append_insns (&buildaddr, i, buf);
2450 current_insn_ptr = buildaddr;
2451 amd64_emit_call (get_raw_reg_func_addr ());
2452 }
2453
2454 static void
2455 amd64_emit_pop (void)
2456 {
2457 EMIT_ASM (amd64_pop,
2458 "pop %rax");
2459 }
2460
2461 static void
2462 amd64_emit_stack_flush (void)
2463 {
2464 EMIT_ASM (amd64_stack_flush,
2465 "push %rax");
2466 }
2467
2468 static void
2469 amd64_emit_zero_ext (int arg)
2470 {
2471 switch (arg)
2472 {
2473 case 8:
2474 EMIT_ASM (amd64_zero_ext_8,
2475 "and $0xff,%rax");
2476 break;
2477 case 16:
2478 EMIT_ASM (amd64_zero_ext_16,
2479 "and $0xffff,%rax");
2480 break;
2481 case 32:
2482 EMIT_ASM (amd64_zero_ext_32,
2483 "mov $0xffffffff,%rcx\n\t"
2484 "and %rcx,%rax");
2485 break;
2486 default:
2487 emit_error = 1;
2488 }
2489 }
2490
2491 static void
2492 amd64_emit_swap (void)
2493 {
2494 EMIT_ASM (amd64_swap,
2495 "mov %rax,%rcx\n\t"
2496 "pop %rax\n\t"
2497 "push %rcx");
2498 }
2499
2500 static void
2501 amd64_emit_stack_adjust (int n)
2502 {
2503 unsigned char buf[16];
2504 int i;
2505 CORE_ADDR buildaddr = current_insn_ptr;
2506
2507 i = 0;
2508 buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
2509 buf[i++] = 0x8d;
2510 buf[i++] = 0x64;
2511 buf[i++] = 0x24;
2512   /* This only handles adjustments up to 15, since n * 8 must fit in
	 the signed 8-bit displacement, but we don't expect any more. */
2513 buf[i++] = n * 8;
2514 append_insns (&buildaddr, i, buf);
2515 current_insn_ptr = buildaddr;
2516 }
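/* For instance, n = 1 emits 48 8d 64 24 08, i.e. lea 0x8(%rsp),%rsp,
   dropping one spilled 8-byte entry without disturbing the condition
   flags (which a plain add to %rsp would clobber).  */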
2517
2518 /* FN's prototype is `LONGEST(*fn)(int)'. */
2519
2520 static void
2521 amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
2522 {
2523 unsigned char buf[16];
2524 int i;
2525 CORE_ADDR buildaddr;
2526
2527 buildaddr = current_insn_ptr;
2528 i = 0;
2529 buf[i++] = 0xbf; /* movl $<n>,%edi */
2530 memcpy (&buf[i], &arg1, sizeof (arg1));
2531 i += 4;
2532 append_insns (&buildaddr, i, buf);
2533 current_insn_ptr = buildaddr;
2534 amd64_emit_call (fn);
2535 }
2536
2537 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
2538
2539 static void
2540 amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
2541 {
2542 unsigned char buf[16];
2543 int i;
2544 CORE_ADDR buildaddr;
2545
2546 buildaddr = current_insn_ptr;
2547 i = 0;
2548 buf[i++] = 0xbf; /* movl $<n>,%edi */
2549 memcpy (&buf[i], &arg1, sizeof (arg1));
2550 i += 4;
2551 append_insns (&buildaddr, i, buf);
2552 current_insn_ptr = buildaddr;
2553 EMIT_ASM (amd64_void_call_2_a,
2554 /* Save away a copy of the stack top. */
2555 "push %rax\n\t"
2556 /* Also pass top as the second argument. */
2557 "mov %rax,%rsi");
2558 amd64_emit_call (fn);
2559 EMIT_ASM (amd64_void_call_2_b,
2560 /* Restore the stack top, %rax may have been trashed. */
2561 "pop %rax");
2562 }
2563
2564 void
2565 amd64_emit_eq_goto (int *offset_p, int *size_p)
2566 {
2567 EMIT_ASM (amd64_eq,
2568 "cmp %rax,(%rsp)\n\t"
2569 "jne .Lamd64_eq_fallthru\n\t"
2570 "lea 0x8(%rsp),%rsp\n\t"
2571 "pop %rax\n\t"
2572 /* jmp, but don't trust the assembler to choose the right jump */
2573 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2574 ".Lamd64_eq_fallthru:\n\t"
2575 "lea 0x8(%rsp),%rsp\n\t"
2576 "pop %rax");
2577
2578 if (offset_p)
2579 *offset_p = 13;
2580 if (size_p)
2581 *size_p = 4;
2582 }
2583
2584 void
2585 amd64_emit_ne_goto (int *offset_p, int *size_p)
2586 {
2587 EMIT_ASM (amd64_ne,
2588 "cmp %rax,(%rsp)\n\t"
2589 "je .Lamd64_ne_fallthru\n\t"
2590 "lea 0x8(%rsp),%rsp\n\t"
2591 "pop %rax\n\t"
2592 /* jmp, but don't trust the assembler to choose the right jump */
2593 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2594 ".Lamd64_ne_fallthru:\n\t"
2595 "lea 0x8(%rsp),%rsp\n\t"
2596 "pop %rax");
2597
2598 if (offset_p)
2599 *offset_p = 13;
2600 if (size_p)
2601 *size_p = 4;
2602 }
2603
2604 void
2605 amd64_emit_lt_goto (int *offset_p, int *size_p)
2606 {
2607 EMIT_ASM (amd64_lt,
2608 "cmp %rax,(%rsp)\n\t"
2609 "jnl .Lamd64_lt_fallthru\n\t"
2610 "lea 0x8(%rsp),%rsp\n\t"
2611 "pop %rax\n\t"
2612 /* jmp, but don't trust the assembler to choose the right jump */
2613 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2614 ".Lamd64_lt_fallthru:\n\t"
2615 "lea 0x8(%rsp),%rsp\n\t"
2616 "pop %rax");
2617
2618 if (offset_p)
2619 *offset_p = 13;
2620 if (size_p)
2621 *size_p = 4;
2622 }
2623
2624 void
2625 amd64_emit_le_goto (int *offset_p, int *size_p)
2626 {
2627 EMIT_ASM (amd64_le,
2628 "cmp %rax,(%rsp)\n\t"
2629 "jnle .Lamd64_le_fallthru\n\t"
2630 "lea 0x8(%rsp),%rsp\n\t"
2631 "pop %rax\n\t"
2632 /* jmp, but don't trust the assembler to choose the right jump */
2633 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2634 ".Lamd64_le_fallthru:\n\t"
2635 "lea 0x8(%rsp),%rsp\n\t"
2636 "pop %rax");
2637
2638 if (offset_p)
2639 *offset_p = 13;
2640 if (size_p)
2641 *size_p = 4;
2642 }
2643
2644 void
2645 amd64_emit_gt_goto (int *offset_p, int *size_p)
2646 {
2647 EMIT_ASM (amd64_gt,
2648 "cmp %rax,(%rsp)\n\t"
2649 "jng .Lamd64_gt_fallthru\n\t"
2650 "lea 0x8(%rsp),%rsp\n\t"
2651 "pop %rax\n\t"
2652 /* jmp, but don't trust the assembler to choose the right jump */
2653 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2654 ".Lamd64_gt_fallthru:\n\t"
2655 "lea 0x8(%rsp),%rsp\n\t"
2656 "pop %rax");
2657
2658 if (offset_p)
2659 *offset_p = 13;
2660 if (size_p)
2661 *size_p = 4;
2662 }
2663
2664 void
2665 amd64_emit_ge_goto (int *offset_p, int *size_p)
2666 {
2667 EMIT_ASM (amd64_ge,
2668 "cmp %rax,(%rsp)\n\t"
2669 "jnge .Lamd64_ge_fallthru\n\t"
2670 ".Lamd64_ge_jump:\n\t"
2671 "lea 0x8(%rsp),%rsp\n\t"
2672 "pop %rax\n\t"
2673 /* jmp, but don't trust the assembler to choose the right jump */
2674 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
2675 ".Lamd64_ge_fallthru:\n\t"
2676 "lea 0x8(%rsp),%rsp\n\t"
2677 "pop %rax");
2678
2679 if (offset_p)
2680 *offset_p = 13;
2681 if (size_p)
2682 *size_p = 4;
2683 }
2684
2685 struct emit_ops amd64_emit_ops =
2686 {
2687 amd64_emit_prologue,
2688 amd64_emit_epilogue,
2689 amd64_emit_add,
2690 amd64_emit_sub,
2691 amd64_emit_mul,
2692 amd64_emit_lsh,
2693 amd64_emit_rsh_signed,
2694 amd64_emit_rsh_unsigned,
2695 amd64_emit_ext,
2696 amd64_emit_log_not,
2697 amd64_emit_bit_and,
2698 amd64_emit_bit_or,
2699 amd64_emit_bit_xor,
2700 amd64_emit_bit_not,
2701 amd64_emit_equal,
2702 amd64_emit_less_signed,
2703 amd64_emit_less_unsigned,
2704 amd64_emit_ref,
2705 amd64_emit_if_goto,
2706 amd64_emit_goto,
2707 amd64_write_goto_address,
2708 amd64_emit_const,
2709 amd64_emit_call,
2710 amd64_emit_reg,
2711 amd64_emit_pop,
2712 amd64_emit_stack_flush,
2713 amd64_emit_zero_ext,
2714 amd64_emit_swap,
2715 amd64_emit_stack_adjust,
2716 amd64_emit_int_call_1,
2717 amd64_emit_void_call_2,
2718 amd64_emit_eq_goto,
2719 amd64_emit_ne_goto,
2720 amd64_emit_lt_goto,
2721 amd64_emit_le_goto,
2722 amd64_emit_gt_goto,
2723 amd64_emit_ge_goto
2724 };
2725
2726 #endif /* __x86_64__ */
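/* The i386 emitters below mirror the amd64 ones, but agent expression
   values are still 64 bits wide, so the top of the stack lives in the
   %ebx:%eax pair (high:low), each spilled entry takes 8 bytes of CPU
   stack, and 64-bit arithmetic is done piecewise, e.g. the add/adc
   pair in i386_emit_add.  */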
2727
2728 static void
2729 i386_emit_prologue (void)
2730 {
2731 EMIT_ASM32 (i386_prologue,
2732 "push %ebp\n\t"
2733 "mov %esp,%ebp\n\t"
2734 "push %ebx");
2735 /* At this point, the raw regs base address is at 8(%ebp), and the
2736 value pointer is at 12(%ebp). */
2737 }
2738
2739 static void
2740 i386_emit_epilogue (void)
2741 {
2742 EMIT_ASM32 (i386_epilogue,
2743 "mov 12(%ebp),%ecx\n\t"
2744 "mov %eax,(%ecx)\n\t"
2745 "mov %ebx,0x4(%ecx)\n\t"
2746 "xor %eax,%eax\n\t"
2747 "pop %ebx\n\t"
2748 "pop %ebp\n\t"
2749 "ret");
2750 }
2751
2752 static void
2753 i386_emit_add (void)
2754 {
2755 EMIT_ASM32 (i386_add,
2756 "add (%esp),%eax\n\t"
2757 "adc 0x4(%esp),%ebx\n\t"
2758 "lea 0x8(%esp),%esp");
2759 }
2760
2761 static void
2762 i386_emit_sub (void)
2763 {
2764 EMIT_ASM32 (i386_sub,
2765 "subl %eax,(%esp)\n\t"
2766 "sbbl %ebx,4(%esp)\n\t"
2767 "pop %eax\n\t"
2768 "pop %ebx\n\t");
2769 }
2770
2771 static void
2772 i386_emit_mul (void)
2773 {
2774 emit_error = 1;
2775 }
2776
2777 static void
2778 i386_emit_lsh (void)
2779 {
2780 emit_error = 1;
2781 }
2782
2783 static void
2784 i386_emit_rsh_signed (void)
2785 {
2786 emit_error = 1;
2787 }
2788
2789 static void
2790 i386_emit_rsh_unsigned (void)
2791 {
2792 emit_error = 1;
2793 }
2794
2795 static void
2796 i386_emit_ext (int arg)
2797 {
2798 switch (arg)
2799 {
2800 case 8:
2801 EMIT_ASM32 (i386_ext_8,
2802 "cbtw\n\t"
2803 "cwtl\n\t"
2804 "movl %eax,%ebx\n\t"
2805 "sarl $31,%ebx");
2806 break;
2807 case 16:
2808 EMIT_ASM32 (i386_ext_16,
2809 "cwtl\n\t"
2810 "movl %eax,%ebx\n\t"
2811 "sarl $31,%ebx");
2812 break;
2813 case 32:
2814 EMIT_ASM32 (i386_ext_32,
2815 "movl %eax,%ebx\n\t"
2816 "sarl $31,%ebx");
2817 break;
2818 default:
2819 emit_error = 1;
2820 }
2821 }
2822
2823 static void
2824 i386_emit_log_not (void)
2825 {
2826 EMIT_ASM32 (i386_log_not,
2827 "or %ebx,%eax\n\t"
2828 "test %eax,%eax\n\t"
2829 "sete %cl\n\t"
2830 "xor %ebx,%ebx\n\t"
2831 "movzbl %cl,%eax");
2832 }
2833
2834 static void
2835 i386_emit_bit_and (void)
2836 {
2837 EMIT_ASM32 (i386_and,
2838 "and (%esp),%eax\n\t"
2839 "and 0x4(%esp),%ebx\n\t"
2840 "lea 0x8(%esp),%esp");
2841 }
2842
2843 static void
2844 i386_emit_bit_or (void)
2845 {
2846 EMIT_ASM32 (i386_or,
2847 "or (%esp),%eax\n\t"
2848 "or 0x4(%esp),%ebx\n\t"
2849 "lea 0x8(%esp),%esp");
2850 }
2851
2852 static void
2853 i386_emit_bit_xor (void)
2854 {
2855 EMIT_ASM32 (i386_xor,
2856 "xor (%esp),%eax\n\t"
2857 "xor 0x4(%esp),%ebx\n\t"
2858 "lea 0x8(%esp),%esp");
2859 }
2860
2861 static void
2862 i386_emit_bit_not (void)
2863 {
2864 EMIT_ASM32 (i386_bit_not,
2865 "xor $0xffffffff,%eax\n\t"
2866 "xor $0xffffffff,%ebx\n\t");
2867 }
2868
2869 static void
2870 i386_emit_equal (void)
2871 {
2872 EMIT_ASM32 (i386_equal,
2873 "cmpl %ebx,4(%esp)\n\t"
2874 "jne .Li386_equal_false\n\t"
2875 "cmpl %eax,(%esp)\n\t"
2876 "je .Li386_equal_true\n\t"
2877 ".Li386_equal_false:\n\t"
2878 "xor %eax,%eax\n\t"
2879 "jmp .Li386_equal_end\n\t"
2880 ".Li386_equal_true:\n\t"
2881 "mov $1,%eax\n\t"
2882 ".Li386_equal_end:\n\t"
2883 "xor %ebx,%ebx\n\t"
2884 "lea 0x8(%esp),%esp");
2885 }
2886
2887 static void
2888 i386_emit_less_signed (void)
2889 {
2890 EMIT_ASM32 (i386_less_signed,
2891 "cmpl %ebx,4(%esp)\n\t"
2892 "jl .Li386_less_signed_true\n\t"
2893 "jne .Li386_less_signed_false\n\t"
2894 "cmpl %eax,(%esp)\n\t"
2895 "jl .Li386_less_signed_true\n\t"
2896 ".Li386_less_signed_false:\n\t"
2897 "xor %eax,%eax\n\t"
2898 "jmp .Li386_less_signed_end\n\t"
2899 ".Li386_less_signed_true:\n\t"
2900 "mov $1,%eax\n\t"
2901 ".Li386_less_signed_end:\n\t"
2902 "xor %ebx,%ebx\n\t"
2903 "lea 0x8(%esp),%esp");
2904 }
2905
2906 static void
2907 i386_emit_less_unsigned (void)
2908 {
2909 EMIT_ASM32 (i386_less_unsigned,
2910 "cmpl %ebx,4(%esp)\n\t"
2911 "jb .Li386_less_unsigned_true\n\t"
2912 "jne .Li386_less_unsigned_false\n\t"
2913 "cmpl %eax,(%esp)\n\t"
2914 "jb .Li386_less_unsigned_true\n\t"
2915 ".Li386_less_unsigned_false:\n\t"
2916 "xor %eax,%eax\n\t"
2917 "jmp .Li386_less_unsigned_end\n\t"
2918 ".Li386_less_unsigned_true:\n\t"
2919 "mov $1,%eax\n\t"
2920 ".Li386_less_unsigned_end:\n\t"
2921 "xor %ebx,%ebx\n\t"
2922 "lea 0x8(%esp),%esp");
2923 }
2924
2925 static void
2926 i386_emit_ref (int size)
2927 {
2928 switch (size)
2929 {
2930 case 1:
2931 EMIT_ASM32 (i386_ref1,
2932 "movb (%eax),%al");
2933 break;
2934 case 2:
2935 EMIT_ASM32 (i386_ref2,
2936 "movw (%eax),%ax");
2937 break;
2938 case 4:
2939 EMIT_ASM32 (i386_ref4,
2940 "movl (%eax),%eax");
2941 break;
2942 case 8:
2943 EMIT_ASM32 (i386_ref8,
2944 "movl 4(%eax),%ebx\n\t"
2945 "movl (%eax),%eax");
2946 break;
2947 }
2948 }
2949
2950 static void
2951 i386_emit_if_goto (int *offset_p, int *size_p)
2952 {
2953 EMIT_ASM32 (i386_if_goto,
2954 "mov %eax,%ecx\n\t"
2955 "or %ebx,%ecx\n\t"
2956 "pop %eax\n\t"
2957 "pop %ebx\n\t"
2958 "cmpl $0,%ecx\n\t"
2959 /* Don't trust the assembler to choose the right jump */
2960 ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
2961
2962 if (offset_p)
2963 *offset_p = 11; /* be sure that this matches the sequence above */
2964 if (size_p)
2965 *size_p = 4;
2966 }
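/* Byte accounting for the offset of 11 above: mov %eax,%ecx (2)
   + or %ebx,%ecx (2) + pop %eax (1) + pop %ebx (1) + cmpl $0,%ecx (3)
   + the two 0f 85 opcode bytes = 11, so the 4-byte displacement
   starts at offset 11.  */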
2967
2968 static void
2969 i386_emit_goto (int *offset_p, int *size_p)
2970 {
2971 EMIT_ASM32 (i386_goto,
2972 /* Don't trust the assembler to choose the right jump */
2973 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
2974 if (offset_p)
2975 *offset_p = 1;
2976 if (size_p)
2977 *size_p = 4;
2978 }
2979
2980 static void
2981 i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2982 {
2983 int diff = (to - (from + size));
2984 unsigned char buf[sizeof (int)];
2985
2986 /* We're only doing 4-byte sizes at the moment. */
2987 if (size != 4)
2988 {
2989 emit_error = 1;
2990 return;
2991 }
2992
2993 memcpy (buf, &diff, sizeof (int));
2994 write_inferior_memory (from, buf, sizeof (int));
2995 }
2996
2997 static void
2998 i386_emit_const (LONGEST num)
2999 {
3000 unsigned char buf[16];
3001 int i, hi, lo;
3002 CORE_ADDR buildaddr = current_insn_ptr;
3003
3004 i = 0;
3005 buf[i++] = 0xb8; /* mov $<n>,%eax */
3006 lo = num & 0xffffffff;
3007 memcpy (&buf[i], &lo, sizeof (lo));
3008 i += 4;
3009 hi = ((num >> 32) & 0xffffffff);
3010 if (hi)
3011 {
3012 buf[i++] = 0xbb; /* mov $<n>,%ebx */
3013 memcpy (&buf[i], &hi, sizeof (hi));
3014 i += 4;
3015 }
3016 else
3017 {
3018 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
3019 }
3020 append_insns (&buildaddr, i, buf);
3021 current_insn_ptr = buildaddr;
3022 }
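/* Example: i386_emit_const (0x123456789) emits b8 89 67 45 23
   (mov $0x23456789,%eax) followed by bb 01 00 00 00 (mov $0x1,%ebx);
   a value that fits in 32 bits instead gets the 2-byte 31 db
   (xor %ebx,%ebx) for the high half.  */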
3023
3024 static void
3025 i386_emit_call (CORE_ADDR fn)
3026 {
3027 unsigned char buf[16];
3028 int i, offset;
3029 CORE_ADDR buildaddr;
3030
3031 buildaddr = current_insn_ptr;
3032 i = 0;
3033 buf[i++] = 0xe8; /* call <reladdr> */
3034 offset = ((int) fn) - (buildaddr + 5);
3035 memcpy (buf + 1, &offset, 4);
3036 append_insns (&buildaddr, 5, buf);
3037 current_insn_ptr = buildaddr;
3038 }
3039
3040 static void
3041 i386_emit_reg (int reg)
3042 {
3043 unsigned char buf[16];
3044 int i;
3045 CORE_ADDR buildaddr;
3046
3047 EMIT_ASM32 (i386_reg_a,
3048 "sub $0x8,%esp");
3049 buildaddr = current_insn_ptr;
3050 i = 0;
3051 buf[i++] = 0xb8; /* mov $<n>,%eax */
3052 memcpy (&buf[i], &reg, sizeof (reg));
3053 i += 4;
3054 append_insns (&buildaddr, i, buf);
3055 current_insn_ptr = buildaddr;
3056 EMIT_ASM32 (i386_reg_b,
3057 "mov %eax,4(%esp)\n\t"
3058 "mov 8(%ebp),%eax\n\t"
3059 "mov %eax,(%esp)");
3060 i386_emit_call (get_raw_reg_func_addr ());
3061 EMIT_ASM32 (i386_reg_c,
3062 "xor %ebx,%ebx\n\t"
3063 "lea 0x8(%esp),%esp");
3064 }
3065
3066 static void
3067 i386_emit_pop (void)
3068 {
3069 EMIT_ASM32 (i386_pop,
3070 "pop %eax\n\t"
3071 "pop %ebx");
3072 }
3073
3074 static void
3075 i386_emit_stack_flush (void)
3076 {
3077 EMIT_ASM32 (i386_stack_flush,
3078 "push %ebx\n\t"
3079 "push %eax");
3080 }
3081
3082 static void
3083 i386_emit_zero_ext (int arg)
3084 {
3085 switch (arg)
3086 {
3087 case 8:
3088 EMIT_ASM32 (i386_zero_ext_8,
3089 "and $0xff,%eax\n\t"
3090 "xor %ebx,%ebx");
3091 break;
3092 case 16:
3093 EMIT_ASM32 (i386_zero_ext_16,
3094 "and $0xffff,%eax\n\t"
3095 "xor %ebx,%ebx");
3096 break;
3097 case 32:
3098 EMIT_ASM32 (i386_zero_ext_32,
3099 "xor %ebx,%ebx");
3100 break;
3101 default:
3102 emit_error = 1;
3103 }
3104 }
3105
3106 static void
3107 i386_emit_swap (void)
3108 {
3109 EMIT_ASM32 (i386_swap,
3110 "mov %eax,%ecx\n\t"
3111 "mov %ebx,%edx\n\t"
3112 "pop %eax\n\t"
3113 "pop %ebx\n\t"
3114 "push %edx\n\t"
3115 "push %ecx");
3116 }
3117
3118 static void
3119 i386_emit_stack_adjust (int n)
3120 {
3121 unsigned char buf[16];
3122 int i;
3123 CORE_ADDR buildaddr = current_insn_ptr;
3124
3125 i = 0;
3126 buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
3127 buf[i++] = 0x64;
3128 buf[i++] = 0x24;
3129 buf[i++] = n * 8;
3130 append_insns (&buildaddr, i, buf);
3131 current_insn_ptr = buildaddr;
3132 }
3133
3134 /* FN's prototype is `LONGEST(*fn)(int)'. */
3135
3136 static void
3137 i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
3138 {
3139 unsigned char buf[16];
3140 int i;
3141 CORE_ADDR buildaddr;
3142
3143 EMIT_ASM32 (i386_int_call_1_a,
3144 /* Reserve a bit of stack space. */
3145 "sub $0x8,%esp");
3146 /* Put the one argument on the stack. */
3147 buildaddr = current_insn_ptr;
3148 i = 0;
3149 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
3150 buf[i++] = 0x04;
3151 buf[i++] = 0x24;
3152 memcpy (&buf[i], &arg1, sizeof (arg1));
3153 i += 4;
3154 append_insns (&buildaddr, i, buf);
3155 current_insn_ptr = buildaddr;
3156 i386_emit_call (fn);
3157 EMIT_ASM32 (i386_int_call_1_c,
3158 "mov %edx,%ebx\n\t"
3159 "lea 0x8(%esp),%esp");
3160 }
3161
3162 /* FN's prototype is `void(*fn)(int,LONGEST)'. */
3163
3164 static void
3165 i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
3166 {
3167 unsigned char buf[16];
3168 int i;
3169 CORE_ADDR buildaddr;
3170
3171 EMIT_ASM32 (i386_void_call_2_a,
3172 /* Preserve %eax only; we don't have to worry about %ebx. */
3173 "push %eax\n\t"
3174 /* Reserve a bit of stack space for arguments. */
3175 "sub $0x10,%esp\n\t"
3176 /* Copy "top" to the second argument position. (Note that
3177 we can't assume function won't scribble on its
3178 arguments, so don't try to restore from this.) */
3179 "mov %eax,4(%esp)\n\t"
3180 "mov %ebx,8(%esp)");
3181 /* Put the first argument on the stack. */
3182 buildaddr = current_insn_ptr;
3183 i = 0;
3184 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
3185 buf[i++] = 0x04;
3186 buf[i++] = 0x24;
3187 memcpy (&buf[i], &arg1, sizeof (arg1));
3188 i += 4;
3189 append_insns (&buildaddr, i, buf);
3190 current_insn_ptr = buildaddr;
3191 i386_emit_call (fn);
3192 EMIT_ASM32 (i386_void_call_2_b,
3193 "lea 0x10(%esp),%esp\n\t"
3194 /* Restore original stack top. */
3195 "pop %eax");
3196 }
3197
3199 void
3200 i386_emit_eq_goto (int *offset_p, int *size_p)
3201 {
3202 EMIT_ASM32 (eq,
3203 /* Check low half first, more likely to be decider */
3204 "cmpl %eax,(%esp)\n\t"
3205 "jne .Leq_fallthru\n\t"
3206 "cmpl %ebx,4(%esp)\n\t"
3207 "jne .Leq_fallthru\n\t"
3208 "lea 0x8(%esp),%esp\n\t"
3209 "pop %eax\n\t"
3210 "pop %ebx\n\t"
3211 /* jmp, but don't trust the assembler to choose the right jump */
3212 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3213 ".Leq_fallthru:\n\t"
3214 "lea 0x8(%esp),%esp\n\t"
3215 "pop %eax\n\t"
3216 "pop %ebx");
3217
3218 if (offset_p)
3219 *offset_p = 18;
3220 if (size_p)
3221 *size_p = 4;
3222 }
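/* Byte accounting for the offset of 18 above: cmpl %eax,(%esp) (3)
   + jne rel8 (2) + cmpl %ebx,4(%esp) (4) + jne rel8 (2) + lea (4)
   + two pops (2) = 17 bytes, then the one-byte e9 opcode, so the
   rel32 operand starts at offset 18.  */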
3223
3224 void
3225 i386_emit_ne_goto (int *offset_p, int *size_p)
3226 {
3227 EMIT_ASM32 (ne,
3228 /* Check low half first, more likely to be decider */
3229 "cmpl %eax,(%esp)\n\t"
3230 "jne .Lne_jump\n\t"
3231 "cmpl %ebx,4(%esp)\n\t"
3232 "je .Lne_fallthru\n\t"
3233 ".Lne_jump:\n\t"
3234 "lea 0x8(%esp),%esp\n\t"
3235 "pop %eax\n\t"
3236 "pop %ebx\n\t"
3237 /* jmp, but don't trust the assembler to choose the right jump */
3238 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3239 ".Lne_fallthru:\n\t"
3240 "lea 0x8(%esp),%esp\n\t"
3241 "pop %eax\n\t"
3242 "pop %ebx");
3243
3244 if (offset_p)
3245 *offset_p = 18;
3246 if (size_p)
3247 *size_p = 4;
3248 }
3249
3250 void
3251 i386_emit_lt_goto (int *offset_p, int *size_p)
3252 {
3253 EMIT_ASM32 (lt,
3254 "cmpl %ebx,4(%esp)\n\t"
3255 "jl .Llt_jump\n\t"
3256 "jne .Llt_fallthru\n\t"
3257 "cmpl %eax,(%esp)\n\t"
3258 "jnl .Llt_fallthru\n\t"
3259 ".Llt_jump:\n\t"
3260 "lea 0x8(%esp),%esp\n\t"
3261 "pop %eax\n\t"
3262 "pop %ebx\n\t"
3263 /* jmp, but don't trust the assembler to choose the right jump */
3264 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3265 ".Llt_fallthru:\n\t"
3266 "lea 0x8(%esp),%esp\n\t"
3267 "pop %eax\n\t"
3268 "pop %ebx");
3269
3270 if (offset_p)
3271 *offset_p = 20;
3272 if (size_p)
3273 *size_p = 4;
3274 }
3275
3276 void
3277 i386_emit_le_goto (int *offset_p, int *size_p)
3278 {
3279 EMIT_ASM32 (le,
3280 "cmpl %ebx,4(%esp)\n\t"
3281 "jle .Lle_jump\n\t"
3282 "jne .Lle_fallthru\n\t"
3283 "cmpl %eax,(%esp)\n\t"
3284 "jnle .Lle_fallthru\n\t"
3285 ".Lle_jump:\n\t"
3286 "lea 0x8(%esp),%esp\n\t"
3287 "pop %eax\n\t"
3288 "pop %ebx\n\t"
3289 /* jmp, but don't trust the assembler to choose the right jump */
3290 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3291 ".Lle_fallthru:\n\t"
3292 "lea 0x8(%esp),%esp\n\t"
3293 "pop %eax\n\t"
3294 "pop %ebx");
3295
3296 if (offset_p)
3297 *offset_p = 20;
3298 if (size_p)
3299 *size_p = 4;
3300 }
3301
3302 void
3303 i386_emit_gt_goto (int *offset_p, int *size_p)
3304 {
3305 EMIT_ASM32 (gt,
3306 "cmpl %ebx,4(%esp)\n\t"
3307 "jg .Lgt_jump\n\t"
3308 "jne .Lgt_fallthru\n\t"
3309 "cmpl %eax,(%esp)\n\t"
3310 "jng .Lgt_fallthru\n\t"
3311 ".Lgt_jump:\n\t"
3312 "lea 0x8(%esp),%esp\n\t"
3313 "pop %eax\n\t"
3314 "pop %ebx\n\t"
3315 /* jmp, but don't trust the assembler to choose the right jump */
3316 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3317 ".Lgt_fallthru:\n\t"
3318 "lea 0x8(%esp),%esp\n\t"
3319 "pop %eax\n\t"
3320 "pop %ebx");
3321
3322 if (offset_p)
3323 *offset_p = 20;
3324 if (size_p)
3325 *size_p = 4;
3326 }
3327
3328 void
3329 i386_emit_ge_goto (int *offset_p, int *size_p)
3330 {
3331 EMIT_ASM32 (ge,
3332 "cmpl %ebx,4(%esp)\n\t"
3333 "jge .Lge_jump\n\t"
3334 "jne .Lge_fallthru\n\t"
3335 "cmpl %eax,(%esp)\n\t"
3336 "jnge .Lge_fallthru\n\t"
3337 ".Lge_jump:\n\t"
3338 "lea 0x8(%esp),%esp\n\t"
3339 "pop %eax\n\t"
3340 "pop %ebx\n\t"
3341 /* jmp, but don't trust the assembler to choose the right jump */
3342 ".byte 0xe9, 0x0, 0x0, 0x0, 0x0\n\t"
3343 ".Lge_fallthru:\n\t"
3344 "lea 0x8(%esp),%esp\n\t"
3345 "pop %eax\n\t"
3346 "pop %ebx");
3347
3348 if (offset_p)
3349 *offset_p = 20;
3350 if (size_p)
3351 *size_p = 4;
3352 }
3353
3354 struct emit_ops i386_emit_ops =
3355 {
3356 i386_emit_prologue,
3357 i386_emit_epilogue,
3358 i386_emit_add,
3359 i386_emit_sub,
3360 i386_emit_mul,
3361 i386_emit_lsh,
3362 i386_emit_rsh_signed,
3363 i386_emit_rsh_unsigned,
3364 i386_emit_ext,
3365 i386_emit_log_not,
3366 i386_emit_bit_and,
3367 i386_emit_bit_or,
3368 i386_emit_bit_xor,
3369 i386_emit_bit_not,
3370 i386_emit_equal,
3371 i386_emit_less_signed,
3372 i386_emit_less_unsigned,
3373 i386_emit_ref,
3374 i386_emit_if_goto,
3375 i386_emit_goto,
3376 i386_write_goto_address,
3377 i386_emit_const,
3378 i386_emit_call,
3379 i386_emit_reg,
3380 i386_emit_pop,
3381 i386_emit_stack_flush,
3382 i386_emit_zero_ext,
3383 i386_emit_swap,
3384 i386_emit_stack_adjust,
3385 i386_emit_int_call_1,
3386 i386_emit_void_call_2,
3387 i386_emit_eq_goto,
3388 i386_emit_ne_goto,
3389 i386_emit_lt_goto,
3390 i386_emit_le_goto,
3391 i386_emit_gt_goto,
3392 i386_emit_ge_goto
3393 };
3394
3396 static struct emit_ops *
3397 x86_emit_ops (void)
3398 {
3399 #ifdef __x86_64__
3400 if (is_64bit_tdesc ())
3401 return &amd64_emit_ops;
3402 else
3403 #endif
3404 return &i386_emit_ops;
3405 }
3406
3407 static int
3408 x86_supports_range_stepping (void)
3409 {
3410 return 1;
3411 }
3412
3413 /* This is initialized assuming an amd64 target; x86_arch_setup
3414    corrects it at runtime for the actual target, i386 or amd64. */
3415
3416 struct linux_target_ops the_low_target =
3417 {
3418 x86_arch_setup,
3419 x86_linux_regs_info,
3420 x86_cannot_fetch_register,
3421 x86_cannot_store_register,
3422 NULL, /* fetch_register */
3423 x86_get_pc,
3424 x86_set_pc,
3425 x86_breakpoint,
3426 x86_breakpoint_len,
3427 NULL,
3428 1,
3429 x86_breakpoint_at,
3430 x86_supports_z_point_type,
3431 x86_insert_point,
3432 x86_remove_point,
3433 x86_stopped_by_watchpoint,
3434 x86_stopped_data_address,
3435 /* collect_ptrace_register/supply_ptrace_register are not needed in the
3436 native i386 case (no registers smaller than an xfer unit), and are not
3437 used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
3438 NULL,
3439 NULL,
3440 /* need to fix up i386 siginfo if host is amd64 */
3441 x86_siginfo_fixup,
3442 x86_linux_new_process,
3443 x86_linux_new_thread,
3444 x86_linux_prepare_to_resume,
3445 x86_linux_process_qsupported,
3446 x86_supports_tracepoints,
3447 x86_get_thread_area,
3448 x86_install_fast_tracepoint_jump_pad,
3449 x86_emit_ops,
3450 x86_get_min_fast_tracepoint_insn_len,
3451 x86_supports_range_stepping,
3452 };
3453
3454 void
3455 initialize_low_arch (void)
3456 {
3457 /* Initialize the Linux target descriptions. */
3458 #ifdef __x86_64__
3459 init_registers_amd64_linux ();
3460 init_registers_amd64_avx_linux ();
3461 init_registers_amd64_avx512_linux ();
3462 init_registers_amd64_mpx_linux ();
3463
3464 init_registers_x32_linux ();
3465 init_registers_x32_avx_linux ();
3466 init_registers_x32_avx512_linux ();
3467
3468 tdesc_amd64_linux_no_xml = xmalloc (sizeof (struct target_desc));
3469 copy_target_description (tdesc_amd64_linux_no_xml, tdesc_amd64_linux);
3470 tdesc_amd64_linux_no_xml->xmltarget = xmltarget_amd64_linux_no_xml;
3471 #endif
3472 init_registers_i386_linux ();
3473 init_registers_i386_mmx_linux ();
3474 init_registers_i386_avx_linux ();
3475 init_registers_i386_avx512_linux ();
3476 init_registers_i386_mpx_linux ();
3477
3478 tdesc_i386_linux_no_xml = xmalloc (sizeof (struct target_desc));
3479 copy_target_description (tdesc_i386_linux_no_xml, tdesc_i386_linux);
3480 tdesc_i386_linux_no_xml->xmltarget = xmltarget_i386_linux_no_xml;
3481
3482 initialize_regsets_info (&x86_regsets_info);
3483 }