/* GNU/Linux/x86-64 specific low level interface, for the remote server
   for GDB.
   Copyright (C) 2002, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include <stddef.h>
#include <signal.h>
#include <limits.h>
#include "server.h"
#include "linux-low.h"
#include "i387-fp.h"
#include "i386-low.h"
#include "i386-xstate.h"
#include "elf/common.h"

#include "gdb_proc_service.h"

/* Defined in auto-generated file i386-linux.c.  */
void init_registers_i386_linux (void);
/* Defined in auto-generated file amd64-linux.c.  */
void init_registers_amd64_linux (void);
/* Defined in auto-generated file i386-avx-linux.c.  */
void init_registers_i386_avx_linux (void);
/* Defined in auto-generated file amd64-avx-linux.c.  */
void init_registers_amd64_avx_linux (void);
/* Defined in auto-generated file i386-mmx-linux.c.  */
void init_registers_i386_mmx_linux (void);

static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
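/* jump_insn is the template for a 5-byte relative jump: opcode 0xe9
   followed by a 32-bit signed displacement, which gets patched in
   before the instruction is written to the inferior.  */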

/* Backward compatibility for gdb without XML support.  */

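/* The leading '@' below marks the string as a complete target
   description document, rather than the name of an XML file for
   gdbserver to read.  */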
static const char *xmltarget_i386_linux_no_xml = "@<target>\
<architecture>i386</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";

#ifdef __x86_64__

static const char *xmltarget_amd64_linux_no_xml = "@<target>\
<architecture>i386:x86-64</architecture>\
<osabi>GNU/Linux</osabi>\
</target>";
#endif

#include <sys/reg.h>
#include <sys/procfs.h>
#include <sys/ptrace.h>
#include <sys/uio.h>

#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET 0x4204
#endif

#ifndef PTRACE_SETREGSET
#define PTRACE_SETREGSET 0x4205
#endif


#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25
#endif

/* This definition comes from prctl.h, but some kernels may not have it.  */
#ifndef PTRACE_ARCH_PRCTL
#define PTRACE_ARCH_PRCTL 30
#endif

/* The following definitions come from prctl.h, but may be absent
   for certain configurations.  */
#ifndef ARCH_GET_FS
#define ARCH_SET_GS 0x1001
#define ARCH_SET_FS 0x1002
#define ARCH_GET_FS 0x1003
#define ARCH_GET_GS 0x1004
#endif

/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  struct i386_debug_reg_state debug_reg_state;
};

/* Per-thread arch-specific data we want to keep.  */

struct arch_lwp_info
{
  /* Non-zero if our copy differs from what's recorded in the thread.  */
  int debug_registers_changed;
};

#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX

static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#endif

/* Called by libthread_db.  */

ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      switch (idx)
        {
        case FS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
            return PS_OK;
          break;
        case GS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
            return PS_OK;
          break;
        default:
          return PS_BADADDR;
        }
      return PS_ERR;
    }
#endif

  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
                (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

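    /* DESC holds a raw struct user_desc; desc[1] is its base_addr
       field, the base address of the selected thread-area entry.  */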
    *(int *)base = desc[1];
    return PS_OK;
  }
}

/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
        {
          *addr = (CORE_ADDR) (uintptr_t) base;
          return 0;
        }

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct regcache *regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

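    /* The bottom three bits of a segment selector are the requested
       privilege level and table indicator; shifting them off leaves
       the GDT entry number that PTRACE_GET_THREAD_AREA expects.  */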
    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
                lwpid_of (lwp), (void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    *addr = desc[1];
    return 0;
  }
}

static int
i386_cannot_store_register (int regno)
{
  return regno >= I386_NUM_REGS;
}

static int
i386_cannot_fetch_register (int regno)
{
  return regno >= I386_NUM_REGS;
}

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

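  /* orig_eax is the pseudo register in which the kernel records the
     system call number on entry to a syscall; it has no hardware
     counterpart.  */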
  collect_register_by_name (regcache, "orig_eax",
                            ((char *) buf) + ORIG_EAX * 4);
}

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  supply_register_by_name (regcache, "orig_eax",
                           ((char *) buf) + ORIG_EAX * 4);
}

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}

#ifndef __x86_64__

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}

/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update target_regsets accordingly, maybe by moving target_regsets
   to linux_target_ops and set the right one there, rather than having to
   modify the target_regsets global.  */

struct regset_info target_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  { 0, 0, 0, -1, -1, NULL, NULL }
};

static CORE_ADDR
x86_get_pc (struct regcache *regcache)
{
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      unsigned long pc;
      collect_register_by_name (regcache, "rip", &pc);
      return (CORE_ADDR) pc;
    }
  else
    {
      unsigned int pc;
      collect_register_by_name (regcache, "eip", &pc);
      return (CORE_ADDR) pc;
    }
}

static void
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
{
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      unsigned long newpc = pc;
      supply_register_by_name (regcache, "rip", &newpc);
    }
  else
    {
      unsigned int newpc = pc;
      supply_register_by_name (regcache, "eip", &newpc);
    }
}

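/* 0xCC is the one-byte INT3 instruction, the standard x86 software
   breakpoint.  */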
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1

static int
x86_breakpoint_at (CORE_ADDR pc)
{
  unsigned char c;

  (*the_target->read_memory) (pc, &c, 1);
  if (c == 0xCC)
    return 1;

  return 0;
}

/* Support for debug registers.  */

static unsigned long
x86_linux_dr_get (ptid_t ptid, int regnum)
{
  int tid;
  unsigned long value;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  value = ptrace (PTRACE_PEEKUSER, tid,
                  offsetof (struct user, u_debugreg[regnum]), 0);
  if (errno != 0)
    error ("Couldn't read debug register");

  return value;
}

static void
x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
{
  int tid;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  ptrace (PTRACE_POKEUSER, tid,
          offsetof (struct user, u_debugreg[regnum]), value);
  if (errno != 0)
    error ("Couldn't write debug register");
}

/* Update the inferior's debug register REGNUM from STATE.  */

void
i386_dr_low_set_addr (const struct i386_debug_reg_state *state, int regnum)
{
  struct inferior_list_entry *lp;
  CORE_ADDR addr;
  /* Only need to update the threads of this process.  */
  int pid = pid_of (get_thread_lwp (current_inferior));

  if (! (regnum >= 0 && regnum <= DR_LASTADDR - DR_FIRSTADDR))
    fatal ("Invalid debug register %d", regnum);

  addr = state->dr_mirror[regnum];

  for (lp = all_lwps.head; lp; lp = lp->next)
    {
      struct lwp_info *lwp = (struct lwp_info *) lp;

      /* The actual update is done later, we just mark that the register
         needs updating.  */
      if (pid_of (lwp) == pid)
        lwp->arch_private->debug_registers_changed = 1;
    }
}

/* Update the inferior's DR7 debug control register from STATE.  */

void
i386_dr_low_set_control (const struct i386_debug_reg_state *state)
{
  struct inferior_list_entry *lp;
  /* Only need to update the threads of this process.  */
  int pid = pid_of (get_thread_lwp (current_inferior));

  for (lp = all_lwps.head; lp; lp = lp->next)
    {
      struct lwp_info *lwp = (struct lwp_info *) lp;

      /* The actual update is done later, we just mark that the register
         needs updating.  */
      if (pid_of (lwp) == pid)
        lwp->arch_private->debug_registers_changed = 1;
    }
}

/* Get the value of the DR6 debug status register from the inferior
   and record it in STATE.  */

void
i386_dr_low_get_status (struct i386_debug_reg_state *state)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  state->dr_status_mirror = x86_linux_dr_get (ptid, DR_STATUS);
}

/* Watchpoint support.  */

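/* The TYPE argument below is the Z-packet type from the remote
   protocol: '0' is a software breakpoint, and '2', '3' and '4' are
   write, read and access watchpoints respectively.  */
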
static int
x86_insert_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();
  switch (type)
    {
    case '0':
      return set_gdb_breakpoint_at (addr);
    case '2':
    case '3':
    case '4':
      return i386_low_insert_watchpoint (&proc->private->arch_private->debug_reg_state,
                                         type, addr, len);
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_remove_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();
  switch (type)
    {
    case '0':
      return delete_gdb_breakpoint_at (addr);
    case '2':
    case '3':
    case '4':
      return i386_low_remove_watchpoint (&proc->private->arch_private->debug_reg_state,
                                         type, addr, len);
    default:
      /* Unsupported.  */
      return 1;
    }
}

static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return i386_low_stopped_by_watchpoint (&proc->private->arch_private->debug_reg_state);
}

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (i386_low_stopped_data_address (&proc->private->arch_private->debug_reg_state,
                                     &addr))
    return addr;
  return 0;
}

/* Called when a new process is created.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = xcalloc (1, sizeof (*info));

  i386_low_init_dregs (&info->debug_reg_state);

  return info;
}

/* Called when a new thread is detected.  */

static struct arch_lwp_info *
x86_linux_new_thread (void)
{
  struct arch_lwp_info *info = xcalloc (1, sizeof (*info));

  info->debug_registers_changed = 1;

  return info;
}

/* Called when resuming a thread.
   If the debug regs have changed, update the thread's copies.  */

static void
x86_linux_prepare_to_resume (struct lwp_info *lwp)
{
  ptid_t ptid = ptid_of (lwp);

  if (lwp->arch_private->debug_registers_changed)
    {
      int i;
      int pid = ptid_get_pid (ptid);
      struct process_info *proc = find_process_pid (pid);
      struct i386_debug_reg_state *state = &proc->private->arch_private->debug_reg_state;

      for (i = DR_FIRSTADDR; i <= DR_LASTADDR; i++)
        x86_linux_dr_set (ptid, i, state->dr_mirror[i]);

      x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);

      lwp->arch_private->debug_registers_changed = 0;
    }

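  /* Clear DR6 (the debug status register) before resuming, so that a
     watchpoint hit that has already been reported is not seen again
     at the next stop.  */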
  if (lwp->stopped_by_watchpoint)
    x86_linux_dr_set (ptid, DR_STATUS, 0);
}

/* When GDBSERVER is built as a 64-bit application on linux, the
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
   conversion in-place ourselves.  */

/* These types below (compat_*) define a siginfo type that is layout
   compatible with the siginfo type exported by the 32-bit userspace
   support.  */

#ifdef __x86_64__

typedef int compat_int_t;
typedef unsigned int compat_uptr_t;

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;

struct compat_timeval
{
  compat_time_t tv_sec;
  int tv_usec;
};

typedef union compat_sigval
{
  compat_int_t sival_int;
  compat_uptr_t sival_ptr;
} compat_sigval_t;

typedef struct compat_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    int _pad[((128 / sizeof (int)) - 3)];
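    /* The kernel pads siginfo out to 128 bytes; si_signo, si_errno
       and si_code above account for the three ints subtracted.  */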

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_clock_t _utime;
      compat_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_siginfo_t;

#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

733 In their place is si_timer1,si_timer2. */
734#ifndef si_timerid
735#define si_timerid si_timer1
736#endif
737#ifndef si_overrun
738#define si_overrun si_timer2
739#endif
740
741static void
742compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
743{
744 memset (to, 0, sizeof (*to));
745
746 to->si_signo = from->si_signo;
747 to->si_errno = from->si_errno;
748 to->si_code = from->si_code;
749
750 if (to->si_code < 0)
751 {
752 to->cpt_si_ptr = (intptr_t) from->si_ptr;
753 }
754 else if (to->si_code == SI_USER)
755 {
756 to->cpt_si_pid = from->si_pid;
757 to->cpt_si_uid = from->si_uid;
758 }
759 else if (to->si_code == SI_TIMER)
760 {
761 to->cpt_si_timerid = from->si_timerid;
762 to->cpt_si_overrun = from->si_overrun;
763 to->cpt_si_ptr = (intptr_t) from->si_ptr;
764 }
765 else
766 {
767 switch (to->si_signo)
768 {
769 case SIGCHLD:
770 to->cpt_si_pid = from->si_pid;
771 to->cpt_si_uid = from->si_uid;
772 to->cpt_si_status = from->si_status;
773 to->cpt_si_utime = from->si_utime;
774 to->cpt_si_stime = from->si_stime;
775 break;
776 case SIGILL:
777 case SIGFPE:
778 case SIGSEGV:
779 case SIGBUS:
780 to->cpt_si_addr = (intptr_t) from->si_addr;
781 break;
782 case SIGPOLL:
783 to->cpt_si_band = from->si_band;
784 to->cpt_si_fd = from->si_fd;
785 break;
786 default:
787 to->cpt_si_pid = from->si_pid;
788 to->cpt_si_uid = from->si_uid;
789 to->cpt_si_ptr = (intptr_t) from->si_ptr;
790 break;
791 }
792 }
793}
794
795static void
796siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
797{
798 memset (to, 0, sizeof (*to));
799
800 to->si_signo = from->si_signo;
801 to->si_errno = from->si_errno;
802 to->si_code = from->si_code;
803
804 if (to->si_code < 0)
805 {
806 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
807 }
808 else if (to->si_code == SI_USER)
809 {
810 to->si_pid = from->cpt_si_pid;
811 to->si_uid = from->cpt_si_uid;
812 }
813 else if (to->si_code == SI_TIMER)
814 {
815 to->si_timerid = from->cpt_si_timerid;
816 to->si_overrun = from->cpt_si_overrun;
817 to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
818 }
819 else
820 {
821 switch (to->si_signo)
822 {
823 case SIGCHLD:
824 to->si_pid = from->cpt_si_pid;
825 to->si_uid = from->cpt_si_uid;
826 to->si_status = from->cpt_si_status;
827 to->si_utime = from->cpt_si_utime;
828 to->si_stime = from->cpt_si_stime;
829 break;
830 case SIGILL:
831 case SIGFPE:
832 case SIGSEGV:
833 case SIGBUS:
834 to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
835 break;
836 case SIGPOLL:
837 to->si_band = from->cpt_si_band;
838 to->si_fd = from->cpt_si_fd;
839 break;
840 default:
841 to->si_pid = from->cpt_si_pid;
842 to->si_uid = from->cpt_si_uid;
843 to->si_ptr = (void* ) (intptr_t) from->cpt_si_ptr;
844 break;
845 }
846 }
847}
848
849#endif /* __x86_64__ */
850
851/* Convert a native/host siginfo object, into/from the siginfo in the
852 layout of the inferiors' architecture. Returns true if any
853 conversion was done; false otherwise. If DIRECTION is 1, then copy
854 from INF to NATIVE. If DIRECTION is 0, copy from NATIVE to
855 INF. */
856
857static int
858x86_siginfo_fixup (struct siginfo *native, void *inf, int direction)
859{
860#ifdef __x86_64__
861 /* Is the inferior 32-bit? If so, then fixup the siginfo object. */
862 if (register_size (0) == 4)
863 {
9f1036c1
DE
864 if (sizeof (struct siginfo) != sizeof (compat_siginfo_t))
865 fatal ("unexpected difference in siginfo");
d0722149
DE
866
867 if (direction == 0)
868 compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
869 else
870 siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);
871
872 return 1;
873 }
874#endif
875
876 return 0;
877}
878\f
static int use_xml;

/* Update gdbserver_xmltarget.  */

static void
x86_linux_update_xmltarget (void)
{
  int pid;
  struct regset_info *regset;
  static unsigned long long xcr0;
  static int have_ptrace_getregset = -1;
#if !defined(__x86_64__) && defined(HAVE_PTRACE_GETFPXREGS)
  static int have_ptrace_getfpxregs = -1;
#endif

  if (!current_inferior)
    return;

  /* Before changing the register cache internal layout or the target
     regsets, flush the contents of the current valid caches back to
     the threads.  */
  regcache_invalidate ();

  pid = pid_of (get_thread_lwp (current_inferior));
#ifdef __x86_64__
  if (num_xmm_registers == 8)
    init_registers_i386_linux ();
  else
    init_registers_amd64_linux ();
#else
  {
# ifdef HAVE_PTRACE_GETFPXREGS
    if (have_ptrace_getfpxregs == -1)
      {
        elf_fpxregset_t fpxregs;

        if (ptrace (PTRACE_GETFPXREGS, pid, 0, (int) &fpxregs) < 0)
          {
            have_ptrace_getfpxregs = 0;
            x86_xcr0 = I386_XSTATE_X87_MASK;

            /* Disable PTRACE_GETFPXREGS.  */
            for (regset = target_regsets;
                 regset->fill_function != NULL; regset++)
              if (regset->get_request == PTRACE_GETFPXREGS)
                {
                  regset->size = 0;
                  break;
                }
          }
        else
          have_ptrace_getfpxregs = 1;
      }

    if (!have_ptrace_getfpxregs)
      {
        init_registers_i386_mmx_linux ();
        return;
      }
# endif
    init_registers_i386_linux ();
  }
#endif

  if (!use_xml)
    {
      /* Don't use XML.  */
#ifdef __x86_64__
      if (num_xmm_registers == 8)
        gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
      else
        gdbserver_xmltarget = xmltarget_amd64_linux_no_xml;
#else
      gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
#endif

      x86_xcr0 = I386_XSTATE_SSE_MASK;

      return;
    }

  /* Check if XSAVE extended state is supported.  */
  if (have_ptrace_getregset == -1)
    {
      unsigned long long xstateregs[I386_XSTATE_SSE_SIZE / sizeof (long long)];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, pid, (unsigned int) NT_X86_XSTATE,
                  &iov) < 0)
        {
          have_ptrace_getregset = 0;
          return;
        }
      else
        have_ptrace_getregset = 1;

      /* Get XCR0 from XSAVE extended state at byte 464.  */
      xcr0 = xstateregs[464 / sizeof (long long)];
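      /* (Byte 464 is in the software-usable area of the FXSAVE
         region; the kernel stashes the XCR0 value there when
         servicing PTRACE_GETREGSET for NT_X86_XSTATE.)  */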

      /* Use PTRACE_GETREGSET if it is available.  */
      for (regset = target_regsets;
           regset->fill_function != NULL; regset++)
        if (regset->get_request == PTRACE_GETREGSET)
          regset->size = I386_XSTATE_SIZE (xcr0);
        else if (regset->type != GENERAL_REGS)
          regset->size = 0;
    }

  if (have_ptrace_getregset)
    {
      /* AVX is the highest feature we support.  */
      if ((xcr0 & I386_XSTATE_AVX_MASK) == I386_XSTATE_AVX_MASK)
        {
          x86_xcr0 = xcr0;

#ifdef __x86_64__
          /* I386 has 8 xmm regs.  */
          if (num_xmm_registers == 8)
            init_registers_i386_avx_linux ();
          else
            init_registers_amd64_avx_linux ();
#else
          init_registers_i386_avx_linux ();
#endif
        }
    }
}

/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (const char *query)
{
  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  if (query != NULL && strncmp (query, "xmlRegisters=", 13) == 0)
    {
      char *copy = xstrdup (query + 13);
      char *p;

      for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
        {
          if (strcmp (p, "i386") == 0)
            {
              use_xml = 1;
              break;
            }
        }

      free (copy);
    }

  x86_linux_update_xmltarget ();
}

/* Initialize gdbserver for the architecture of the inferior.  */

static void
x86_arch_setup (void)
{
#ifdef __x86_64__
  int pid = pid_of (get_thread_lwp (current_inferior));
  char *file = linux_child_pid_to_exec_file (pid);
  int use_64bit = elf_64_file_p (file);

  free (file);

  if (use_64bit < 0)
    {
      /* This can only happen if /proc/<pid>/exe is unreadable,
         but "that can't happen" if we've gotten this far.
         Fall through and assume this is a 32-bit program.  */
    }
  else if (use_64bit)
    {
      /* Amd64 doesn't have HAVE_LINUX_USRREGS.  */
      the_low_target.num_regs = -1;
      the_low_target.regmap = NULL;
      the_low_target.cannot_fetch_register = NULL;
      the_low_target.cannot_store_register = NULL;

      /* Amd64 has 16 xmm regs.  */
      num_xmm_registers = 16;

      x86_linux_update_xmltarget ();
      return;
    }
#endif

  /* Ok we have a 32-bit inferior.  */

  the_low_target.num_regs = I386_NUM_REGS;
  the_low_target.regmap = i386_regmap;
  the_low_target.cannot_fetch_register = i386_cannot_fetch_register;
  the_low_target.cannot_store_register = i386_cannot_store_register;

  /* I386 has 8 xmm regs.  */
  num_xmm_registers = 8;

  x86_linux_update_xmltarget ();
}

static int
x86_supports_tracepoints (void)
{
  return 1;
}

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *buf_org = buf;

  while (1)
    {
      char *endptr;
      unsigned long ul = strtoul (op, &endptr, 16);

      if (endptr == op)
        break;

      *buf++ = ul;
      op = endptr;
    }

  return buf - buf_org;
}
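
/* For example, push_opcode (buf, "48 89 e6") parses the string as hex
   bytes, stores 0x48 0x89 0xe6 into BUF, and returns 3.  */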

#ifdef __x86_64__

/* Build a jump pad that saves registers and calls a collection
   function.  Write into JJUMP_PAD_INSN the jump instruction that will
   transfer control to the jump pad; the caller is responsible for
   writing it in at the tracepoint address.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                        CORE_ADDR collector,
                                        CORE_ADDR lockaddr,
                                        ULONGEST orig_size,
                                        CORE_ADDR *jump_entry,
                                        unsigned char *jjump_pad_insn,
                                        ULONGEST *jjump_pad_insn_size,
                                        CORE_ADDR *adjusted_insn_addr,
                                        CORE_ADDR *adjusted_insn_addr_end)
{
  unsigned char buf[40];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* mov $<tpaddr>,%rdi */
  buf[i++] = 0xbf;
  *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
  i += sizeof (unsigned long);
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18"); /* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8"); /* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24"); /* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
                    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be"); /* mov $<lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1"); /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0"); /* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4"); /* jne <again> */
  append_insns (&buildaddr, i, buf);
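
  /* The sequence above spins on the lock: it tries to
     compare-and-swap the word at LOCKADDR from zero to the address of
     this thread's collecting_t object on the stack, looping back to
     the cmpxchg while another thread holds the lock.  */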

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6"); /* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18"); /* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf"); /* mov $<tpoint>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function, being in the shared library, may be more
     than 31 bits away from the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8"); /* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0"); /* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3"); /* mov %rax,<lockaddr> */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18"); /* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  offset = *jump_entry - (tpaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

#endif /* __x86_64__ */

/* Build a jump pad that saves registers and calls a collection
   function.  Write into JJUMP_PAD_INSN the jump instruction that will
   transfer control to the jump pad; the caller is responsible for
   writing it in at the tracepoint address.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                       CORE_ADDR collector,
                                       CORE_ADDR lockaddr,
                                       ULONGEST orig_size,
                                       CORE_ADDR *jump_entry,
                                       unsigned char *jjump_pad_insn,
                                       ULONGEST *jjump_pad_insn_size,
                                       CORE_ADDR *adjusted_insn_addr,
                                       CORE_ADDR *adjusted_insn_addr_end)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8"); /* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24"); /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04"); /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this uses cmpxchg, which is not available on the
     original i386.  If we cared about that, this could use xchg
     instead.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25"); /* lock cmpxchg
                                                %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0"); /* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2"); /* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0"); /* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08"); /* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc"); /* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08"); /* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24"); /* movl <tpoint>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0"); /* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3"); /* mov %eax,<lockaddr> */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08"); /* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  offset = *jump_entry - (tpaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                      CORE_ADDR collector,
                                      CORE_ADDR lockaddr,
                                      ULONGEST orig_size,
                                      CORE_ADDR *jump_entry,
                                      unsigned char *jjump_pad_insn,
                                      ULONGEST *jjump_pad_insn_size,
                                      CORE_ADDR *adjusted_insn_addr,
                                      CORE_ADDR *adjusted_insn_addr_end)
{
#ifdef __x86_64__
  if (register_size (0) == 8)
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                   collector, lockaddr,
                                                   orig_size, jump_entry,
                                                   jjump_pad_insn,
                                                   jjump_pad_insn_size,
                                                   adjusted_insn_addr,
                                                   adjusted_insn_addr_end);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                collector, lockaddr,
                                                orig_size, jump_entry,
                                                jjump_pad_insn,
                                                jjump_pad_insn_size,
                                                adjusted_insn_addr,
                                                adjusted_insn_addr_end);
}

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    fprintf (stderr, "Adding %d bytes of insn at %s\n",
             len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}

/* A function used to trick optimizers.  */

int
always_true (void)
{
  return 1;
}

/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

#define EMIT_ASM(NAME,INSNS) \
  { extern unsigned char start_ ## NAME, end_ ## NAME; \
    add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
    if (always_true ()) \
      goto skipover ## NAME; \
    __asm__ ("start_" #NAME ":\n\t" INSNS "\n\tend_" #NAME ":\n\t"); \
  skipover ## NAME: \
    ; }
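
/* For example, EMIT_ASM (foo, "pop %rax") copies into the jump pad
   whatever bytes the compiler assembled for "pop %rax", located via
   the start_foo/end_foo labels.  The always_true () test hides the
   unconditional goto from the optimizer, so the embedded asm is kept
   in the object file but never executed in place.  */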

#ifdef __x86_64__

#define EMIT_ASM32(NAME,INSNS) \
  { extern unsigned char start_ ## NAME, end_ ## NAME; \
    add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
    if (always_true ()) \
      goto skipover ## NAME; \
    __asm__ (".code32\n\tstart_" #NAME ":\n\t" INSNS "\n\tend_" #NAME ":\n" \
             "\t.code64\n\t"); \
  skipover ## NAME: \
    ; }
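
/* The .code32/.code64 bracketing makes the assembler encode INSNS as
   32-bit code even though gdbserver itself is built 64-bit; these
   sequences execute in 32-bit inferiors.  */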

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif

#ifdef __x86_64__

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
            "pushq %rbp\n\t"
            "movq %rsp,%rbp\n\t"
            "sub $0x20,%rsp\n\t"
            "movq %rdi,-8(%rbp)\n\t"
            "movq %rsi,-16(%rbp)");
}

static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
            "movq -16(%rbp),%rdi\n\t"
            "movq %rax,(%rdi)\n\t"
            "xor %rax,%rax\n\t"
            "leave\n\t"
            "ret");
}

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
            "add (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
            "sub %rax,(%rsp)\n\t"
            "pop %rax");
}

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
                "cbtw\n\t"
                "cwtl\n\t"
                "cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
                "cwtl\n\t"
                "cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
                "cltq");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
            "test %rax,%rax\n\t"
            "sete %cl\n\t"
            "movzbq %cl,%rax");
}

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
            "and (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
            "or (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
            "xor (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
            "xorq $0xffffffffffffffff,%rax");
}

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
            "cmp %rax,(%rsp)\n\t"
            "je .Lamd64_equal_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_equal_end\n\t"
            ".Lamd64_equal_true:\n\t"
            "mov $0x1,%rax\n\t"
            ".Lamd64_equal_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
            "cmp %rax,(%rsp)\n\t"
            "jl .Lamd64_less_signed_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_less_signed_end\n\t"
            ".Lamd64_less_signed_true:\n\t"
            "mov $1,%rax\n\t"
            ".Lamd64_less_signed_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
            "cmp %rax,(%rsp)\n\t"
            "jb .Lamd64_less_unsigned_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_less_unsigned_end\n\t"
            ".Lamd64_less_unsigned_true:\n\t"
            "mov $1,%rax\n\t"
            ".Lamd64_less_unsigned_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
                "movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
                "movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
                "movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
                "movq (%rax),%rax");
      break;
    }
}

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
            "mov %rax,%rcx\n\t"
            "pop %rax\n\t"
            "cmp $0,%rcx\n\t"
            ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}

static void
amd64_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
  *((LONGEST *) (&buf[i])) = num;
  i += 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;
  LONGEST offset64;

  /* The destination function, being in the shared library, may be
     more than 31 bits away from the compiled code pad.  */

  buildaddr = current_insn_ptr;

  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);

  i = 0;

  if (offset64 > INT_MAX || offset64 < INT_MIN)
    {
      /* Offset is too large for a call.  Use callq, but that requires
         a register, so avoid it if possible.  Use r10, since it is
         call-clobbered, we don't have to push/pop it.  */
      buf[i++] = 0x48; /* mov $fn,%r10 */
      buf[i++] = 0xba;
      memcpy (buf + i, &fn, 8);
      i += 8;
      buf[i++] = 0xff; /* callq *%r10 */
      buf[i++] = 0xd2;
    }
  else
    {
      int offset32 = offset64; /* we know we can't overflow here.  */

      buf[i++] = 0xe8; /* call <reladdr> */
      memcpy (buf + i, &offset32, 4);
      i += 4;
    }

  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  *((int *) (&buf[i])) = reg;
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
            "pop %rax");
}

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
            "push %rax");
}

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
                "and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
                "and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
                "mov $0xffffffff,%rcx\n\t"
                "and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
            "mov %rax,%rcx\n\t"
            "pop %rax\n\t"
            "push %rcx");
}

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  *((int *) (&buf[i])) = arg1;
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  *((int *) (&buf[i])) = arg1;
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
            /* Save away a copy of the stack top.  */
            "push %rax\n\t"
            /* Also pass top as the second argument.  */
            "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
            /* Restore the stack top, %rax may have been trashed.  */
            "pop %rax");
}

struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2
  };

#endif /* __x86_64__ */

static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
              "push %ebp\n\t"
              "mov %esp,%ebp");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}
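
/* Values on the 32-bit agent expression stack are 64 bits wide, kept
   in the %ebx:%eax pair (%ebx holds the high word), which is why the
   operations below come in add/adc and sub/sbb pairs.  */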
1995
1996static void
1997i386_emit_epilogue (void)
1998{
1999 EMIT_ASM32 (i386_epilogue,
2000 "mov 12(%ebp),%ecx\n\t"
2001 "mov %eax,(%ecx)\n\t"
2002 "mov %ebx,0x4(%ecx)\n\t"
2003 "xor %eax,%eax\n\t"
2004 "pop %ebp\n\t"
2005 "ret");
2006}
2007
2008static void
2009i386_emit_add (void)
2010{
2011 EMIT_ASM32 (i386_add,
2012 "add (%esp),%eax\n\t"
2013 "adc 0x4(%esp),%ebx\n\t"
2014 "lea 0x8(%esp),%esp");
2015}
2016
2017static void
2018i386_emit_sub (void)
2019{
2020 EMIT_ASM32 (i386_sub,
2021 "subl %eax,(%esp)\n\t"
2022 "sbbl %ebx,4(%esp)\n\t"
2023 "pop %eax\n\t"
2024 "pop %ebx\n\t");
2025}
2026
2027static void
2028i386_emit_mul (void)
2029{
2030 emit_error = 1;
2031}
2032
2033static void
2034i386_emit_lsh (void)
2035{
2036 emit_error = 1;
2037}
2038
2039static void
2040i386_emit_rsh_signed (void)
2041{
2042 emit_error = 1;
2043}
2044
2045static void
2046i386_emit_rsh_unsigned (void)
2047{
2048 emit_error = 1;
2049}
2050
2051static void
2052i386_emit_ext (int arg)
2053{
2054 switch (arg)
2055 {
2056 case 8:
2057 EMIT_ASM32 (i386_ext_8,
2058 "cbtw\n\t"
2059 "cwtl\n\t"
2060 "movl %eax,%ebx\n\t"
2061 "sarl $31,%ebx");
2062 break;
2063 case 16:
2064 EMIT_ASM32 (i386_ext_16,
2065 "cwtl\n\t"
2066 "movl %eax,%ebx\n\t"
2067 "sarl $31,%ebx");
2068 break;
2069 case 32:
2070 EMIT_ASM32 (i386_ext_32,
2071 "movl %eax,%ebx\n\t"
2072 "sarl $31,%ebx");
2073 break;
2074 default:
2075 emit_error = 1;
2076 }
2077}

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
              "or %ebx,%eax\n\t"
              "test %eax,%eax\n\t"
              "sete %cl\n\t"
              "xor %ebx,%ebx\n\t"
              "movzbl %cl,%eax");
}

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
              "and (%esp),%eax\n\t"
              "and 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
              "or (%esp),%eax\n\t"
              "or 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
              "xor (%esp),%eax\n\t"
              "xor 0x4(%esp),%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
              "xor $0xffffffff,%eax\n\t"
              "xor $0xffffffff,%ebx\n\t");
}

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
              "cmpl %ebx,4(%esp)\n\t"
              "jne .Li386_equal_false\n\t"
              "cmpl %eax,(%esp)\n\t"
              "je .Li386_equal_true\n\t"
              ".Li386_equal_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_equal_end\n\t"
              ".Li386_equal_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_equal_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}
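
/* This emitter and the two that follow share one pattern for the
   split 64-bit operands: compare the high words first, fall through
   to the low words only when the high words are equal, then
   materialize 0 or 1 in %ebx:%eax and pop the consumed entry.  In C
   terms, roughly:

     top = (next == top);    // or next < top for the two below
*/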

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
              "cmpl %ebx,4(%esp)\n\t"
              "jl .Li386_less_signed_true\n\t"
              "jne .Li386_less_signed_false\n\t"
              "cmpl %eax,(%esp)\n\t"
              /* The low words must compare unsigned (jb, not jl),
                 even in a signed 64-bit comparison.  */
              "jb .Li386_less_signed_true\n\t"
              ".Li386_less_signed_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_less_signed_end\n\t"
              ".Li386_less_signed_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_less_signed_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
              "cmpl %ebx,4(%esp)\n\t"
              "jb .Li386_less_unsigned_true\n\t"
              "jne .Li386_less_unsigned_false\n\t"
              "cmpl %eax,(%esp)\n\t"
              "jb .Li386_less_unsigned_true\n\t"
              ".Li386_less_unsigned_false:\n\t"
              "xor %eax,%eax\n\t"
              "jmp .Li386_less_unsigned_end\n\t"
              ".Li386_less_unsigned_true:\n\t"
              "mov $1,%eax\n\t"
              ".Li386_less_unsigned_end:\n\t"
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
                  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
                  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
                  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
                  "movl 4(%eax),%ebx\n\t"
                  "movl (%eax),%eax");
      break;
    }
}

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
              "mov %eax,%ecx\n\t"
              "or %ebx,%ecx\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              "cmpl $0,%ecx\n\t"
              /* Don't trust the assembler to choose the right jump */
              ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}
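
/* The *offset_p/*size_p values above describe where the 32-bit jump
   displacement lands, so that i386_write_goto_address can patch it.
   Counting the encoded bytes (a sketch, assuming the encodings gas
   normally picks for these instructions):

     89 c1              mov %eax,%ecx     2 bytes
     09 d9              or  %ebx,%ecx     2 bytes
     58                 pop %eax          1 byte
     5b                 pop %ebx          1 byte
     83 f9 00           cmpl $0,%ecx      3 bytes
     0f 85 xx xx xx xx  jne rel32         2 opcode + 4 displacement

   2+2+1+1+3+2 = 11 bytes precede the displacement, matching
   *offset_p, and the displacement itself is 4 bytes (*size_p).  */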

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
              /* Don't trust the assembler to choose the right jump */
              ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}
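
/* FROM is the address of the 32-bit displacement field recorded by
   the goto emitters above, so TO - (FROM + SIZE) is exactly the x86
   rel32 displacement: the jump target measured from the end of the
   jump instruction.  */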

static void
i386_emit_const (LONGEST num)
{
  unsigned char buf[16];
  int i, hi;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  *((int *) (&buf[i])) = (num & 0xffffffff);
  i += 4;
  hi = ((num >> 32) & 0xffffffff);
  if (hi)
    {
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
      *((int *) (&buf[i])) = hi;
      i += 4;
    }
  else
    {
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
    }
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
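
/* A worked example of the encoding above, for a hypothetical
   constant: i386_emit_const (0x1122334455667788LL) would append

     b8 88 77 66 55    mov $0x55667788,%eax    (low word)
     bb 44 33 22 11    mov $0x11223344,%ebx    (high word)

   while a constant whose high 32 bits are zero gets the two-byte
   "31 db" (xor %ebx,%ebx) instead of the second mov.  */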

static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}
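
/* 0xe8 is the IA-32 near call with a rel32 operand; the displacement
   is relative to the end of the 5-byte instruction, hence the
   BUILDADDR + 5 in the subtraction.  On a 32-bit inferior a rel32
   (with wraparound) reaches any address, which is presumably why no
   indirect-call form is emitted here.  */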

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
              "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  *((int *) (&buf[i])) = reg;
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
              "mov %eax,4(%esp)\n\t"
              "mov 8(%ebp),%eax\n\t"
              "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
              "xor %ebx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
              "pop %eax\n\t"
              "pop %ebx");
}

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
              "push %ebx\n\t"
              "push %eax");
}

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
                  "and $0xff,%eax\n\t"
                  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
                  "and $0xffff,%eax\n\t"
                  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
                  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
              "mov %eax,%ecx\n\t"
              "mov %ebx,%edx\n\t"
              "pop %eax\n\t"
              "pop %ebx\n\t"
              "push %edx\n\t"
              "push %ecx");
}

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
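
/* The bytes above encode "lea disp8(%esp),%esp" (8d 64 24 XX), where
   N counts 8-byte stack entries to drop, so the displacement is
   N * 8.  Note that the disp8 form only holds -128..127; N is
   assumed small enough for that here.  */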

/* FN's prototype is `LONGEST(*fn)(int)'.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
              /* Reserve a bit of stack space.  */
              "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  *((int *) (&buf[i])) = arg1;
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
              "mov %edx,%ebx\n\t"
              "lea 0x8(%esp),%esp");
}
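
/* FN returns a 64-bit value, which the i386 calling convention hands
   back in %edx:%eax; the trailing "mov %edx,%ebx" therefore leaves
   the result in this file's %ebx:%eax top-of-stack pair before the
   argument slot is popped.  */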

/* FN's prototype is `void(*fn)(int,LONGEST)'.  */

static void
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_void_call_2_a,
              /* Preserve %eax only; we don't have to worry about %ebx.  */
              "push %eax\n\t"
              /* Reserve a bit of stack space for arguments.  */
              "sub $0x10,%esp\n\t"
              /* Copy "top" to the second argument position.  (Note that
                 we can't assume the function won't scribble on its
                 arguments, so don't try to restore from this.)  */
              "mov %eax,4(%esp)\n\t"
              "mov %ebx,8(%esp)");
  /* Put the first argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  *((int *) (&buf[i])) = arg1;
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_void_call_2_b,
              "lea 0x10(%esp),%esp\n\t"
              /* Restore original stack top.  */
              "pop %eax");
}
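
/* A sketch of the stack the code above sets up for the call, under
   the 32-bit convention where a LONGEST argument takes two slots:

     (%esp)     arg1 (the constant patched in via the c7 04 24 bytes)
     4(%esp)    low word of the former top of stack (%eax)
     8(%esp)    high word of the former top of stack (%ebx)

   i.e. the call amounts to fn (arg1, top).  */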

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2
  };

static struct emit_ops *
x86_emit_ops (void)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    return &amd64_emit_ops;
  else
#endif
    return &i386_emit_ops;
}
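
/* register_size (0) reports the size of the first register (%eax
   vs. %rax) in whichever register set x86_arch_setup installed, so
   it doubles as a cheap 64-bit-inferior test.  A hypothetical caller
   would use the result like this:

     struct emit_ops *ops = x86_emit_ops ();
     ops->emit_prologue ();    // begin compiling an expression
*/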

/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.  */

struct linux_target_ops the_low_target =
{
  x86_arch_setup,
  -1,
  NULL,
  NULL,
  NULL,
  x86_get_pc,
  x86_set_pc,
  x86_breakpoint,
  x86_breakpoint_len,
  NULL,
  1,
  x86_breakpoint_at,
  x86_insert_point,
  x86_remove_point,
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */
  NULL,
  NULL,
  /* need to fix up i386 siginfo if host is amd64 */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_new_thread,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops
};