]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/gdbserver/linux-x86-low.c
* NEWS: Mention libthread_db debugging with core files.
[thirdparty/binutils-gdb.git] / gdb / gdbserver / linux-x86-low.c
CommitLineData
d0722149
DE
1/* GNU/Linux/x86-64 specific low level interface, for the remote server
2 for GDB.
4c38e0a4 3 Copyright (C) 2002, 2004, 2005, 2006, 2007, 2008, 2009, 2010
d0722149
DE
4 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
aa5ca48f 21#include <stddef.h>
d0722149 22#include <signal.h>
6a271cae 23#include <limits.h>
d0722149
DE
24#include "server.h"
25#include "linux-low.h"
26#include "i387-fp.h"
aa5ca48f 27#include "i386-low.h"
1570b33e
L
28#include "i386-xstate.h"
29#include "elf/common.h"
d0722149
DE
30
31#include "gdb_proc_service.h"
32
90884b2b 33/* Defined in auto-generated file i386-linux.c. */
d0722149 34void init_registers_i386_linux (void);
90884b2b
L
35/* Defined in auto-generated file amd64-linux.c. */
36void init_registers_amd64_linux (void);
1570b33e
L
37/* Defined in auto-generated file i386-avx-linux.c. */
38void init_registers_i386_avx_linux (void);
39/* Defined in auto-generated file amd64-avx-linux.c. */
40void init_registers_amd64_avx_linux (void);
3a13a53b
L
41/* Defined in auto-generated file i386-mmx-linux.c. */
42void init_registers_i386_mmx_linux (void);
1570b33e 43
fa593d66
PA
44static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
45
1570b33e
L
46/* Backward compatibility for gdb without XML support. */
47
48static const char *xmltarget_i386_linux_no_xml = "@<target>\
49<architecture>i386</architecture>\
50<osabi>GNU/Linux</osabi>\
51</target>";
f6d1620c
L
52
53#ifdef __x86_64__
1570b33e
L
54static const char *xmltarget_amd64_linux_no_xml = "@<target>\
55<architecture>i386:x86-64</architecture>\
56<osabi>GNU/Linux</osabi>\
57</target>";
f6d1620c 58#endif
d0722149
DE
59
60#include <sys/reg.h>
61#include <sys/procfs.h>
62#include <sys/ptrace.h>
1570b33e
L
63#include <sys/uio.h>
64
65#ifndef PTRACE_GETREGSET
66#define PTRACE_GETREGSET 0x4204
67#endif
68
69#ifndef PTRACE_SETREGSET
70#define PTRACE_SETREGSET 0x4205
71#endif
72
d0722149
DE
73
74#ifndef PTRACE_GET_THREAD_AREA
75#define PTRACE_GET_THREAD_AREA 25
76#endif
77
78/* This definition comes from prctl.h, but some kernels may not have it. */
79#ifndef PTRACE_ARCH_PRCTL
80#define PTRACE_ARCH_PRCTL 30
81#endif
82
83/* The following definitions come from prctl.h, but may be absent
84 for certain configurations. */
85#ifndef ARCH_GET_FS
86#define ARCH_SET_GS 0x1001
87#define ARCH_SET_FS 0x1002
88#define ARCH_GET_FS 0x1003
89#define ARCH_GET_GS 0x1004
90#endif
91
aa5ca48f
DE
/* Per-process arch-specific data we want to keep.  */

struct arch_process_info
{
  /* Mirror of this process's debug register state, maintained by the
     shared i386-low watchpoint code and flushed to each LWP in
     x86_linux_prepare_to_resume.  */
  struct i386_debug_reg_state debug_reg_state;
};
98
/* Per-thread arch-specific data we want to keep.  */

struct arch_lwp_info
{
  /* Non-zero if our copy differs from what's recorded in the thread;
     i.e. the thread's debug registers must be rewritten before it is
     next resumed (see x86_linux_prepare_to_resume).  */
  int debug_registers_changed;
};
106
d0722149
DE
#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX

/* Byte offsets into `struct user' in GDB register-number order; -1
   marks registers not transferred through this map (they are handled
   by other regsets).  The final entry is orig_rax.  */
static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#endif
157\f
158/* Called by libthread_db. */
159
160ps_err_e
161ps_get_thread_area (const struct ps_prochandle *ph,
162 lwpid_t lwpid, int idx, void **base)
163{
164#ifdef __x86_64__
165 int use_64bit = register_size (0) == 8;
166
167 if (use_64bit)
168 {
169 switch (idx)
170 {
171 case FS:
172 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
173 return PS_OK;
174 break;
175 case GS:
176 if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
177 return PS_OK;
178 break;
179 default:
180 return PS_BADADDR;
181 }
182 return PS_ERR;
183 }
184#endif
185
186 {
187 unsigned int desc[4];
188
189 if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
190 (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
191 return PS_ERR;
192
193 *(int *)base = desc[1];
194 return PS_OK;
195 }
196}
fa593d66
PA
197
/* Get the thread area address.  This is used to recognize which
   thread is which when tracing with the in-process agent library.  We
   don't read anything from the address, and treat it as opaque; it's
   the address itself that we assume is unique per-thread.  Returns 0
   on success and stores the address in *ADDR, -1 on failure.  */

static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      /* 64-bit inferior: the thread area is simply the FS base.  */
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
	{
	  *addr = (CORE_ADDR) (uintptr_t) base;
	  return 0;
	}

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct regcache *regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    /* Drop the low 3 selector bits to obtain the GDT entry index.  */
    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
		lwpid_of (lwp), (void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    /* desc[1] holds the descriptor's base address word.  */
    *addr = desc[1];
    return 0;
  }
}
242
243
d0722149
DE
244\f
245static int
246i386_cannot_store_register (int regno)
247{
248 return regno >= I386_NUM_REGS;
249}
250
251static int
252i386_cannot_fetch_register (int regno)
253{
254 return regno >= I386_NUM_REGS;
255}
256
/* Fill BUF, a general-register block in `struct user' layout, from
   the register cache.  A 64-bit inferior uses the x86-64 map;
   otherwise the i386 map is used, plus orig_eax which is not part of
   the regmap.  */

static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  /* orig_eax lives past the mapped registers, at offset ORIG_EAX * 4.  */
  collect_register_by_name (regcache, "orig_eax",
			    ((char *) buf) + ORIG_EAX * 4);
}
278
/* Store BUF, a general-register block in `struct user' layout, into
   the register cache.  Mirror image of x86_fill_gregset.  */

static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (0) == 8)
    {
      for (i = 0; i < X86_64_NUM_REGS; i++)
	if (x86_64_regmap[i] != -1)
	  supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  /* orig_eax lives past the mapped registers, at offset ORIG_EAX * 4.  */
  supply_register_by_name (regcache, "orig_eax",
			   ((char *) buf) + ORIG_EAX * 4);
}
300
/* Fill BUF with the FP register set from the register cache, in the
   native (fxsave on amd64, fsave on i386) layout.  */

static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}
310
/* Store the FP register set in BUF (fxsave on amd64, fsave on i386
   layout) into the register cache.  */

static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}
320
#ifndef __x86_64__

/* Fill BUF with the extended (fxsave-layout) FP register set from the
   register cache.  i386 only; on amd64 the plain fpregset is already
   fxsave layout.  */

static void
x86_fill_fpxregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_fxsave (regcache, buf);
}

/* Store the extended (fxsave-layout) FP register set in BUF into the
   register cache.  i386 only.  */

static void
x86_store_fpxregset (struct regcache *regcache, const void *buf)
{
  i387_fxsave_to_cache (regcache, buf);
}

#endif
336
1570b33e
L
/* Fill BUF with the xsave-layout extended state from the register
   cache.  */

static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}

/* Store the xsave-layout extended state in BUF into the register
   cache.  */

static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
348
d0722149
DE
/* ??? The non-biarch i386 case stores all the i387 regs twice.
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
   doesn't work.  IWBN to avoid the duplication in the case where it
   does work.  Maybe the arch_setup routine could check whether it works
   and update target_regsets accordingly, maybe by moving target_regsets
   to linux_target_ops and set the right one there, rather than having to
   modify the target_regsets global.  */

struct regset_info target_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  /* The xstate regset's size starts at 0 and is filled in at runtime
     by x86_linux_update_xmltarget once PTRACE_GETREGSET support and
     the XCR0 mask have been probed.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
# ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
# endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  /* Terminating sentinel (size -1).  */
  { 0, 0, 0, -1, -1, NULL, NULL }
};
379
380static CORE_ADDR
442ea881 381x86_get_pc (struct regcache *regcache)
d0722149
DE
382{
383 int use_64bit = register_size (0) == 8;
384
385 if (use_64bit)
386 {
387 unsigned long pc;
442ea881 388 collect_register_by_name (regcache, "rip", &pc);
d0722149
DE
389 return (CORE_ADDR) pc;
390 }
391 else
392 {
393 unsigned int pc;
442ea881 394 collect_register_by_name (regcache, "eip", &pc);
d0722149
DE
395 return (CORE_ADDR) pc;
396 }
397}
398
399static void
442ea881 400x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
d0722149
DE
401{
402 int use_64bit = register_size (0) == 8;
403
404 if (use_64bit)
405 {
406 unsigned long newpc = pc;
442ea881 407 supply_register_by_name (regcache, "rip", &newpc);
d0722149
DE
408 }
409 else
410 {
411 unsigned int newpc = pc;
442ea881 412 supply_register_by_name (regcache, "eip", &newpc);
d0722149
DE
413 }
414}
415\f
416static const unsigned char x86_breakpoint[] = { 0xCC };
417#define x86_breakpoint_len 1
418
419static int
420x86_breakpoint_at (CORE_ADDR pc)
421{
422 unsigned char c;
423
fc7238bb 424 (*the_target->read_memory) (pc, &c, 1);
d0722149
DE
425 if (c == 0xCC)
426 return 1;
427
428 return 0;
429}
430\f
aa5ca48f
DE
/* Support for debug registers.  */

/* Return the value of debug register REGNUM of the LWP in PTID, read
   via PTRACE_PEEKUSER.  Since PEEKUSER returns the data in the return
   value, errno must be cleared beforehand and checked afterwards to
   detect failure; calls error () on failure.  */

static unsigned long
x86_linux_dr_get (ptid_t ptid, int regnum)
{
  int tid;
  unsigned long value;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  value = ptrace (PTRACE_PEEKUSER, tid,
		  offsetof (struct user, u_debugreg[regnum]), 0);
  if (errno != 0)
    error ("Couldn't read debug register");

  return value;
}
449
/* Write VALUE to debug register REGNUM of the LWP in PTID via
   PTRACE_POKEUSER; calls error () on failure.  */

static void
x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
{
  int tid;

  tid = ptid_get_lwp (ptid);

  errno = 0;
  ptrace (PTRACE_POKEUSER, tid,
	  offsetof (struct user, u_debugreg[regnum]), value);
  if (errno != 0)
    error ("Couldn't write debug register");
}
463
964e4306
PA
/* find_inferior callback: mark the LWP in ENTRY as needing a debug
   register update if it belongs to the process whose pid is pointed
   to by PID_P.  Always returns 0 so iteration visits every LWP.  */

static int
update_debug_registers_callback (struct inferior_list_entry *entry,
				 void *pid_p)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  int pid = *(int *) pid_p;

  /* Only update the threads of this process.  */
  if (pid_of (lwp) == pid)
    {
      /* The actual update is done later just before resuming the lwp,
	 we just mark that the registers need updating.  */
      lwp->arch_private->debug_registers_changed = 1;

      /* If the lwp isn't stopped, force it to momentarily pause, so
	 we can update its debug registers.  */
      if (!lwp->stopped)
	linux_stop_lwp (lwp);
    }

  return 0;
}
486
aa5ca48f
DE
/* Update the inferior's debug register REGNUM from STATE.  The write
   itself is deferred: every LWP of the current process is flagged via
   update_debug_registers_callback and the registers are written just
   before each LWP resumes.  */

void
i386_dr_low_set_addr (const struct i386_debug_reg_state *state, int regnum)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (get_thread_lwp (current_inferior));

  if (! (regnum >= 0 && regnum <= DR_LASTADDR - DR_FIRSTADDR))
    fatal ("Invalid debug register %d", regnum);

  find_inferior (&all_lwps, update_debug_registers_callback, &pid);
}
aa5ca48f 500
964e4306 501/* Return the inferior's debug register REGNUM. */
aa5ca48f 502
964e4306
PA
503CORE_ADDR
504i386_dr_low_get_addr (int regnum)
505{
506 struct lwp_info *lwp = get_thread_lwp (current_inferior);
507 ptid_t ptid = ptid_of (lwp);
508
509 /* DR6 and DR7 are retrieved with some other way. */
510 gdb_assert (DR_FIRSTADDR <= regnum && regnum < DR_LASTADDR);
511
512 return x86_linux_dr_get (ptid, regnum);
aa5ca48f
DE
513}
514
/* Update the inferior's DR7 debug control register from STATE.  As
   with i386_dr_low_set_addr, the write is deferred until each LWP is
   next resumed.  */

void
i386_dr_low_set_control (const struct i386_debug_reg_state *state)
{
  /* Only update the threads of this process.  */
  int pid = pid_of (get_thread_lwp (current_inferior));

  find_inferior (&all_lwps, update_debug_registers_callback, &pid);
}
aa5ca48f 525
964e4306
PA
/* Return the inferior's DR7 debug control register, read from the
   current thread.  */

unsigned
i386_dr_low_get_control (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  return x86_linux_dr_get (ptid, DR_CONTROL);
}
536
/* Get the value of the DR6 debug status register from the inferior
   and record it in STATE.  */

unsigned
i386_dr_low_get_status (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
  ptid_t ptid = ptid_of (lwp);

  return x86_linux_dr_get (ptid, DR_STATUS);
}
548\f
/* Watchpoint support.  */

/* Insert a break-/watchpoint of TYPE at ADDR covering LEN bytes.  The
   TYPE characters follow the GDB Z-packet encoding: '0' is a software
   breakpoint, '2'..'4' are write/read/access watchpoints implemented
   with the debug registers.  Other types (including '1', hardware
   breakpoint) are unsupported here.  Returns 0 on success, non-zero
   otherwise.  */

static int
x86_insert_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();
  switch (type)
    {
    case '0':
      return set_gdb_breakpoint_at (addr);
    case '2':
    case '3':
    case '4':
      return i386_low_insert_watchpoint (&proc->private->arch_private->debug_reg_state,
					 type, addr, len);
    default:
      /* Unsupported.  */
      return 1;
    }
}
569
/* Remove a break-/watchpoint of TYPE at ADDR covering LEN bytes.
   Mirror image of x86_insert_point; returns 0 on success, non-zero
   otherwise.  */

static int
x86_remove_point (char type, CORE_ADDR addr, int len)
{
  struct process_info *proc = current_process ();
  switch (type)
    {
    case '0':
      return delete_gdb_breakpoint_at (addr);
    case '2':
    case '3':
    case '4':
      return i386_low_remove_watchpoint (&proc->private->arch_private->debug_reg_state,
					 type, addr, len);
    default:
      /* Unsupported.  */
      return 1;
    }
}
588
/* Return non-zero if the current process stopped because a watchpoint
   triggered, according to its mirrored debug register state.  */

static int
x86_stopped_by_watchpoint (void)
{
  struct process_info *proc = current_process ();
  return i386_low_stopped_by_watchpoint (&proc->private->arch_private->debug_reg_state);
}
595
/* Return the data address that triggered the last watchpoint hit, or
   0 if none can be determined.  */

static CORE_ADDR
x86_stopped_data_address (void)
{
  struct process_info *proc = current_process ();
  CORE_ADDR addr;
  if (i386_low_stopped_data_address (&proc->private->arch_private->debug_reg_state,
				     &addr))
    return addr;
  return 0;
}
606\f
/* Called when a new process is created.  Allocate (zeroed) per-process
   arch data and initialize its debug register mirror.  Ownership of
   the returned struct passes to the caller.  */

static struct arch_process_info *
x86_linux_new_process (void)
{
  struct arch_process_info *info = xcalloc (1, sizeof (*info));

  i386_low_init_dregs (&info->debug_reg_state);

  return info;
}
618
/* Called when a new thread is detected.  Allocate per-thread arch
   data; the debug registers are marked changed so they get written
   the first time the thread is resumed.  */

static struct arch_lwp_info *
x86_linux_new_thread (void)
{
  struct arch_lwp_info *info = xcalloc (1, sizeof (*info));

  info->debug_registers_changed = 1;

  return info;
}
630
/* Called when resuming a thread.
   If the debug regs have changed, update the thread's copies.  Also
   clears DR_STATUS if the thread had stopped for a watchpoint, so a
   stale status is not seen on the next stop.  */

static void
x86_linux_prepare_to_resume (struct lwp_info *lwp)
{
  ptid_t ptid = ptid_of (lwp);

  if (lwp->arch_private->debug_registers_changed)
    {
      int i;
      int pid = ptid_get_pid (ptid);
      struct process_info *proc = find_process_pid (pid);
      struct i386_debug_reg_state *state = &proc->private->arch_private->debug_reg_state;

      /* Copy the process-wide mirror of DR0..DR3 and DR7 into this
	 thread's actual debug registers.  */
      for (i = DR_FIRSTADDR; i <= DR_LASTADDR; i++)
	x86_linux_dr_set (ptid, i, state->dr_mirror[i]);

      x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);

      lwp->arch_private->debug_registers_changed = 0;
    }

  if (lwp->stopped_by_watchpoint)
    x86_linux_dr_set (ptid, DR_STATUS, 0);
}
657\f
d0722149
DE
658/* When GDBSERVER is built as a 64-bit application on linux, the
659 PTRACE_GETSIGINFO data is always presented in 64-bit layout. Since
660 debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
661 as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
662 conversion in-place ourselves. */
663
664/* These types below (compat_*) define a siginfo type that is layout
665 compatible with the siginfo type exported by the 32-bit userspace
666 support. */
667
#ifdef __x86_64__

/* Types laid out exactly like the 32-bit userspace siginfo, used to
   convert between the 64-bit native siginfo and what a 32-bit
   inferior (and a 32-bit GDB) expects.  */

typedef int compat_int_t;
typedef unsigned int compat_uptr_t;

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;

struct compat_timeval
{
  compat_time_t tv_sec;
  int tv_usec;
};

typedef union compat_sigval
{
  compat_int_t sival_int;
  compat_uptr_t sival_ptr;
} compat_sigval_t;

typedef struct compat_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    /* Pad to the 128-byte overall siginfo size minus the 3 leading
       ints.  */
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_clock_t _utime;
      compat_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_siginfo_t;
746
747#define cpt_si_pid _sifields._kill._pid
748#define cpt_si_uid _sifields._kill._uid
749#define cpt_si_timerid _sifields._timer._tid
750#define cpt_si_overrun _sifields._timer._overrun
751#define cpt_si_status _sifields._sigchld._status
752#define cpt_si_utime _sifields._sigchld._utime
753#define cpt_si_stime _sifields._sigchld._stime
754#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
755#define cpt_si_addr _sifields._sigfault._addr
756#define cpt_si_band _sifields._sigpoll._band
757#define cpt_si_fd _sifields._sigpoll._fd
758
759/* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
760 In their place is si_timer1,si_timer2. */
761#ifndef si_timerid
762#define si_timerid si_timer1
763#endif
764#ifndef si_overrun
765#define si_overrun si_timer2
766#endif
767
/* Convert the native 64-bit siginfo FROM into the 32-bit compat
   layout TO.  The si_code / si_signo values select which union member
   is live; pointer-valued fields are narrowed through intptr_t.  */

static void
compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code < 0)
    {
      /* Negative si_code: queued with a sigval payload.  */
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_status = from->si_status;
	  to->cpt_si_utime = from->si_utime;
	  to->cpt_si_stime = from->si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->cpt_si_addr = (intptr_t) from->si_addr;
	  break;
	case SIGPOLL:
	  to->cpt_si_band = from->si_band;
	  to->cpt_si_fd = from->si_fd;
	  break;
	default:
	  to->cpt_si_pid = from->si_pid;
	  to->cpt_si_uid = from->si_uid;
	  to->cpt_si_ptr = (intptr_t) from->si_ptr;
	  break;
	}
    }
}
821
/* Convert the 32-bit compat siginfo FROM into the native 64-bit
   layout TO.  Mirror image of compat_siginfo_from_siginfo; 32-bit
   address fields are widened through intptr_t.  */

static void
siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  if (to->si_code < 0)
    {
      /* Negative si_code: queued with a sigval payload.  */
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      switch (to->si_signo)
	{
	case SIGCHLD:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_status = from->cpt_si_status;
	  to->si_utime = from->cpt_si_utime;
	  to->si_stime = from->cpt_si_stime;
	  break;
	case SIGILL:
	case SIGFPE:
	case SIGSEGV:
	case SIGBUS:
	  to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
	  break;
	case SIGPOLL:
	  to->si_band = from->cpt_si_band;
	  to->si_fd = from->cpt_si_fd;
	  break;
	default:
	  to->si_pid = from->cpt_si_pid;
	  to->si_uid = from->cpt_si_uid;
	  to->si_ptr = (void* ) (intptr_t) from->cpt_si_ptr;
	  break;
	}
    }
}
875
876#endif /* __x86_64__ */
877
/* Convert a native/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  Returns true if any
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
   from INF to NATIVE.  If DIRECTION is 0, copy from NATIVE to
   INF.  */

static int
x86_siginfo_fixup (struct siginfo *native, void *inf, int direction)
{
#ifdef __x86_64__
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (register_size (0) == 4)
    {
      /* The compat layout must occupy exactly the same space for the
	 in-place conversion to be valid.  */
      if (sizeof (struct siginfo) != sizeof (compat_siginfo_t))
	fatal ("unexpected difference in siginfo");

      if (direction == 0)
	compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
      else
	siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);

      return 1;
    }
#endif

  return 0;
}
905\f
1570b33e
L
906static int use_xml;
907
/* Update gdbserver_xmltarget.  Select the register description that
   matches the inferior (i386 vs amd64, MMX/SSE/AVX level), probing
   the kernel once for PTRACE_GETFPXREGS and PTRACE_GETREGSET support
   and adjusting target_regsets sizes accordingly.  The probe results
   are cached in function-local statics across calls.  */

static void
x86_linux_update_xmltarget (void)
{
  int pid;
  struct regset_info *regset;
  static unsigned long long xcr0;
  static int have_ptrace_getregset = -1;
#if !defined(__x86_64__) && defined(HAVE_PTRACE_GETFPXREGS)
  static int have_ptrace_getfpxregs = -1;
#endif

  if (!current_inferior)
    return;

  /* Before changing the register cache internal layout or the target
     regsets, flush the contents of the current valid caches back to
     the threads.  */
  regcache_invalidate ();

  pid = pid_of (get_thread_lwp (current_inferior));
#ifdef __x86_64__
  /* 8 xmm registers means a 32-bit inferior under a 64-bit server.  */
  if (num_xmm_registers == 8)
    init_registers_i386_linux ();
  else
    init_registers_amd64_linux ();
#else
  {
# ifdef HAVE_PTRACE_GETFPXREGS
    if (have_ptrace_getfpxregs == -1)
      {
	elf_fpxregset_t fpxregs;

	if (ptrace (PTRACE_GETFPXREGS, pid, 0, (int) &fpxregs) < 0)
	  {
	    have_ptrace_getfpxregs = 0;
	    x86_xcr0 = I386_XSTATE_X87_MASK;

	    /* Disable PTRACE_GETFPXREGS.  */
	    for (regset = target_regsets;
		 regset->fill_function != NULL; regset++)
	      if (regset->get_request == PTRACE_GETFPXREGS)
		{
		  regset->size = 0;
		  break;
		}
	  }
	else
	  have_ptrace_getfpxregs = 1;
      }

    if (!have_ptrace_getfpxregs)
      {
	/* No fxsave support: fall back to the x87/MMX-only
	   description.  */
	init_registers_i386_mmx_linux ();
	return;
      }
# endif
    init_registers_i386_linux ();
  }
#endif

  if (!use_xml)
    {
      /* Don't use XML.  */
#ifdef __x86_64__
      if (num_xmm_registers == 8)
	gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
      else
	gdbserver_xmltarget = xmltarget_amd64_linux_no_xml;
#else
      gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
#endif

      x86_xcr0 = I386_XSTATE_SSE_MASK;

      return;
    }

  /* Check if XSAVE extended state is supported.  */
  if (have_ptrace_getregset == -1)
    {
      unsigned long long xstateregs[I386_XSTATE_SSE_SIZE / sizeof (long long)];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, pid, (unsigned int) NT_X86_XSTATE,
		  &iov) < 0)
	{
	  have_ptrace_getregset = 0;
	  return;
	}
      else
	have_ptrace_getregset = 1;

      /* Get XCR0 from XSAVE extended state at byte 464.  */
      xcr0 = xstateregs[464 / sizeof (long long)];

      /* Use PTRACE_GETREGSET if it is available.  Size the xstate
	 regset for the reported XCR0 and disable the now-redundant
	 non-GPR regsets.  */
      for (regset = target_regsets;
	   regset->fill_function != NULL; regset++)
	if (regset->get_request == PTRACE_GETREGSET)
	  regset->size = I386_XSTATE_SIZE (xcr0);
	else if (regset->type != GENERAL_REGS)
	  regset->size = 0;
    }

  if (have_ptrace_getregset)
    {
      /* AVX is the highest feature we support.  */
      if ((xcr0 & I386_XSTATE_AVX_MASK) == I386_XSTATE_AVX_MASK)
	{
	  x86_xcr0 = xcr0;

#ifdef __x86_64__
	  /* I386 has 8 xmm regs.  */
	  if (num_xmm_registers == 8)
	    init_registers_i386_avx_linux ();
	  else
	    init_registers_amd64_avx_linux ();
#else
	  init_registers_i386_avx_linux ();
#endif
	}
    }
}
1037
/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
   PTRACE_GETREGSET.  */

static void
x86_linux_process_qsupported (const char *query)
{
  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
     with "i386" in qSupported query, it supports x86 XML target
     descriptions.  */
  use_xml = 0;
  if (query != NULL && strncmp (query, "xmlRegisters=", 13) == 0)
    {
      /* Copy the value so strtok can modify it in place.  */
      char *copy = xstrdup (query + 13);
      char *p;

      /* The value is a comma-separated list of architecture names.  */
      for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
	{
	  if (strcmp (p, "i386") == 0)
	    {
	      use_xml = 1;
	      break;
	    }
	}

      free (copy);
    }

  x86_linux_update_xmltarget ();
}
1067
/* Initialize gdbserver for the architecture of the inferior.  Decides
   between 32-bit and 64-bit register handling (on a biarch build, by
   inspecting the inferior's executable), then refreshes the XML
   target description.  */

static void
x86_arch_setup (void)
{
#ifdef __x86_64__
  int pid = pid_of (get_thread_lwp (current_inferior));
  char *file = linux_child_pid_to_exec_file (pid);
  int use_64bit = elf_64_file_p (file);

  free (file);

  if (use_64bit < 0)
    {
      /* This can only happen if /proc/<pid>/exe is unreadable,
	 but "that can't happen" if we've gotten this far.
	 Fall through and assume this is a 32-bit program.  */
    }
  else if (use_64bit)
    {
      /* Amd64 doesn't have HAVE_LINUX_USRREGS.  */
      the_low_target.num_regs = -1;
      the_low_target.regmap = NULL;
      the_low_target.cannot_fetch_register = NULL;
      the_low_target.cannot_store_register = NULL;

      /* Amd64 has 16 xmm regs.  */
      num_xmm_registers = 16;

      x86_linux_update_xmltarget ();
      return;
    }
#endif

  /* Ok we have a 32-bit inferior.  */

  the_low_target.num_regs = I386_NUM_REGS;
  the_low_target.regmap = i386_regmap;
  the_low_target.cannot_fetch_register = i386_cannot_fetch_register;
  the_low_target.cannot_store_register = i386_cannot_store_register;

  /* I386 has 8 xmm regs.  */
  num_xmm_registers = 8;

  x86_linux_update_xmltarget ();
}
1114
219f2f23
PA
/* Tracepoints are supported on this target.  */

static int
x86_supports_tracepoints (void)
{
  return 1;
}
1120
fa593d66
PA
/* Append LEN bytes from BUF to inferior memory at *TO, advancing *TO
   past the bytes written.  NOTE(review): the result of
   write_inferior_memory is ignored here; a failed write goes
   unnoticed by callers.  */

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}
1127
/* Decode OP, a string of whitespace-separated hex byte values such as
   "48 89 04 24", into BUF.  Returns the number of bytes written.  */

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *dst = buf;
  char *cursor = op;

  for (;;)
    {
      char *after;
      unsigned long byte = strtoul (cursor, &after, 16);

      /* strtoul consumed nothing: end of the opcode string.  */
      if (after == cursor)
	break;

      *dst++ = byte;
      cursor = after;
    }

  return dst - buf;
}
1147
1148#ifdef __x86_64__
1149
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.

   Pad layout, in emission order:
     1. save all GPRs, the flags, and TPADDR (the tracepoint PC);
     2. build a collecting_t object (tracepoint pointer plus the
	thread's %fs:0 value) in 0x18 bytes of scratch stack;
     3. spin on a lock-cmpxchg lock at LOCKADDR;
     4. call COLLECTOR with the tracepoint and the saved-register
	block;
     5. release the lock, drop the scratch stack, restore registers;
     6. a relocated copy of the original instruction, then a jump
	back to TPADDR + ORIG_SIZE.

   Returns 0.  *JUMP_ENTRY is advanced to the end of the pad;
   [*ADJUSTED_INSN_ADDR, *ADJUSTED_INSN_ADDR_END) bounds the
   relocated instruction copy.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
					CORE_ADDR collector,
					CORE_ADDR lockaddr,
					ULONGEST orig_size,
					CORE_ADDR *jump_entry,
					unsigned char *jjump_pad_insn,
					ULONGEST *jjump_pad_insn_size,
					CORE_ADDR *adjusted_insn_addr,
					CORE_ADDR *adjusted_insn_addr_end)
{
  unsigned char buf[40];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movabs $<tpaddr>,%rdi */
  buf[i++] = 0xbf;
  *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
  i += sizeof (unsigned long);
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");	/* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");		/* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");	/* mov %rax,(%rsp) */
  /* Thread identifier: the TLS base from %fs.  */
  i += push_opcode (&buf[i],
		    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08");	/* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Try to set *lockaddr (expected 0) to our collecting_t
     pointer; loop until the cmpxchg succeeds.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");		/* movabs <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");	/* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");	/* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");	/* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");	/* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");		/* movabs <tpoint>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");		/* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");		/* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");		/* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");	/* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state.  The add skips the saved tpaddr slot
     pushed before the collecting_t object.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  jump_insn is a
     5-byte template: opcode at byte 0, 32-bit displacement at
     bytes 1-4.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  offset = *jump_entry - (tpaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
1311
1312#endif /* __x86_64__ */
1313
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.

   IA32 variant of the pad built by
   amd64_install_fast_tracepoint_jump_pad above: saves the GPRs
   (pushad), TPADDR, the flags and the segment registers; builds a
   collecting_t object on 8 bytes of scratch stack; takes a
   lock-cmpxchg spin lock at LOCKADDR; calls COLLECTOR; then unwinds
   everything, executes a relocated copy of the original instruction
   and jumps back to TPADDR + ORIG_SIZE.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				       CORE_ADDR collector,
				       CORE_ADDR lockaddr,
				       ULONGEST orig_size,
				       CORE_ADDR *jump_entry,
				       unsigned char *jjump_pad_insn,
				       ULONGEST *jjump_pad_insn_size,
				       CORE_ADDR *adjusted_insn_addr,
				       CORE_ADDR *adjusted_insn_addr_end)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");		/* mov <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");	   /* mov %eax,(%esp) */

  /* Thread identifier: the TLS base from %gs.  */
  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");	   /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");	/* lock cmpxchg
						   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");		/* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");		/* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");		/* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");	/* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");	/* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");	/* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");	/* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);

  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");		/* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");		/* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");	/* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  /* Restore the saved state in reverse push order.  */
  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  jump_insn is a 5-byte
     template: opcode at byte 0, 32-bit displacement at bytes 1-4.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  offset = *jump_entry - (tpaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
1469
/* Dispatch to the jump pad builder that matches the inferior: the
   amd64 variant when register 0 is 8 bytes wide (presumably
   indicating a 64-bit inferior -- register_size is defined
   elsewhere), otherwise the i386 variant.  */

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
				      CORE_ADDR collector,
				      CORE_ADDR lockaddr,
				      ULONGEST orig_size,
				      CORE_ADDR *jump_entry,
				      unsigned char *jjump_pad_insn,
				      ULONGEST *jjump_pad_insn_size,
				      CORE_ADDR *adjusted_insn_addr,
				      CORE_ADDR *adjusted_insn_addr_end)
{
#ifdef __x86_64__
  if (register_size (0) == 8)
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						   collector, lockaddr,
						   orig_size, jump_entry,
						   jjump_pad_insn,
						   jjump_pad_insn_size,
						   adjusted_insn_addr,
						   adjusted_insn_addr_end);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
						collector, lockaddr,
						orig_size, jump_entry,
						jjump_pad_insn,
						jjump_pad_insn_size,
						adjusted_insn_addr,
						adjusted_insn_addr_end);
}
1500
6a271cae
PA
1501static void
1502add_insns (unsigned char *start, int len)
1503{
1504 CORE_ADDR buildaddr = current_insn_ptr;
1505
1506 if (debug_threads)
1507 fprintf (stderr, "Adding %d bytes of insn at %s\n",
1508 len, paddress (buildaddr));
1509
1510 append_insns (&buildaddr, len, start);
1511 current_insn_ptr = buildaddr;
1512}
1513
6a271cae
PA
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

/* Assemble INSNS in place between start_NAME/end_NAME labels, then
   copy those bytes into the inferior with add_insns.  The leading jmp
   ensures the host never executes the template instructions.  */

#define EMIT_ASM(NAME, INSNS) \
  do \
    { \
      extern unsigned char start_ ## NAME, end_ ## NAME; \
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
      __asm__ ("jmp end_" #NAME "\n" \
	       "\t" "start_" #NAME ":" \
	       "\t" INSNS "\n" \
	       "\t" "end_" #NAME ":"); \
    } while (0)
6a271cae
PA
1530
#ifdef __x86_64__

/* Like EMIT_ASM, but assembles the template in 32-bit mode
   (.code32/.code64 brackets), so a 64-bit gdbserver can emit IA32
   code.  On 32-bit builds this is simply EMIT_ASM.  */

#define EMIT_ASM32(NAME,INSNS) \
  do \
    { \
      extern unsigned char start_ ## NAME, end_ ## NAME; \
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME); \
      __asm__ (".code32\n" \
	       "\t" "jmp end_" #NAME "\n" \
	       "\t" "start_" #NAME ":\n" \
	       "\t" INSNS "\n" \
	       "\t" "end_" #NAME ":\n" \
	       ".code64\n"); \
    } while (0)

#else

#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
1551
1552#ifdef __x86_64__
1553
/* Emit the compiled expression's prologue: set up an %rbp frame,
   reserve 0x20 bytes, and spill the two incoming arguments (%rdi,
   %rsi) at -8(%rbp) and -16(%rbp).  */

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
	    "pushq %rbp\n\t"
	    "movq %rsp,%rbp\n\t"
	    "sub $0x20,%rsp\n\t"
	    "movq %rdi,-8(%rbp)\n\t"
	    "movq %rsi,-16(%rbp)");
}
1564
1565
/* Emit the epilogue: store the %rax result through the pointer saved
   at -16(%rbp) (the second incoming argument), return 0 in %rax, and
   tear down the frame.  */

static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
	    "movq -16(%rbp),%rdi\n\t"
	    "movq %rax,(%rdi)\n\t"
	    "xor %rax,%rax\n\t"
	    "leave\n\t"
	    "ret");
}
1576
/* Emit code to add the top stack slot into the %rax accumulator and
   discard the slot.  (The compiled expression keeps its top-of-stack
   value in %rax and spills the rest to the machine stack; see
   amd64_emit_stack_flush/amd64_emit_pop.)  */

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
	    "add (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit code for subtraction: subtract %rax from the spilled slot,
   then pop that result into %rax.  */

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
	    "sub %rax,(%rsp)\n\t"
	    "pop %rax");
}
1592
/* Operations with no amd64 emitter: flag emit_error so the caller
   can abandon compilation of this expression.  */

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}
1616
/* Emit code to sign-extend the low ARG bits of %rax to 64 bits,
   using the cbtw/cwtl/cltq chain.  Widths other than 8/16/32 set
   emit_error.  */

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
		"cbtw\n\t"
		"cwtl\n\t"
		"cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
		"cwtl\n\t"
		"cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
		"cltq");
      break;
    default:
      emit_error = 1;
    }
}
1641
/* Emit logical NOT of %rax: 1 if %rax was zero, else 0.  */

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
	    "test %rax,%rax\n\t"
	    "sete %cl\n\t"
	    "movzbq %cl,%rax");
}
1650
/* Emit bitwise AND of %rax with the top stack slot, discarding the
   slot.  */

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
	    "and (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit bitwise OR, same stack discipline as amd64_emit_bit_and.  */

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
	    "or (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit bitwise XOR, same stack discipline as amd64_emit_bit_and.  */

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
	    "xor (%rsp),%rax\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit bitwise complement of %rax (xor with all-ones).  */

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
	    "xorq $0xffffffffffffffff,%rax");
}
1681
/* Emit code to compare the top stack slot with %rax and leave 1 in
   %rax if they are equal, else 0; the slot is discarded.  */

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
	    "cmp %rax,(%rsp)\n\t"
	    "je .Lamd64_equal_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_equal_end\n\t"
	    ".Lamd64_equal_true:\n\t"
	    "mov $0x1,%rax\n\t"
	    ".Lamd64_equal_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Emit signed (%rsp) < %rax comparison; result 1/0 in %rax, slot
   discarded.  */

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
	    "cmp %rax,(%rsp)\n\t"
	    "jl .Lamd64_less_signed_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_signed_end\n\t"
	    ".Lamd64_less_signed_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_signed_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}

/* Unsigned variant of amd64_emit_less_signed (jb instead of jl).  */

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
	    "cmp %rax,(%rsp)\n\t"
	    "jb .Lamd64_less_unsigned_true\n\t"
	    "xor %rax,%rax\n\t"
	    "jmp .Lamd64_less_unsigned_end\n\t"
	    ".Lamd64_less_unsigned_true:\n\t"
	    "mov $1,%rax\n\t"
	    ".Lamd64_less_unsigned_end:\n\t"
	    "lea 0x8(%rsp),%rsp");
}
1723
/* Emit code to replace the address in %rax with the SIZE-byte value
   it points to (in the inferior).  Unsupported sizes emit nothing.  */

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
		"movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
		"movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
		"movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
		"movq (%rax),%rax");
      break;
    }
}
1747
/* Emit a conditional branch on the popped top-of-stack being
   non-zero.  The jne (0f 85) displacement is left zero; the caller
   patches it later via amd64_write_goto_address.  *OFFSET_P = 10:
   mov (3) + pop (1) + cmp (4) + jne opcode (2) bytes precede the
   4-byte displacement.  */

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "cmp $0,%rcx\n\t"
	    ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

/* Emit an unconditional jmp (e9) with a zero displacement to be
   patched later; the displacement starts at offset 1.  */

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
	    ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}
1772
1773static void
1774amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
1775{
1776 int diff = (to - (from + size));
1777 unsigned char buf[sizeof (int)];
1778
1779 if (size != 4)
1780 {
1781 emit_error = 1;
1782 return;
1783 }
1784
1785 memcpy (buf, &diff, sizeof (int));
1786 write_inferior_memory (from, buf, sizeof (int));
1787}
1788
1789static void
4e29fb54 1790amd64_emit_const (LONGEST num)
6a271cae
PA
1791{
1792 unsigned char buf[16];
1793 int i;
1794 CORE_ADDR buildaddr = current_insn_ptr;
1795
1796 i = 0;
1797 buf[i++] = 0x48; buf[i++] = 0xb8; /* mov $<n>,%rax */
4e29fb54 1798 *((LONGEST *) (&buf[i])) = num;
6a271cae
PA
1799 i += 8;
1800 append_insns (&buildaddr, i, buf);
1801 current_insn_ptr = buildaddr;
1802}
1803
1804static void
1805amd64_emit_call (CORE_ADDR fn)
1806{
1807 unsigned char buf[16];
1808 int i;
1809 CORE_ADDR buildaddr;
4e29fb54 1810 LONGEST offset64;
6a271cae
PA
1811
1812 /* The destination function being in the shared library, may be
1813 >31-bits away off the compiled code pad. */
1814
1815 buildaddr = current_insn_ptr;
1816
1817 offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
1818
1819 i = 0;
1820
1821 if (offset64 > INT_MAX || offset64 < INT_MIN)
1822 {
1823 /* Offset is too large for a call. Use callq, but that requires
1824 a register, so avoid it if possible. Use r10, since it is
1825 call-clobbered, we don't have to push/pop it. */
1826 buf[i++] = 0x48; /* mov $fn,%r10 */
1827 buf[i++] = 0xba;
1828 memcpy (buf + i, &fn, 8);
1829 i += 8;
1830 buf[i++] = 0xff; /* callq *%r10 */
1831 buf[i++] = 0xd2;
1832 }
1833 else
1834 {
1835 int offset32 = offset64; /* we know we can't overflow here. */
1836 memcpy (buf + i, &offset32, 4);
1837 i += 4;
1838 }
1839
1840 append_insns (&buildaddr, i, buf);
1841 current_insn_ptr = buildaddr;
1842}
1843
/* Emit code to fetch raw register REG: load the register number into
   %esi (second argument) and call the raw-register accessor.  */

static void
amd64_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  /* Assume raw_regs is still in %rdi.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbe; /* mov $<n>,%esi */
  *((int *) (&buf[i])) = reg;
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (get_raw_reg_func_addr ());
}
1861
/* Emit a pop of the spilled stack into the %rax accumulator.  */

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
	    "pop %rax");
}

/* Emit a spill of the %rax accumulator onto the machine stack.  */

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
	    "push %rax");
}
1875
/* Emit code to zero out all but the low ARG bits of %rax.  The
   32-bit case goes through %rcx because an and with immediate
   0xffffffff would be sign-extended to 64 bits.  */

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
		"and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
		"and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
		"mov $0xffffffff,%rcx\n\t"
		"and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}
1898
/* Emit code to exchange %rax with the top spilled stack slot.  */

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
	    "mov %rax,%rcx\n\t"
	    "pop %rax\n\t"
	    "push %rcx");
}
1907
/* Emit code to discard N spilled 8-byte stack slots (lea with an
   8-bit displacement, hence the small-N limitation noted below).  */

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
1925
/* FN's prototype is `LONGEST(*fn)(int)'.  Emit code to call FN with
   the constant ARG1 in %edi; the LONGEST result is left in %rax.  */

static void
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  *((int *) (&buf[i])) = arg1;
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  amd64_emit_call (fn);
}
1944
/* FN's prototype is `void(*fn)(int,LONGEST)'.  Emit code to call FN
   with ARG1 in %edi and the current top-of-stack (%rax) in %rsi;
   %rax is preserved across the call.  */

static void
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xbf; /* movl $<n>,%edi */
  *((int *) (&buf[i])) = arg1;
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM (amd64_void_call_2_a,
	    /* Save away a copy of the stack top.  */
	    "push %rax\n\t"
	    /* Also pass top as the second argument.  */
	    "mov %rax,%rsi");
  amd64_emit_call (fn);
  EMIT_ASM (amd64_void_call_2_b,
	    /* Restore the stack top, %rax may have been trashed.  */
	    "pop %rax");
}
1971
/* The amd64 vector of bytecode-compilation callbacks; slot order
   must match struct emit_ops (declared in a header not visible
   here).  */

struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2
  };
2006
2007#endif /* __x86_64__ */
2008
/* Emit the IA32 prologue: just an %ebp frame.  (The IA32 compiled
   expression keeps 64-bit values split as low word in %eax, high
   word in %ebx; see i386_emit_add/i386_emit_pop.)  */

static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
	      "push %ebp\n\t"
	      "mov %esp,%ebp");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}
2018
/* Emit the IA32 epilogue: store the 64-bit %ebx:%eax result through
   the value pointer at 12(%ebp), return 0, and pop the frame.  */

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
	      "mov 12(%ebp),%ecx\n\t"
	      "mov %eax,(%ecx)\n\t"
	      "mov %ebx,0x4(%ecx)\n\t"
	      "xor %eax,%eax\n\t"
	      "pop %ebp\n\t"
	      "ret");
}
2030
/* Emit 64-bit addition of the spilled slot pair into %ebx:%eax
   (add low, adc high), discarding the pair.  */

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
	      "add (%esp),%eax\n\t"
	      "adc 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* Emit 64-bit subtraction of %ebx:%eax from the spilled pair
   (sub/sbb in memory), then pop the result into %ebx:%eax.  */

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
	      "subl %eax,(%esp)\n\t"
	      "sbbl %ebx,4(%esp)\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t");
}
2049
/* Operations with no IA32 emitter: flag emit_error so the caller
   can abandon compilation of this expression.  */

static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}
2073
/* Emit code to sign-extend the low ARG bits of %eax through the
   whole %ebx:%eax pair (%ebx becomes the sign word).  Widths other
   than 8/16/32 set emit_error.  */

static void
i386_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_ext_8,
		  "cbtw\n\t"
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_ext_16,
		  "cwtl\n\t"
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_ext_32,
		  "movl %eax,%ebx\n\t"
		  "sarl $31,%ebx");
      break;
    default:
      emit_error = 1;
    }
}
2101
/* Emit logical NOT of the %ebx:%eax pair: result is 1 if the whole
   64-bit value was zero, else 0 (high word cleared).  */

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}
2112
/* Emit 64-bit bitwise AND of the spilled pair into %ebx:%eax,
   discarding the pair.  */

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* Emit 64-bit bitwise OR, same discipline as i386_emit_bit_and.  */

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* Emit 64-bit bitwise XOR, same discipline as i386_emit_bit_and.  */

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* Emit bitwise complement of the %ebx:%eax pair.  */

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx\n\t");
}
2147
/* Emit 64-bit equality test of the spilled pair against %ebx:%eax
   (high words first, then low); result 1/0 in %eax with %ebx
   cleared, pair discarded.  */

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* Emit signed 64-bit less-than of the spilled pair vs %ebx:%eax:
   signed compare on high words, unsigned-order low-word tiebreak.  */

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}

/* Unsigned variant of i386_emit_less_signed (jb instead of jl).  */

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2203
/* Emit code to replace the address in %eax with the SIZE-byte value
   it points to; for SIZE == 8 the high word goes into %ebx.
   Unsupported sizes emit nothing.  */

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}
2228
/* Emit a conditional branch on the popped %ebx:%eax pair being
   non-zero; the jne displacement is patched later through
   i386_write_goto_address.  *OFFSET_P = 11: mov (2) + or (2) +
   pop (1) + pop (1) + cmpl (3) + jne opcode (2) bytes precede the
   4-byte displacement.  */

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}

/* Emit an unconditional jmp (e9) with a zero displacement to be
   patched later; the displacement starts at offset 1.  */

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}
2258
2259static void
2260i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
2261{
2262 int diff = (to - (from + size));
2263 unsigned char buf[sizeof (int)];
2264
2265 /* We're only doing 4-byte sizes at the moment. */
2266 if (size != 4)
2267 {
2268 emit_error = 1;
2269 return;
2270 }
2271
2272 memcpy (buf, &diff, sizeof (int));
2273 write_inferior_memory (from, buf, sizeof (int));
2274}
2275
2276static void
4e29fb54 2277i386_emit_const (LONGEST num)
6a271cae
PA
2278{
2279 unsigned char buf[16];
2280 int i, hi;
2281 CORE_ADDR buildaddr = current_insn_ptr;
2282
2283 i = 0;
2284 buf[i++] = 0xb8; /* mov $<n>,%eax */
2285 *((int *) (&buf[i])) = (num & 0xffffffff);
2286 i += 4;
2287 hi = ((num >> 32) & 0xffffffff);
2288 if (hi)
2289 {
2290 buf[i++] = 0xbb; /* mov $<n>,%ebx */
2291 *((int *) (&buf[i])) = hi;
2292 i += 4;
2293 }
2294 else
2295 {
2296 buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2297 }
2298 append_insns (&buildaddr, i, buf);
2299 current_insn_ptr = buildaddr;
2300}
2301
/* Emit a direct 5-byte e8 rel32 call to FN (always reachable on
   IA32).  */

static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}
2317
/* Emit code to fetch raw register REG: push the raw-regs pointer
   (saved at 8(%ebp) by the prologue) and REG as stack arguments and
   call the raw-register accessor; the high word %ebx is cleared.  */

static void
i386_emit_reg (int reg)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_reg_a,
	      "sub $0x8,%esp");
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xb8; /* mov $<n>,%eax */
  *((int *) (&buf[i])) = reg;
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  EMIT_ASM32 (i386_reg_b,
	      "mov %eax,4(%esp)\n\t"
	      "mov 8(%ebp),%eax\n\t"
	      "mov %eax,(%esp)");
  i386_emit_call (get_raw_reg_func_addr ());
  EMIT_ASM32 (i386_reg_c,
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2343
/* Emit a pop of the spilled stack into the %ebx:%eax pair.  */

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}

/* Emit a spill of the %ebx:%eax pair onto the machine stack (high
   word first so the low word ends up at the lower address).  */

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}
2359
/* Emit code to zero out all but the low ARG bits of the %ebx:%eax
   pair.  Widths other than 8/16/32 set emit_error.  */

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}
2383
/* Emit code to exchange the %ebx:%eax pair with the top spilled
   pair.  */

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}
2395
/* Emit code to discard N spilled 8-byte pairs (lea with an 8-bit
   displacement, so N must stay small).  */

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
2411
/* FN's prototype is `LONGEST(*fn)(int)'.  Emit code to call FN with
   ARG1 as its stack argument; the LONGEST result comes back in
   %edx:%eax and is moved into the %ebx:%eax pair.  */

static void
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr;

  EMIT_ASM32 (i386_int_call_1_a,
	      /* Reserve a bit of stack space.  */
	      "sub $0x8,%esp");
  /* Put the one argument on the stack.  */
  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
  buf[i++] = 0x04;
  buf[i++] = 0x24;
  *((int *) (&buf[i])) = arg1;
  i += 4;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
  i386_emit_call (fn);
  EMIT_ASM32 (i386_int_call_1_c,
	      "mov %edx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2439
4e29fb54 2440/* FN's prototype is `void(*fn)(int,LONGEST)'. */
6a271cae
PA
2441
2442static void
2443i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
2444{
2445 unsigned char buf[16];
2446 int i;
2447 CORE_ADDR buildaddr;
2448
2449 EMIT_ASM32 (i386_void_call_2_a,
2450 /* Preserve %eax only; we don't have to worry about %ebx. */
2451 "push %eax\n\t"
2452 /* Reserve a bit of stack space for arguments. */
2453 "sub $0x10,%esp\n\t"
2454 /* Copy "top" to the second argument position. (Note that
2455 we can't assume function won't scribble on its
2456 arguments, so don't try to restore from this.) */
2457 "mov %eax,4(%esp)\n\t"
2458 "mov %ebx,8(%esp)");
2459 /* Put the first argument on the stack. */
2460 buildaddr = current_insn_ptr;
2461 i = 0;
2462 buf[i++] = 0xc7; /* movl $<arg1>,(%esp) */
2463 buf[i++] = 0x04;
2464 buf[i++] = 0x24;
2465 *((int *) (&buf[i])) = arg1;
2466 i += 4;
2467 append_insns (&buildaddr, i, buf);
2468 current_insn_ptr = buildaddr;
2469 i386_emit_call (fn);
2470 EMIT_ASM32 (i386_void_call_2_b,
2471 "lea 0x10(%esp),%esp\n\t"
2472 /* Restore original stack top. */
2473 "pop %eax");
2474}
2475
/* The 32-bit (i386) table of bytecode-to-native-code emitters used
   by the fast tracepoint compiler.  This is a positional initializer,
   so the entry order must match the field order of struct emit_ops
   (declared elsewhere) exactly.  */

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2
  };
2510
2511
2512static struct emit_ops *
2513x86_emit_ops (void)
2514{
2515#ifdef __x86_64__
2516 int use_64bit = register_size (0) == 8;
2517
2518 if (use_64bit)
2519 return &amd64_emit_ops;
2520 else
2521#endif
2522 return &i386_emit_ops;
2523}
2524
/* This is initialized assuming an amd64 target.
   x86_arch_setup will correct it for i386 or amd64 targets.

   NOTE(review): positional initializer — the entry order must match
   the field order of struct linux_target_ops (declared elsewhere);
   verify against that declaration when adding entries.  */

struct linux_target_ops the_low_target =
{
  x86_arch_setup,
  -1,
  NULL,
  NULL,
  NULL,
  x86_get_pc,
  x86_set_pc,
  x86_breakpoint,
  x86_breakpoint_len,
  NULL,
  1,
  x86_breakpoint_at,
  x86_insert_point,
  x86_remove_point,
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined). */
  NULL,
  NULL,
  /* need to fix up i386 siginfo if host is amd64 */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_new_thread,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops
};