]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gdb/amd64-tdep.c
amd64-windows: memory args passed by pointer during function calls.
[thirdparty/binutils-gdb.git] / gdb / amd64-tdep.c
1 /* Target-dependent code for AMD64.
2
3 Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
5
6 Contributed by Jiri Smid, SuSE Labs.
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3 of the License, or
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
22
23 #include "defs.h"
24 #include "opcode/i386.h"
25 #include "dis-asm.h"
26 #include "arch-utils.h"
27 #include "block.h"
28 #include "dummy-frame.h"
29 #include "frame.h"
30 #include "frame-base.h"
31 #include "frame-unwind.h"
32 #include "inferior.h"
33 #include "gdbcmd.h"
34 #include "gdbcore.h"
35 #include "objfiles.h"
36 #include "regcache.h"
37 #include "regset.h"
38 #include "symfile.h"
39
40 #include "gdb_assert.h"
41
42 #include "amd64-tdep.h"
43 #include "i387-tdep.h"
44
45 /* Note that the AMD64 architecture was previously known as x86-64.
46 The latter is (forever) engraved into the canonical system name as
47 returned by config.guess, and used as the name for the AMD64 port
48 of GNU/Linux. The BSD's have renamed their ports to amd64; they
49 don't like to shout. For GDB we prefer the amd64_-prefix over the
50 x86_64_-prefix since it's so much easier to type. */
51
52 /* Register information. */
53
/* Names of the AMD64 raw registers, indexed by GDB register number.
   The ordering here defines the GDB register numbering for this
   architecture and must stay in sync with the AMD64_*_REGNUM
   constants in amd64-tdep.h.  */

static const char *amd64_register_names[] =
{
  "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",

  /* %r8 is indeed register number 8.  */
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
  "rip", "eflags", "cs", "ss", "ds", "es", "fs", "gs",

  /* %st0 is register number 24.  */
  "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7",
  "fctrl", "fstat", "ftag", "fiseg", "fioff", "foseg", "fooff", "fop",

  /* %xmm0 is register number 40.  */
  "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
  "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
  "mxcsr",
};

/* Total number of registers.  */
#define AMD64_NUM_REGS	ARRAY_SIZE (amd64_register_names)
74
/* The registers used to pass integer arguments during a function call,
   in the order the System V psABI assigns them.  Targets with a
   different calling convention may install their own list via
   tdep->call_dummy_integer_regs.  */
static int amd64_dummy_call_integer_regs[] =
{
  AMD64_RDI_REGNUM,		/* %rdi */
  AMD64_RSI_REGNUM,		/* %rsi */
  AMD64_RDX_REGNUM,		/* %rdx */
  AMD64_RCX_REGNUM,		/* %rcx */
  8,				/* %r8 */
  9				/* %r9 */
};
85
86 /* Return the name of register REGNUM. */
87
88 const char *
89 amd64_register_name (struct gdbarch *gdbarch, int regnum)
90 {
91 if (regnum >= 0 && regnum < AMD64_NUM_REGS)
92 return amd64_register_names[regnum];
93
94 return NULL;
95 }
96
/* Return the GDB type object for the "standard" data type of data in
   register REGNUM.  Reaching the end of the chain means REGNUM is
   outside every known register range, which is a caller bug.  */

struct type *
amd64_register_type (struct gdbarch *gdbarch, int regnum)
{
  /* General-purpose registers %rax ... %rdi.  */
  if (regnum >= AMD64_RAX_REGNUM && regnum <= AMD64_RDI_REGNUM)
    return builtin_type (gdbarch)->builtin_int64;
  /* Frame and stack pointers are treated as data pointers.  */
  if (regnum == AMD64_RBP_REGNUM || regnum == AMD64_RSP_REGNUM)
    return builtin_type (gdbarch)->builtin_data_ptr;
  /* Extended integer registers %r8 ... %r15.  */
  if (regnum >= AMD64_R8_REGNUM && regnum <= AMD64_R15_REGNUM)
    return builtin_type (gdbarch)->builtin_int64;
  /* The program counter is a function pointer.  */
  if (regnum == AMD64_RIP_REGNUM)
    return builtin_type (gdbarch)->builtin_func_ptr;
  if (regnum == AMD64_EFLAGS_REGNUM)
    return i386_eflags_type (gdbarch);
  /* Segment selector registers %cs ... %gs.  */
  if (regnum >= AMD64_CS_REGNUM && regnum <= AMD64_GS_REGNUM)
    return builtin_type (gdbarch)->builtin_int32;
  /* x87 floating-point stack registers %st0 ... %st7.  */
  if (regnum >= AMD64_ST0_REGNUM && regnum <= AMD64_ST0_REGNUM + 7)
    return i387_ext_type (gdbarch);
  /* x87 control registers (%fctrl, %fstat, ...).  */
  if (regnum >= AMD64_FCTRL_REGNUM && regnum <= AMD64_FCTRL_REGNUM + 7)
    return builtin_type (gdbarch)->builtin_int32;
  /* SSE registers %xmm0 ... %xmm15.  */
  if (regnum >= AMD64_XMM0_REGNUM && regnum <= AMD64_XMM0_REGNUM + 15)
    return i386_sse_type (gdbarch);
  if (regnum == AMD64_MXCSR_REGNUM)
    return i386_mxcsr_type (gdbarch);

  internal_error (__FILE__, __LINE__, _("invalid regnum"));
}
126
/* DWARF Register Number Mapping as defined in the System V psABI,
   section 3.6.  Indexed by DWARF register number; entries of -1 mark
   DWARF registers that have no GDB equivalent here.  */

static int amd64_dwarf_regmap[] =
{
  /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI.  */
  AMD64_RAX_REGNUM, AMD64_RDX_REGNUM,
  AMD64_RCX_REGNUM, AMD64_RBX_REGNUM,
  AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,

  /* Frame Pointer Register RBP.  */
  AMD64_RBP_REGNUM,

  /* Stack Pointer Register RSP.  */
  AMD64_RSP_REGNUM,

  /* Extended Integer Registers 8 - 15.  */
  8, 9, 10, 11, 12, 13, 14, 15,

  /* Return Address RA.  Mapped to RIP.  */
  AMD64_RIP_REGNUM,

  /* SSE Registers 0 - 7.  */
  AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
  AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
  AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
  AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,

  /* Extended SSE Registers 8 - 15.  */
  AMD64_XMM0_REGNUM + 8, AMD64_XMM0_REGNUM + 9,
  AMD64_XMM0_REGNUM + 10, AMD64_XMM0_REGNUM + 11,
  AMD64_XMM0_REGNUM + 12, AMD64_XMM0_REGNUM + 13,
  AMD64_XMM0_REGNUM + 14, AMD64_XMM0_REGNUM + 15,

  /* Floating Point Registers 0-7.  */
  AMD64_ST0_REGNUM + 0, AMD64_ST0_REGNUM + 1,
  AMD64_ST0_REGNUM + 2, AMD64_ST0_REGNUM + 3,
  AMD64_ST0_REGNUM + 4, AMD64_ST0_REGNUM + 5,
  AMD64_ST0_REGNUM + 6, AMD64_ST0_REGNUM + 7,

  /* Control and Status Flags Register.  */
  AMD64_EFLAGS_REGNUM,

  /* Selector Registers.  */
  AMD64_ES_REGNUM,
  AMD64_CS_REGNUM,
  AMD64_SS_REGNUM,
  AMD64_DS_REGNUM,
  AMD64_FS_REGNUM,
  AMD64_GS_REGNUM,
  -1,
  -1,

  /* Segment Base Address Registers.  */
  -1,
  -1,
  -1,
  -1,

  /* Special Selector Registers.  */
  -1,
  -1,

  /* Floating Point Control Registers.  */
  AMD64_MXCSR_REGNUM,
  AMD64_FCTRL_REGNUM,
  AMD64_FSTAT_REGNUM
};

/* Number of entries in the DWARF register map above.  */
static const int amd64_dwarf_regmap_len =
  (sizeof (amd64_dwarf_regmap) / sizeof (amd64_dwarf_regmap[0]));
198
199 /* Convert DWARF register number REG to the appropriate register
200 number used by GDB. */
201
202 static int
203 amd64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
204 {
205 int regnum = -1;
206
207 if (reg >= 0 && reg < amd64_dwarf_regmap_len)
208 regnum = amd64_dwarf_regmap[reg];
209
210 if (regnum == -1)
211 warning (_("Unmapped DWARF Register #%d encountered."), reg);
212
213 return regnum;
214 }
215
/* Map architectural register numbers (the encoding used in instruction
   ModRM/SIB fields and REX extensions) to GDB register numbers.  */

static const int amd64_arch_regmap[16] =
{
  AMD64_RAX_REGNUM,	/* %rax */
  AMD64_RCX_REGNUM,	/* %rcx */
  AMD64_RDX_REGNUM,	/* %rdx */
  AMD64_RBX_REGNUM,	/* %rbx */
  AMD64_RSP_REGNUM,	/* %rsp */
  AMD64_RBP_REGNUM,	/* %rbp */
  AMD64_RSI_REGNUM,	/* %rsi */
  AMD64_RDI_REGNUM,	/* %rdi */
  AMD64_R8_REGNUM,	/* %r8 */
  AMD64_R9_REGNUM,	/* %r9 */
  AMD64_R10_REGNUM,	/* %r10 */
  AMD64_R11_REGNUM,	/* %r11 */
  AMD64_R12_REGNUM,	/* %r12 */
  AMD64_R13_REGNUM,	/* %r13 */
  AMD64_R14_REGNUM,	/* %r14 */
  AMD64_R15_REGNUM	/* %r15 */
};

/* Number of entries in the architectural register map above.  */
static const int amd64_arch_regmap_len =
  (sizeof (amd64_arch_regmap) / sizeof (amd64_arch_regmap[0]));
240
/* Convert architectural register number REG to the appropriate register
   number used by GDB.  REG must be in [0, 15]; out-of-range values are
   a caller bug and trip the assertion.  */

static int
amd64_arch_reg_to_regnum (int reg)
{
  gdb_assert (reg >= 0 && reg < amd64_arch_regmap_len);

  return amd64_arch_regmap[reg];
}
251
252 \f
253
254 /* Return the union class of CLASS1 and CLASS2. See the psABI for
255 details. */
256
257 static enum amd64_reg_class
258 amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
259 {
260 /* Rule (a): If both classes are equal, this is the resulting class. */
261 if (class1 == class2)
262 return class1;
263
264 /* Rule (b): If one of the classes is NO_CLASS, the resulting class
265 is the other class. */
266 if (class1 == AMD64_NO_CLASS)
267 return class2;
268 if (class2 == AMD64_NO_CLASS)
269 return class1;
270
271 /* Rule (c): If one of the classes is MEMORY, the result is MEMORY. */
272 if (class1 == AMD64_MEMORY || class2 == AMD64_MEMORY)
273 return AMD64_MEMORY;
274
275 /* Rule (d): If one of the classes is INTEGER, the result is INTEGER. */
276 if (class1 == AMD64_INTEGER || class2 == AMD64_INTEGER)
277 return AMD64_INTEGER;
278
279 /* Rule (e): If one of the classes is X87, X87UP, COMPLEX_X87 class,
280 MEMORY is used as class. */
281 if (class1 == AMD64_X87 || class1 == AMD64_X87UP
282 || class1 == AMD64_COMPLEX_X87 || class2 == AMD64_X87
283 || class2 == AMD64_X87UP || class2 == AMD64_COMPLEX_X87)
284 return AMD64_MEMORY;
285
286 /* Rule (f): Otherwise class SSE is used. */
287 return AMD64_SSE;
288 }
289
290 /* Return non-zero if TYPE is a non-POD structure or union type. */
291
292 static int
293 amd64_non_pod_p (struct type *type)
294 {
295 /* ??? A class with a base class certainly isn't POD, but does this
296 catch all non-POD structure types? */
297 if (TYPE_CODE (type) == TYPE_CODE_STRUCT && TYPE_N_BASECLASSES (type) > 0)
298 return 1;
299
300 return 0;
301 }
302
/* Classify TYPE according to the rules for aggregate (structures and
   arrays) and union types, and store the result in CLASS.  CLASS[0]
   receives the class of the first eightbyte, CLASS[1] that of the
   second.  The numbered steps correspond to the classification
   algorithm in the System V psABI.  */

static void
amd64_classify_aggregate (struct type *type, enum amd64_reg_class class[2])
{
  int len = TYPE_LENGTH (type);

  /* 1. If the size of an object is larger than two eightbytes, or in
        C++, is a non-POD structure or union type, or contains
        unaligned fields, it has class memory.  */
  if (len > 16 || amd64_non_pod_p (type))
    {
      class[0] = class[1] = AMD64_MEMORY;
      return;
    }

  /* 2. Both eightbytes get initialized to class NO_CLASS.  */
  class[0] = class[1] = AMD64_NO_CLASS;

  /* 3. Each field of an object is classified recursively so that
        always two fields are considered.  The resulting class is
        calculated according to the classes of the fields in the
        eightbyte: */

  if (TYPE_CODE (type) == TYPE_CODE_ARRAY)
    {
      struct type *subtype = check_typedef (TYPE_TARGET_TYPE (type));

      /* All fields in an array have the same type.  Classify the
	 element type once and duplicate into the second eightbyte
	 when the array spills into it.  */
      amd64_classify (subtype, class);
      if (len > 8 && class[1] == AMD64_NO_CLASS)
	class[1] = class[0];
    }
  else
    {
      int i;

      /* Structure or union.  */
      gdb_assert (TYPE_CODE (type) == TYPE_CODE_STRUCT
		  || TYPE_CODE (type) == TYPE_CODE_UNION);

      for (i = 0; i < TYPE_NFIELDS (type); i++)
	{
	  struct type *subtype = check_typedef (TYPE_FIELD_TYPE (type, i));
	  /* Which eightbyte the field starts in (bit offset / 64).  */
	  int pos = TYPE_FIELD_BITPOS (type, i) / 64;
	  enum amd64_reg_class subclass[2];

	  /* Ignore static fields.  */
	  if (field_is_static (&TYPE_FIELD (type, i)))
	    continue;

	  gdb_assert (pos == 0 || pos == 1);

	  amd64_classify (subtype, subclass);
	  class[pos] = amd64_merge_classes (class[pos], subclass[0]);
	  /* A field starting in the first eightbyte may extend into
	     the second; merge its second-eightbyte class too.  */
	  if (pos == 0)
	    class[1] = amd64_merge_classes (class[1], subclass[1]);
	}
    }

  /* 4. Then a post merger cleanup is done: */

  /* Rule (a): If one of the classes is MEMORY, the whole argument is
     passed in memory.  */
  if (class[0] == AMD64_MEMORY || class[1] == AMD64_MEMORY)
    class[0] = class[1] = AMD64_MEMORY;

  /* Rule (b): If SSEUP is not preceeded by SSE, it is converted to
     SSE.  */
  if (class[0] == AMD64_SSEUP)
    class[0] = AMD64_SSE;
  if (class[1] == AMD64_SSEUP && class[0] != AMD64_SSE)
    class[1] = AMD64_SSE;
}
378
/* Classify TYPE, and store the result in CLASS.  CLASS[0] and CLASS[1]
   describe the first and second eightbyte of the value; unused
   eightbytes keep AMD64_NO_CLASS.  Types not matched by any branch
   (e.g. TYPE_CODE_VOID) are left as NO_CLASS/NO_CLASS.  */

void
amd64_classify (struct type *type, enum amd64_reg_class class[2])
{
  enum type_code code = TYPE_CODE (type);
  int len = TYPE_LENGTH (type);

  class[0] = class[1] = AMD64_NO_CLASS;

  /* Arguments of types (signed and unsigned) _Bool, char, short, int,
     long, long long, and pointers are in the INTEGER class.  Similarly,
     range types, used by languages such as Ada, are also in the INTEGER
     class.  */
  if ((code == TYPE_CODE_INT || code == TYPE_CODE_ENUM
       || code == TYPE_CODE_BOOL || code == TYPE_CODE_RANGE
       || code == TYPE_CODE_CHAR
       || code == TYPE_CODE_PTR || code == TYPE_CODE_REF)
      && (len == 1 || len == 2 || len == 4 || len == 8))
    class[0] = AMD64_INTEGER;

  /* Arguments of types float, double, _Decimal32, _Decimal64 and __m64
     are in class SSE.  */
  else if ((code == TYPE_CODE_FLT || code == TYPE_CODE_DECFLOAT)
	   && (len == 4 || len == 8))
    /* FIXME: __m64 .  */
    class[0] = AMD64_SSE;

  /* Arguments of types __float128, _Decimal128 and __m128 are split into
     two halves.  The least significant ones belong to class SSE, the most
     significant one to class SSEUP.  */
  else if (code == TYPE_CODE_DECFLOAT && len == 16)
    /* FIXME: __float128, __m128.  */
    class[0] = AMD64_SSE, class[1] = AMD64_SSEUP;

  /* The 64-bit mantissa of arguments of type long double belongs to
     class X87, the 16-bit exponent plus 6 bytes of padding belongs to
     class X87UP.  */
  else if (code == TYPE_CODE_FLT && len == 16)
    /* Class X87 and X87UP.  */
    class[0] = AMD64_X87, class[1] = AMD64_X87UP;

  /* Aggregates.  */
  else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
	   || code == TYPE_CODE_UNION)
    amd64_classify_aggregate (type, class);
}
426
/* Implement the "return_value" gdbarch method.  Determine how a value
   of TYPE is returned per the AMD64 calling convention, and optionally
   read (READBUF) or write (WRITEBUF) the value through REGCACHE.
   Exactly one of READBUF/WRITEBUF may be non-NULL.  The numbered steps
   correspond to the return-value rules in the System V psABI.  */

static enum return_value_convention
amd64_return_value (struct gdbarch *gdbarch, struct type *func_type,
		    struct type *type, struct regcache *regcache,
		    gdb_byte *readbuf, const gdb_byte *writebuf)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum amd64_reg_class class[2];
  int len = TYPE_LENGTH (type);
  /* INTEGER-class eightbytes are returned in %rax then %rdx.  */
  static int integer_regnum[] = { AMD64_RAX_REGNUM, AMD64_RDX_REGNUM };
  /* SSE-class eightbytes are returned in %xmm0 then %xmm1.  */
  static int sse_regnum[] = { AMD64_XMM0_REGNUM, AMD64_XMM1_REGNUM };
  int integer_reg = 0;
  int sse_reg = 0;
  int i;

  gdb_assert (!(readbuf && writebuf));
  gdb_assert (tdep->classify);

  /* 1. Classify the return type with the classification algorithm.  */
  tdep->classify (type, class);

  /* 2. If the type has class MEMORY, then the caller provides space
     for the return value and passes the address of this storage in
     %rdi as if it were the first argument to the function. In effect,
     this address becomes a hidden first argument.

     On return %rax will contain the address that has been passed in
     by the caller in %rdi.  */
  if (class[0] == AMD64_MEMORY)
    {
      /* As indicated by the comment above, the ABI guarantees that we
         can always find the return value just after the function has
         returned.  */

      if (readbuf)
	{
	  ULONGEST addr;

	  regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
	  read_memory (addr, readbuf, TYPE_LENGTH (type));
	}

      return RETURN_VALUE_ABI_RETURNS_ADDRESS;
    }

  gdb_assert (class[1] != AMD64_MEMORY);
  gdb_assert (len <= 16);

  /* Transfer the value one eightbyte at a time.  */
  for (i = 0; len > 0; i++, len -= 8)
    {
      int regnum = -1;
      int offset = 0;

      switch (class[i])
	{
	case AMD64_INTEGER:
	  /* 3. If the class is INTEGER, the next available register
	     of the sequence %rax, %rdx is used.  */
	  regnum = integer_regnum[integer_reg++];
	  break;

	case AMD64_SSE:
	  /* 4. If the class is SSE, the next available SSE register
	     of the sequence %xmm0, %xmm1 is used.  */
	  regnum = sse_regnum[sse_reg++];
	  break;

	case AMD64_SSEUP:
	  /* 5. If the class is SSEUP, the eightbyte is passed in the
	     upper half of the last used SSE register.  */
	  gdb_assert (sse_reg > 0);
	  regnum = sse_regnum[sse_reg - 1];
	  offset = 8;
	  break;

	case AMD64_X87:
	  /* 6. If the class is X87, the value is returned on the X87
	     stack in %st0 as 80-bit x87 number.  */
	  regnum = AMD64_ST0_REGNUM;
	  if (writebuf)
	    i387_return_value (gdbarch, regcache);
	  break;

	case AMD64_X87UP:
	  /* 7. If the class is X87UP, the value is returned together
	     with the previous X87 value in %st0.  Only the 2-byte
	     exponent remains to transfer, so clamp LEN to 2 here.  */
	  gdb_assert (i > 0 && class[0] == AMD64_X87);
	  regnum = AMD64_ST0_REGNUM;
	  offset = 8;
	  len = 2;
	  break;

	case AMD64_NO_CLASS:
	  continue;

	default:
	  gdb_assert (!"Unexpected register class.");
	}

      gdb_assert (regnum != -1);

      /* Transfer at most 8 bytes of this eightbyte to/from REGNUM.  */
      if (readbuf)
	regcache_raw_read_part (regcache, regnum, offset, min (len, 8),
				readbuf + i * 8);
      if (writebuf)
	regcache_raw_write_part (regcache, regnum, offset, min (len, 8),
				 writebuf + i * 8);
    }

  return RETURN_VALUE_REGISTER_CONVENTION;
}
537 \f
538
/* Push the NARGS arguments in ARGS for a function call through
   REGCACHE, placing them in registers or on the stack below SP per the
   classification rules.  STRUCT_RETURN non-zero means the first
   integer register is reserved for the hidden return-value pointer.
   Returns the adjusted (16-byte aligned) stack pointer.  */

static CORE_ADDR
amd64_push_arguments (struct regcache *regcache, int nargs,
		      struct value **args, CORE_ADDR sp, int struct_return)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  /* Integer argument registers come from the tdep so OS-specific
     ABIs (e.g. Windows) can supply their own list.  */
  int *integer_regs = tdep->call_dummy_integer_regs;
  int num_integer_regs = tdep->call_dummy_num_integer_regs;

  static int sse_regnum[] =
  {
    /* %xmm0 ... %xmm7 */
    AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
    AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
    AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
    AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
  };
  struct value **stack_args = alloca (nargs * sizeof (struct value *));
  /* An array that mirrors the stack_args array.  For all arguments
     that are passed by MEMORY, if that argument's address also needs
     to be stored in a register, the ARG_ADDR_REGNO array will contain
     that register number (or a negative value otherwise).  */
  int *arg_addr_regno = alloca (nargs * sizeof (int));
  int num_stack_args = 0;	/* Number of entries in STACK_ARGS.  */
  int num_elements = 0;		/* Total stack eightbytes needed.  */
  int element = 0;		/* Current stack eightbyte index.  */
  int integer_reg = 0;		/* Next free integer register slot.  */
  int sse_reg = 0;		/* Next free SSE register slot.  */
  int i;

  gdb_assert (tdep->classify);

  /* Reserve a register for the "hidden" argument.  */
  if (struct_return)
    integer_reg++;

  for (i = 0; i < nargs; i++)
    {
      struct type *type = value_type (args[i]);
      int len = TYPE_LENGTH (type);
      enum amd64_reg_class class[2];
      int needed_integer_regs = 0;
      int needed_sse_regs = 0;
      int j;

      /* Classify argument.  */
      tdep->classify (type, class);

      /* Calculate the number of integer and SSE registers needed for
         this argument.  */
      for (j = 0; j < 2; j++)
	{
	  if (class[j] == AMD64_INTEGER)
	    needed_integer_regs++;
	  else if (class[j] == AMD64_SSE)
	    needed_sse_regs++;
	}

      /* Check whether enough registers are available, and if the
         argument should be passed in registers at all.  */
      if (integer_reg + needed_integer_regs > num_integer_regs
	  || sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum)
	  || (needed_integer_regs == 0 && needed_sse_regs == 0))
	{
	  /* The argument will be passed on the stack.  */
	  num_elements += ((len + 7) / 8);
	  stack_args[num_stack_args] = args[i];
	  /* If this is an AMD64_MEMORY argument whose address must also
	     be passed in one of the integer registers, reserve that
	     register and associate this value to that register so that
	     we can store the argument address as soon as we know it.  */
	  if (class[0] == AMD64_MEMORY
	      && tdep->memory_args_by_pointer
	      && integer_reg < tdep->call_dummy_num_integer_regs)
	    arg_addr_regno[num_stack_args] =
	      tdep->call_dummy_integer_regs[integer_reg++];
	  else
	    arg_addr_regno[num_stack_args] = -1;
	  num_stack_args++;
	}
      else
	{
	  /* The argument will be passed in registers.  */
	  const gdb_byte *valbuf = value_contents (args[i]);
	  gdb_byte buf[8];

	  gdb_assert (len <= 16);

	  /* Write the argument to registers one eightbyte at a
	     time, zero-padding the final partial eightbyte.  */
	  for (j = 0; len > 0; j++, len -= 8)
	    {
	      int regnum = -1;
	      int offset = 0;

	      switch (class[j])
		{
		case AMD64_INTEGER:
		  regnum = integer_regs[integer_reg++];
		  break;

		case AMD64_SSE:
		  regnum = sse_regnum[sse_reg++];
		  break;

		case AMD64_SSEUP:
		  /* SSEUP goes in the upper half of the last SSE
		     register used.  */
		  gdb_assert (sse_reg > 0);
		  regnum = sse_regnum[sse_reg - 1];
		  offset = 8;
		  break;

		default:
		  gdb_assert (!"Unexpected register class.");
		}

	      gdb_assert (regnum != -1);
	      memset (buf, 0, sizeof buf);
	      memcpy (buf, valbuf + j * 8, min (len, 8));
	      regcache_raw_write_part (regcache, regnum, offset, 8, buf);
	    }
	}
    }

  /* Allocate space for the arguments on the stack.  */
  sp -= num_elements * 8;

  /* The psABI says that "The end of the input argument area shall be
     aligned on a 16 byte boundary."  */
  sp &= ~0xf;

  /* Write out the arguments to the stack.  */
  for (i = 0; i < num_stack_args; i++)
    {
      struct type *type = value_type (stack_args[i]);
      const gdb_byte *valbuf = value_contents (stack_args[i]);
      int len = TYPE_LENGTH (type);
      CORE_ADDR arg_addr = sp + element * 8;

      write_memory (arg_addr, valbuf, len);
      if (arg_addr_regno[i] >= 0)
        {
          /* We also need to store the address of that argument in
             the given register.  */
          gdb_byte buf[8];
          enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

          store_unsigned_integer (buf, 8, byte_order, arg_addr);
          regcache_cooked_write (regcache, arg_addr_regno[i], buf);
        }
      element += ((len + 7) / 8);
    }

  /* The psABI says that "For calls that may call functions that use
     varargs or stdargs (prototype-less calls or calls to functions
     containing ellipsis (...) in the declaration) %al is used as
     hidden argument to specify the number of SSE registers used.  */
  regcache_raw_write_unsigned (regcache, AMD64_RAX_REGNUM, sse_reg);
  return sp;
}
696
/* Implement the "push_dummy_call" gdbarch method.  Set up REGCACHE and
   the stack for an inferior function call: push the arguments, store
   the hidden struct-return address, push the return address BP_ADDR,
   and update %rsp/%rbp.  Returns the frame base (SP + 16).  */

static CORE_ADDR
amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
		       struct regcache *regcache, CORE_ADDR bp_addr,
		       int nargs, struct value **args,	CORE_ADDR sp,
		       int struct_return, CORE_ADDR struct_addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  gdb_byte buf[8];

  /* Pass arguments.  */
  sp = amd64_push_arguments (regcache, nargs, args, sp, struct_return);

  /* Pass "hidden" argument.  */
  if (struct_return)
    {
      struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
      /* The "hidden" argument is passed through the first argument
         register.  */
      const int arg_regnum = tdep->call_dummy_integer_regs[0];

      store_unsigned_integer (buf, 8, byte_order, struct_addr);
      regcache_cooked_write (regcache, arg_regnum, buf);
    }

  /* Store return address.  */
  sp -= 8;
  store_unsigned_integer (buf, 8, byte_order, bp_addr);
  write_memory (sp, buf, 8);

  /* Finally, update the stack pointer...  */
  store_unsigned_integer (buf, 8, byte_order, sp);
  regcache_cooked_write (regcache, AMD64_RSP_REGNUM, buf);

  /* ...and fake a frame pointer.  */
  regcache_cooked_write (regcache, AMD64_RBP_REGNUM, buf);

  return sp + 16;
}
735 \f
736 /* Displaced instruction handling. */
737
/* A partially decoded instruction.
   This contains enough details for displaced stepping purposes.
   All offsets are byte offsets from the start of RAW_INSN.  */

struct amd64_insn
{
  /* The number of opcode bytes.  */
  int opcode_len;
  /* The offset of the rex prefix or -1 if not present.  */
  int rex_offset;
  /* The offset to the first opcode byte.  */
  int opcode_offset;
  /* The offset to the modrm byte or -1 if not present.  */
  int modrm_offset;

  /* The raw instruction.  Not owned by this struct.  */
  gdb_byte *raw_insn;
};
755
/* Per-instruction state for displaced stepping.  */

struct displaced_step_closure
{
  /* For rip-relative insns, saved copy of the reg we use instead of %rip.
     TMP_USED is non-zero when TMP_REGNO/TMP_SAVE are valid.  */
  int tmp_used;
  int tmp_regno;
  ULONGEST tmp_save;

  /* Details of the instruction.  */
  struct amd64_insn insn_details;

  /* Amount of space allocated to insn_buf.  */
  int max_len;

  /* The possibly modified insn.
     This is a variable-length field.  */
  gdb_byte insn_buf[1];
};
773
/* WARNING: Keep onebyte_has_modrm, twobyte_has_modrm in sync with
   ../opcodes/i386-dis.c (until libopcodes exports them, or an alternative,
   at which point delete these in favor of libopcodes' versions).  */

/* Non-zero iff the one-byte opcode (array index) is followed by a
   ModRM byte.  */
static const unsigned char onebyte_has_modrm[256] = {
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
  /*	   -------------------------------	  */
  /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
  /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
  /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
  /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
  /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
  /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
  /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
  /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
  /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
  /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
  /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
  /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
  /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
  /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
  /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
  /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1  /* f0 */
  /*	   -------------------------------	  */
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
};
800
/* Non-zero iff the two-byte (0x0f-prefixed) opcode whose second byte
   is the array index is followed by a ModRM byte.  */
static const unsigned char twobyte_has_modrm[256] = {
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
  /*	   -------------------------------	  */
  /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
  /* 10 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 1f */
  /* 20 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 2f */
  /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
  /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
  /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
  /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
  /* 70 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 7f */
  /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
  /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
  /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
  /* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */
  /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
  /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
  /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
  /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0  /* ff */
  /*	   -------------------------------	  */
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
};
823
824 static int amd64_syscall_p (const struct amd64_insn *insn, int *lengthp);
825
/* Return non-zero if byte PFX is a REX instruction prefix
   (0x40 ... 0x4f, per the REX_PREFIX_P macro from opcode/i386.h).  */

static int
rex_prefix_p (gdb_byte pfx)
{
  return REX_PREFIX_P (pfx);
}
831
832 /* Skip the legacy instruction prefixes in INSN.
833 We assume INSN is properly sentineled so we don't have to worry
834 about falling off the end of the buffer. */
835
836 static gdb_byte *
837 amd64_skip_prefixes (gdb_byte *insn)
838 {
839 while (1)
840 {
841 switch (*insn)
842 {
843 case DATA_PREFIX_OPCODE:
844 case ADDR_PREFIX_OPCODE:
845 case CS_PREFIX_OPCODE:
846 case DS_PREFIX_OPCODE:
847 case ES_PREFIX_OPCODE:
848 case FS_PREFIX_OPCODE:
849 case GS_PREFIX_OPCODE:
850 case SS_PREFIX_OPCODE:
851 case LOCK_PREFIX_OPCODE:
852 case REPE_PREFIX_OPCODE:
853 case REPNE_PREFIX_OPCODE:
854 ++insn;
855 continue;
856 default:
857 break;
858 }
859 break;
860 }
861
862 return insn;
863 }
864
/* fprintf-function for amd64_insn_length.
   This function is a nop, we don't want to print anything, we just want to
   compute the length of the insn.  Always returns 0 (characters
   printed).  */

static int ATTR_FORMAT (printf, 2, 3)
amd64_insn_length_fprintf (void *stream, const char *format, ...)
{
  return 0;
}
874
/* Initialize a struct disassemble_info for amd64_insn_length.
   DI is set up to disassemble from the MAX_LEN-byte buffer INSN, as if
   located at address ADDR, discarding all printed output.  */

static void
amd64_insn_length_init_dis (struct gdbarch *gdbarch,
			    struct disassemble_info *di,
			    const gdb_byte *insn, int max_len,
			    CORE_ADDR addr)
{
  init_disassemble_info (di, NULL, amd64_insn_length_fprintf);

  /* init_disassemble_info installs buffer_read_memory, etc.
     so we don't need to do that here.
     The cast is necessary until disassemble_info is const-ified.  */
  di->buffer = (gdb_byte *) insn;
  di->buffer_length = max_len;
  di->buffer_vma = addr;

  di->arch = gdbarch_bfd_arch_info (gdbarch)->arch;
  di->mach = gdbarch_bfd_arch_info (gdbarch)->mach;
  di->endian = gdbarch_byte_order (gdbarch);
  di->endian_code = gdbarch_byte_order_for_code (gdbarch);

  disassemble_init_for_target (di);
}
899
900 /* Return the length in bytes of INSN.
901 MAX_LEN is the size of the buffer containing INSN.
902 libopcodes currently doesn't export a utility to compute the
903 instruction length, so use the disassembler until then. */
904
905 static int
906 amd64_insn_length (struct gdbarch *gdbarch,
907 const gdb_byte *insn, int max_len, CORE_ADDR addr)
908 {
909 struct disassemble_info di;
910
911 amd64_insn_length_init_dis (gdbarch, &di, insn, max_len, addr);
912
913 return gdbarch_print_insn (gdbarch, addr, &di);
914 }
915
/* Return an integer register (other than RSP) that is unused as an input
   operand in INSN.
   In order to not require adding a rex prefix if the insn doesn't already
   have one, the result is restricted to RAX ... RDI, sans RSP.
   The register numbering of the result follows architecture ordering,
   e.g. RDI = 7.  */

static int
amd64_get_unused_input_int_reg (const struct amd64_insn *details)
{
  /* 1 bit for each reg */
  int used_regs_mask = 0;

  /* There can be at most 3 int regs used as inputs in an insn, and we have
     7 to choose from (RAX ... RDI, sans RSP).
     This allows us to take a conservative approach and keep things simple.
     E.g. By avoiding RAX, we don't have to specifically watch for opcodes
     that implicitly specify RAX.  */

  /* Avoid RAX.  */
  used_regs_mask |= 1 << EAX_REG_NUM;
  /* Similarily avoid RDX, implicit operand in divides.  */
  used_regs_mask |= 1 << EDX_REG_NUM;
  /* Avoid RSP.  */
  used_regs_mask |= 1 << ESP_REG_NUM;

  /* If the opcode is one byte long and there's no ModRM byte,
     assume the opcode specifies a register.  The low 3 opcode bits
     encode the register in that case (e.g. push/pop reg).  */
  if (details->opcode_len == 1 && details->modrm_offset == -1)
    used_regs_mask |= 1 << (details->raw_insn[details->opcode_offset] & 7);

  /* Mark used regs in the modrm/sib bytes.  */
  if (details->modrm_offset != -1)
    {
      int modrm = details->raw_insn[details->modrm_offset];
      int mod = MODRM_MOD_FIELD (modrm);
      int reg = MODRM_REG_FIELD (modrm);
      int rm = MODRM_RM_FIELD (modrm);
      /* mod != 3 with rm == 4 means a SIB byte follows.  */
      int have_sib = mod != 3 && rm == 4;

      /* Assume the reg field of the modrm byte specifies a register.  */
      used_regs_mask |= 1 << reg;

      if (have_sib)
	{
	  int base = SIB_BASE_FIELD (details->raw_insn[details->modrm_offset + 1]);
	  int index = SIB_INDEX_FIELD (details->raw_insn[details->modrm_offset + 1]);
	  used_regs_mask |= 1 << base;
	  used_regs_mask |= 1 << index;
	}
      else
	{
	  used_regs_mask |= 1 << rm;
	}
    }

  /* Only the low 8 bits can be set, and at least one of the 8 regs
     must be free given the at-most-3-inputs argument above.  */
  gdb_assert (used_regs_mask < 256);
  gdb_assert (used_regs_mask != 255);

  /* Finally, find a free reg.  */
  {
    int i;

    for (i = 0; i < 8; ++i)
      {
	if (! (used_regs_mask & (1 << i)))
	  return i;
      }

    /* We shouldn't get here.  */
    internal_error (__FILE__, __LINE__, _("unable to find free reg"));
  }
}
989
/* Extract the details of INSN that we need, filling in DETAILS.
   INSN must be sentineled (see amd64_skip_prefixes).  DETAILS->raw_insn
   aliases INSN; all offsets are relative to the start of INSN.  */

static void
amd64_get_insn_details (gdb_byte *insn, struct amd64_insn *details)
{
  gdb_byte *start = insn;
  int need_modrm;

  details->raw_insn = insn;

  details->opcode_len = -1;
  details->rex_offset = -1;
  details->opcode_offset = -1;
  details->modrm_offset = -1;

  /* Skip legacy instruction prefixes.  */
  insn = amd64_skip_prefixes (insn);

  /* Skip REX instruction prefix.  */
  if (rex_prefix_p (*insn))
    {
      details->rex_offset = insn - start;
      ++insn;
    }

  details->opcode_offset = insn - start;

  if (*insn == TWO_BYTE_OPCODE_ESCAPE)
    {
      /* Two or three-byte opcode.  */
      ++insn;
      need_modrm = twobyte_has_modrm[*insn];

      /* Check for three-byte opcode.  These second bytes are
	 escapes to the three-byte opcode maps (0x0f 0x38, 0x0f 0x3a,
	 etc.).  */
      switch (*insn)
	{
	case 0x24:
	case 0x25:
	case 0x38:
	case 0x3a:
	case 0x7a:
	case 0x7b:
	  ++insn;
	  details->opcode_len = 3;
	  break;
	default:
	  details->opcode_len = 2;
	  break;
	}
    }
  else
    {
      /* One-byte opcode.  */
      need_modrm = onebyte_has_modrm[*insn];
      details->opcode_len = 1;
    }

  if (need_modrm)
    {
      ++insn;
      details->modrm_offset = insn - start;
    }
}
1053
1054 /* Update %rip-relative addressing in INSN.
1055
1056 %rip-relative addressing only uses a 32-bit displacement.
1057 32 bits is not enough to be guaranteed to cover the distance between where
1058 the real instruction is and where its copy is.
1059 Convert the insn to use base+disp addressing.
1060 We set base = pc + insn_length so we can leave disp unchanged. */
1061
static void
fixup_riprel (struct gdbarch *gdbarch, struct displaced_step_closure *dsc,
	      CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  const struct amd64_insn *insn_details = &dsc->insn_details;
  int modrm_offset = insn_details->modrm_offset;
  gdb_byte *insn = insn_details->raw_insn + modrm_offset;
  CORE_ADDR rip_base;
  int32_t disp;
  int insn_length;
  int arch_tmp_regno, tmp_regno;
  ULONGEST orig_value;

  /* %rip+disp32 addressing mode, displacement follows ModRM byte.  */
  ++insn;

  /* Compute the rip-relative address.  NOTE: DISP itself is never used
     below — the displacement bytes are left unchanged in the copy; only
     the base register is rewritten (see function header comment).  */
  disp = extract_signed_integer (insn, sizeof (int32_t), byte_order);
  insn_length = amd64_insn_length (gdbarch, dsc->insn_buf, dsc->max_len, from);
  rip_base = from + insn_length;

  /* We need a register to hold the address.
     Pick one not used in the insn.
     NOTE: arch_tmp_regno uses architecture ordering, e.g. RDI = 7.  */
  arch_tmp_regno = amd64_get_unused_input_int_reg (insn_details);
  tmp_regno = amd64_arch_reg_to_regnum (arch_tmp_regno);

  /* REX.B should be unset as we were using rip-relative addressing,
     but ensure it's unset anyway, tmp_regno is not r8-r15.  */
  if (insn_details->rex_offset != -1)
    dsc->insn_buf[insn_details->rex_offset] &= ~REX_B;

  /* Save the temp register's original value so the fixup step can
     restore it after the displaced instruction has executed.  */
  regcache_cooked_read_unsigned (regs, tmp_regno, &orig_value);
  dsc->tmp_regno = tmp_regno;
  dsc->tmp_save = orig_value;
  dsc->tmp_used = 1;

  /* Convert the ModRM field to be base+disp: clear mod and r/m, then
     set mod = 10 (register + disp32) with r/m = the temp register.  */
  dsc->insn_buf[modrm_offset] &= ~0xc7;
  dsc->insn_buf[modrm_offset] |= 0x80 + arch_tmp_regno;

  /* Point the temp register at the address %rip would have had.  */
  regcache_cooked_write_unsigned (regs, tmp_regno, rip_base);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: %%rip-relative addressing used.\n"
			"displaced: using temp reg %d, old value %s, new value %s\n",
			dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save),
			paddress (gdbarch, rip_base));
}
1112
1113 static void
1114 fixup_displaced_copy (struct gdbarch *gdbarch,
1115 struct displaced_step_closure *dsc,
1116 CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
1117 {
1118 const struct amd64_insn *details = &dsc->insn_details;
1119
1120 if (details->modrm_offset != -1)
1121 {
1122 gdb_byte modrm = details->raw_insn[details->modrm_offset];
1123
1124 if ((modrm & 0xc7) == 0x05)
1125 {
1126 /* The insn uses rip-relative addressing.
1127 Deal with it. */
1128 fixup_riprel (gdbarch, dsc, from, to, regs);
1129 }
1130 }
1131 }
1132
/* Copy the instruction at FROM to the scratch area at TO, applying any
   fixups needed (rip-relative addressing, syscall quirk), and return a
   closure recording what was done so the step can be fixed up later.
   The caller owns the returned closure.  */

struct displaced_step_closure *
amd64_displaced_step_copy_insn (struct gdbarch *gdbarch,
				CORE_ADDR from, CORE_ADDR to,
				struct regcache *regs)
{
  int len = gdbarch_max_insn_length (gdbarch);
  /* Extra space for sentinels so fixup_{riprel,displaced_copy} don't have to
     continually watch for running off the end of the buffer.  */
  int fixup_sentinel_space = len;
  struct displaced_step_closure *dsc =
    xmalloc (sizeof (*dsc) + len + fixup_sentinel_space);
  gdb_byte *buf = &dsc->insn_buf[0];
  struct amd64_insn *details = &dsc->insn_details;

  dsc->tmp_used = 0;
  dsc->max_len = len + fixup_sentinel_space;

  read_memory (from, buf, len);

  /* Set up the sentinel space so we don't have to worry about running
     off the end of the buffer.  An excessive number of leading prefixes
     could otherwise cause this.  */
  memset (buf + len, 0, fixup_sentinel_space);

  amd64_get_insn_details (buf, details);

  /* GDB may get control back after the insn after the syscall.
     Presumably this is a kernel bug.
     If this is a syscall, make sure there's a nop afterwards.  */
  {
    int syscall_length;

    if (amd64_syscall_p (details, &syscall_length))
      buf[details->opcode_offset + syscall_length] = NOP_OPCODE;
  }

  /* Modify the insn to cope with the address where it will be executed from.
     In particular, handle any rip-relative addressing.  */
  fixup_displaced_copy (gdbarch, dsc, from, to, regs);

  write_memory (to, buf, len);

  if (debug_displaced)
    {
      fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
			  paddress (gdbarch, from), paddress (gdbarch, to));
      displaced_step_dump_bytes (gdb_stdlog, buf, len);
    }

  return dsc;
}
1184
1185 static int
1186 amd64_absolute_jmp_p (const struct amd64_insn *details)
1187 {
1188 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1189
1190 if (insn[0] == 0xff)
1191 {
1192 /* jump near, absolute indirect (/4) */
1193 if ((insn[1] & 0x38) == 0x20)
1194 return 1;
1195
1196 /* jump far, absolute indirect (/5) */
1197 if ((insn[1] & 0x38) == 0x28)
1198 return 1;
1199 }
1200
1201 return 0;
1202 }
1203
1204 static int
1205 amd64_absolute_call_p (const struct amd64_insn *details)
1206 {
1207 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1208
1209 if (insn[0] == 0xff)
1210 {
1211 /* Call near, absolute indirect (/2) */
1212 if ((insn[1] & 0x38) == 0x10)
1213 return 1;
1214
1215 /* Call far, absolute indirect (/3) */
1216 if ((insn[1] & 0x38) == 0x18)
1217 return 1;
1218 }
1219
1220 return 0;
1221 }
1222
1223 static int
1224 amd64_ret_p (const struct amd64_insn *details)
1225 {
1226 /* NOTE: gcc can emit "repz ; ret". */
1227 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1228
1229 switch (insn[0])
1230 {
1231 case 0xc2: /* ret near, pop N bytes */
1232 case 0xc3: /* ret near */
1233 case 0xca: /* ret far, pop N bytes */
1234 case 0xcb: /* ret far */
1235 case 0xcf: /* iret */
1236 return 1;
1237
1238 default:
1239 return 0;
1240 }
1241 }
1242
1243 static int
1244 amd64_call_p (const struct amd64_insn *details)
1245 {
1246 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1247
1248 if (amd64_absolute_call_p (details))
1249 return 1;
1250
1251 /* call near, relative */
1252 if (insn[0] == 0xe8)
1253 return 1;
1254
1255 return 0;
1256 }
1257
1258 /* Return non-zero if INSN is a system call, and set *LENGTHP to its
1259 length in bytes. Otherwise, return zero. */
1260
1261 static int
1262 amd64_syscall_p (const struct amd64_insn *details, int *lengthp)
1263 {
1264 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1265
1266 if (insn[0] == 0x0f && insn[1] == 0x05)
1267 {
1268 *lengthp = 2;
1269 return 1;
1270 }
1271
1272 return 0;
1273 }
1274
1275 /* Fix up the state of registers and memory after having single-stepped
1276 a displaced instruction. */
1277
1278 void
1279 amd64_displaced_step_fixup (struct gdbarch *gdbarch,
1280 struct displaced_step_closure *dsc,
1281 CORE_ADDR from, CORE_ADDR to,
1282 struct regcache *regs)
1283 {
1284 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1285 /* The offset we applied to the instruction's address. */
1286 ULONGEST insn_offset = to - from;
1287 gdb_byte *insn = dsc->insn_buf;
1288 const struct amd64_insn *insn_details = &dsc->insn_details;
1289
1290 if (debug_displaced)
1291 fprintf_unfiltered (gdb_stdlog,
1292 "displaced: fixup (%s, %s), "
1293 "insn = 0x%02x 0x%02x ...\n",
1294 paddress (gdbarch, from), paddress (gdbarch, to),
1295 insn[0], insn[1]);
1296
1297 /* If we used a tmp reg, restore it. */
1298
1299 if (dsc->tmp_used)
1300 {
1301 if (debug_displaced)
1302 fprintf_unfiltered (gdb_stdlog, "displaced: restoring reg %d to %s\n",
1303 dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save));
1304 regcache_cooked_write_unsigned (regs, dsc->tmp_regno, dsc->tmp_save);
1305 }
1306
1307 /* The list of issues to contend with here is taken from
1308 resume_execution in arch/x86/kernel/kprobes.c, Linux 2.6.28.
1309 Yay for Free Software! */
1310
1311 /* Relocate the %rip back to the program's instruction stream,
1312 if necessary. */
1313
1314 /* Except in the case of absolute or indirect jump or call
1315 instructions, or a return instruction, the new rip is relative to
1316 the displaced instruction; make it relative to the original insn.
1317 Well, signal handler returns don't need relocation either, but we use the
1318 value of %rip to recognize those; see below. */
1319 if (! amd64_absolute_jmp_p (insn_details)
1320 && ! amd64_absolute_call_p (insn_details)
1321 && ! amd64_ret_p (insn_details))
1322 {
1323 ULONGEST orig_rip;
1324 int insn_len;
1325
1326 regcache_cooked_read_unsigned (regs, AMD64_RIP_REGNUM, &orig_rip);
1327
1328 /* A signal trampoline system call changes the %rip, resuming
1329 execution of the main program after the signal handler has
1330 returned. That makes them like 'return' instructions; we
1331 shouldn't relocate %rip.
1332
1333 But most system calls don't, and we do need to relocate %rip.
1334
1335 Our heuristic for distinguishing these cases: if stepping
1336 over the system call instruction left control directly after
1337 the instruction, the we relocate --- control almost certainly
1338 doesn't belong in the displaced copy. Otherwise, we assume
1339 the instruction has put control where it belongs, and leave
1340 it unrelocated. Goodness help us if there are PC-relative
1341 system calls. */
1342 if (amd64_syscall_p (insn_details, &insn_len)
1343 && orig_rip != to + insn_len
1344 /* GDB can get control back after the insn after the syscall.
1345 Presumably this is a kernel bug.
1346 Fixup ensures its a nop, we add one to the length for it. */
1347 && orig_rip != to + insn_len + 1)
1348 {
1349 if (debug_displaced)
1350 fprintf_unfiltered (gdb_stdlog,
1351 "displaced: syscall changed %%rip; "
1352 "not relocating\n");
1353 }
1354 else
1355 {
1356 ULONGEST rip = orig_rip - insn_offset;
1357
1358 /* If we just stepped over a breakpoint insn, we don't backup
1359 the pc on purpose; this is to match behaviour without
1360 stepping. */
1361
1362 regcache_cooked_write_unsigned (regs, AMD64_RIP_REGNUM, rip);
1363
1364 if (debug_displaced)
1365 fprintf_unfiltered (gdb_stdlog,
1366 "displaced: "
1367 "relocated %%rip from %s to %s\n",
1368 paddress (gdbarch, orig_rip),
1369 paddress (gdbarch, rip));
1370 }
1371 }
1372
1373 /* If the instruction was PUSHFL, then the TF bit will be set in the
1374 pushed value, and should be cleared. We'll leave this for later,
1375 since GDB already messes up the TF flag when stepping over a
1376 pushfl. */
1377
1378 /* If the instruction was a call, the return address now atop the
1379 stack is the address following the copied instruction. We need
1380 to make it the address following the original instruction. */
1381 if (amd64_call_p (insn_details))
1382 {
1383 ULONGEST rsp;
1384 ULONGEST retaddr;
1385 const ULONGEST retaddr_len = 8;
1386
1387 regcache_cooked_read_unsigned (regs, AMD64_RSP_REGNUM, &rsp);
1388 retaddr = read_memory_unsigned_integer (rsp, retaddr_len, byte_order);
1389 retaddr = (retaddr - insn_offset) & 0xffffffffUL;
1390 write_memory_unsigned_integer (rsp, retaddr_len, byte_order, retaddr);
1391
1392 if (debug_displaced)
1393 fprintf_unfiltered (gdb_stdlog,
1394 "displaced: relocated return addr at %s "
1395 "to %s\n",
1396 paddress (gdbarch, rsp),
1397 paddress (gdbarch, retaddr));
1398 }
1399 }
1400 \f
1401 /* The maximum number of saved registers. This should include %rip. */
1402 #define AMD64_NUM_SAVED_REGS AMD64_NUM_GREGS
1403
/* Prologue-analysis cache for a single frame.  */

struct amd64_frame_cache
{
  /* Base address.  */
  CORE_ADDR base;
  /* Adjustment applied to %rsp to reconstruct the frame base when no
     frame pointer was set up; starts at -8 (the pushed return address)
     and grows as the prologue pushes registers.  */
  CORE_ADDR sp_offset;
  /* Start address of the function this cache describes.  */
  CORE_ADDR pc;

  /* Saved registers.  Entries start out as offsets from BASE and are
     later converted to absolute addresses; -1 means "not saved".  */
  CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
  /* The calling frame's %rsp, once reconstructed (0 until then).  */
  CORE_ADDR saved_sp;
  /* Register holding the pre-alignment stack pointer when the prologue
     re-aligns the stack (see amd64_analyze_stack_align); -1 if unused.  */
  int saved_sp_reg;

  /* Do we have a frame?  */
  int frameless_p;
};
1419
1420 /* Initialize a frame cache. */
1421
1422 static void
1423 amd64_init_frame_cache (struct amd64_frame_cache *cache)
1424 {
1425 int i;
1426
1427 /* Base address. */
1428 cache->base = 0;
1429 cache->sp_offset = -8;
1430 cache->pc = 0;
1431
1432 /* Saved registers. We initialize these to -1 since zero is a valid
1433 offset (that's where %rbp is supposed to be stored).
1434 The values start out as being offsets, and are later converted to
1435 addresses (at which point -1 is interpreted as an address, still meaning
1436 "invalid"). */
1437 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
1438 cache->saved_regs[i] = -1;
1439 cache->saved_sp = 0;
1440 cache->saved_sp_reg = -1;
1441
1442 /* Frameless until proven otherwise. */
1443 cache->frameless_p = 1;
1444 }
1445
1446 /* Allocate and initialize a frame cache. */
1447
1448 static struct amd64_frame_cache *
1449 amd64_alloc_frame_cache (void)
1450 {
1451 struct amd64_frame_cache *cache;
1452
1453 cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);
1454 amd64_init_frame_cache (cache);
1455 return cache;
1456 }
1457
1458 /* GCC 4.4 and later, can put code in the prologue to realign the
1459 stack pointer. Check whether PC points to such code, and update
1460 CACHE accordingly. Return the first instruction after the code
1461 sequence or CURRENT_PC, whichever is smaller. If we don't
1462 recognize the code, return PC. */
1463
static CORE_ADDR
amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
			   struct amd64_frame_cache *cache)
{
  /* There are 2 code sequences to re-align stack before the frame
     gets set up:

	1. Use a caller-saved saved register:

		leaq  8(%rsp), %reg
		andq  $-XXX, %rsp
		pushq -8(%reg)

	2. Use a callee-saved saved register:

		pushq %reg
		leaq  16(%rsp), %reg
		andq  $-XXX, %rsp
		pushq -8(%reg)

     "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:

	0x48 0x83 0xe4 0xf0			andq $-16, %rsp
	0x48 0x81 0xe4 0x00 0xff 0xff 0xff	andq $-256, %rsp
   */

  /* 18 bytes is enough to hold the longest recognized sequence.  */
  gdb_byte buf[18];
  int reg, r;
  int offset, offset_and;

  if (target_read_memory (pc, buf, sizeof buf))
    return pc;

  /* Check caller-saved saved register.  The first instruction has
     to be "leaq 8(%rsp), %reg".  */
  if ((buf[0] & 0xfb) == 0x48
      && buf[1] == 0x8d
      && buf[3] == 0x24
      && buf[4] == 0x8)
    {
      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[2] & 0xc7) != 0x44)
	return pc;

      /* REG has register number.  */
      reg = (buf[2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if (buf[0] == 0x4c)
	reg += 8;

      offset = 5;
    }
  else
    {
      /* Check callee-saved saved register.  The first instruction
	 has to be "pushq %reg".  */
      reg = 0;
      if ((buf[0] & 0xf8) == 0x50)
	offset = 0;
      else if ((buf[0] & 0xf6) == 0x40
	       && (buf[1] & 0xf8) == 0x50)
	{
	  /* Check the REX.B bit.  */
	  if ((buf[0] & 1) != 0)
	    reg = 8;

	  offset = 1;
	}
      else
	return pc;

      /* Get register.  */
      reg += buf[offset] & 0x7;

      offset++;

      /* The next instruction has to be "leaq 16(%rsp), %reg".  */
      if ((buf[offset] & 0xfb) != 0x48
	  || buf[offset + 1] != 0x8d
	  || buf[offset + 3] != 0x24
	  || buf[offset + 4] != 0x10)
	return pc;

      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[offset + 2] & 0xc7) != 0x44)
	return pc;

      /* REG has register number.  */
      r = (buf[offset + 2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if (buf[offset] == 0x4c)
	r += 8;

      /* Registers in pushq and leaq have to be the same.  */
      if (reg != r)
	return pc;

      offset += 5;
    }

  /* Register can't be %rsp nor %rbp.  */
  if (reg == 4 || reg == 5)
    return pc;

  /* The next instruction has to be "andq $-XXX, %rsp".  */
  if (buf[offset] != 0x48
      || buf[offset + 2] != 0xe4
      || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
    return pc;

  /* Remember where the "andq" is: once CURRENT_PC is past it, %rsp no
     longer holds the pre-alignment stack pointer (see below).  */
  offset_and = offset;
  offset += buf[offset + 1] == 0x81 ? 7 : 4;

  /* The next instruction has to be "pushq -8(%reg)".  */
  r = 0;
  if (buf[offset] == 0xff)
    offset++;
  else if ((buf[offset] & 0xf6) == 0x40
	   && buf[offset + 1] == 0xff)
    {
      /* Check the REX.B bit.  */
      if ((buf[offset] & 0x1) != 0)
	r = 8;
      offset += 2;
    }
  else
    return pc;

  /* 8bit -8 is 0xf8.  REG must be binary 110 and MOD must be binary
     01.  */
  if (buf[offset + 1] != 0xf8
      || (buf[offset] & 0xf8) != 0x70)
    return pc;

  /* R/M has register.  */
  r += buf[offset] & 7;

  /* Registers in leaq and pushq have to be the same.  */
  if (reg != r)
    return pc;

  /* If execution has passed the "andq", the pre-alignment stack
     pointer can only be recovered from REG; record that.  */
  if (current_pc > pc + offset_and)
    cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);

  /* +2 skips the two bytes of "pushq -8(%reg)" itself.  */
  return min (pc + offset + 2, current_pc);
}
1612
1613 /* Do a limited analysis of the prologue at PC and update CACHE
1614 accordingly. Bail out early if CURRENT_PC is reached. Return the
1615 address where the analysis stopped.
1616
1617 We will handle only functions beginning with:
1618
1619 pushq %rbp 0x55
1620 movq %rsp, %rbp 0x48 0x89 0xe5
1621
1622 Any function that doesn't start with this sequence will be assumed
1623 to have no prologue and thus no valid frame pointer in %rbp. */
1624
1625 static CORE_ADDR
1626 amd64_analyze_prologue (struct gdbarch *gdbarch,
1627 CORE_ADDR pc, CORE_ADDR current_pc,
1628 struct amd64_frame_cache *cache)
1629 {
1630 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1631 static gdb_byte proto[3] = { 0x48, 0x89, 0xe5 }; /* movq %rsp, %rbp */
1632 gdb_byte buf[3];
1633 gdb_byte op;
1634
1635 if (current_pc <= pc)
1636 return current_pc;
1637
1638 pc = amd64_analyze_stack_align (pc, current_pc, cache);
1639
1640 op = read_memory_unsigned_integer (pc, 1, byte_order);
1641
1642 if (op == 0x55) /* pushq %rbp */
1643 {
1644 /* Take into account that we've executed the `pushq %rbp' that
1645 starts this instruction sequence. */
1646 cache->saved_regs[AMD64_RBP_REGNUM] = 0;
1647 cache->sp_offset += 8;
1648
1649 /* If that's all, return now. */
1650 if (current_pc <= pc + 1)
1651 return current_pc;
1652
1653 /* Check for `movq %rsp, %rbp'. */
1654 read_memory (pc + 1, buf, 3);
1655 if (memcmp (buf, proto, 3) != 0)
1656 return pc + 1;
1657
1658 /* OK, we actually have a frame. */
1659 cache->frameless_p = 0;
1660 return pc + 4;
1661 }
1662
1663 return pc;
1664 }
1665
1666 /* Return PC of first real instruction. */
1667
1668 static CORE_ADDR
1669 amd64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR start_pc)
1670 {
1671 struct amd64_frame_cache cache;
1672 CORE_ADDR pc;
1673
1674 amd64_init_frame_cache (&cache);
1675 pc = amd64_analyze_prologue (gdbarch, start_pc, 0xffffffffffffffffLL,
1676 &cache);
1677 if (cache.frameless_p)
1678 return start_pc;
1679
1680 return pc;
1681 }
1682 \f
1683
1684 /* Normal frames. */
1685
/* Build (or return the already-built) frame cache for THIS_FRAME,
   running the prologue analyzer if the function's start is known.  */

static struct amd64_frame_cache *
amd64_frame_cache (struct frame_info *this_frame, void **this_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct amd64_frame_cache *cache;
  gdb_byte buf[8];
  int i;

  if (*this_cache)
    return *this_cache;

  cache = amd64_alloc_frame_cache ();
  *this_cache = cache;

  cache->pc = get_frame_func (this_frame);
  if (cache->pc != 0)
    amd64_analyze_prologue (gdbarch, cache->pc, get_frame_pc (this_frame),
			    cache);

  if (cache->saved_sp_reg != -1)
    {
      /* Stack pointer has been saved.  */
      get_frame_register (this_frame, cache->saved_sp_reg, buf);
      cache->saved_sp = extract_unsigned_integer(buf, 8, byte_order);
    }

  if (cache->frameless_p)
    {
      /* We didn't find a valid frame.  If we're at the start of a
	 function, or somewhere half-way its prologue, the function's
	 frame probably hasn't been fully setup yet.  Try to
	 reconstruct the base address for the stack frame by looking
	 at the stack pointer.  For truly "frameless" functions this
	 might work too.  */

      if (cache->saved_sp_reg != -1)
	{
	  /* We're halfway aligning the stack.  */
	  cache->base = ((cache->saved_sp - 8) & 0xfffffffffffffff0LL) - 8;
	  cache->saved_regs[AMD64_RIP_REGNUM] = cache->saved_sp - 8;

	  /* This will be added back below.  */
	  cache->saved_regs[AMD64_RIP_REGNUM] -= cache->base;
	}
      else
	{
	  /* Reconstruct the base from %rsp plus how far the prologue
	     has moved it so far.  */
	  get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
	  cache->base = extract_unsigned_integer (buf, 8, byte_order)
			+ cache->sp_offset;
	}
    }
  else
    {
      /* A full frame was set up; %rbp holds the base.  */
      get_frame_register (this_frame, AMD64_RBP_REGNUM, buf);
      cache->base = extract_unsigned_integer (buf, 8, byte_order);
    }

  /* Now that we have the base address for the stack frame we can
     calculate the value of %rsp in the calling frame.  */
  cache->saved_sp = cache->base + 16;

  /* For normal frames, %rip is stored at 8(%rbp).  If we don't have a
     frame we find it at the same offset from the reconstructed base
     address.  If we're halfway aligning the stack, %rip is handled
     differently (see above).  */
  if (!cache->frameless_p || cache->saved_sp_reg == -1)
    cache->saved_regs[AMD64_RIP_REGNUM] = 8;

  /* Adjust all the saved registers such that they contain addresses
     instead of offsets.  */
  for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
    if (cache->saved_regs[i] != -1)
      cache->saved_regs[i] += cache->base;

  return cache;
}
1763
1764 static void
1765 amd64_frame_this_id (struct frame_info *this_frame, void **this_cache,
1766 struct frame_id *this_id)
1767 {
1768 struct amd64_frame_cache *cache =
1769 amd64_frame_cache (this_frame, this_cache);
1770
1771 /* This marks the outermost frame. */
1772 if (cache->base == 0)
1773 return;
1774
1775 (*this_id) = frame_id_build (cache->base + 16, cache->pc);
1776 }
1777
1778 static struct value *
1779 amd64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
1780 int regnum)
1781 {
1782 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1783 struct amd64_frame_cache *cache =
1784 amd64_frame_cache (this_frame, this_cache);
1785
1786 gdb_assert (regnum >= 0);
1787
1788 if (regnum == gdbarch_sp_regnum (gdbarch) && cache->saved_sp)
1789 return frame_unwind_got_constant (this_frame, regnum, cache->saved_sp);
1790
1791 if (regnum < AMD64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
1792 return frame_unwind_got_memory (this_frame, regnum,
1793 cache->saved_regs[regnum]);
1794
1795 return frame_unwind_got_register (this_frame, regnum, regnum);
1796 }
1797
/* Unwinder for normal, prologue-analyzed frames.  */

static const struct frame_unwind amd64_frame_unwind =
{
  NORMAL_FRAME,
  amd64_frame_this_id,
  amd64_frame_prev_register,
  NULL,
  default_frame_sniffer
};
1806 \f
1807
1808 /* Signal trampolines. */
1809
1810 /* FIXME: kettenis/20030419: Perhaps, we can unify the 32-bit and
1811 64-bit variants. This would require using identical frame caches
1812 on both platforms. */
1813
1814 static struct amd64_frame_cache *
1815 amd64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
1816 {
1817 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1818 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1819 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1820 struct amd64_frame_cache *cache;
1821 CORE_ADDR addr;
1822 gdb_byte buf[8];
1823 int i;
1824
1825 if (*this_cache)
1826 return *this_cache;
1827
1828 cache = amd64_alloc_frame_cache ();
1829
1830 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
1831 cache->base = extract_unsigned_integer (buf, 8, byte_order) - 8;
1832
1833 addr = tdep->sigcontext_addr (this_frame);
1834 gdb_assert (tdep->sc_reg_offset);
1835 gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
1836 for (i = 0; i < tdep->sc_num_regs; i++)
1837 if (tdep->sc_reg_offset[i] != -1)
1838 cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];
1839
1840 *this_cache = cache;
1841 return cache;
1842 }
1843
1844 static void
1845 amd64_sigtramp_frame_this_id (struct frame_info *this_frame,
1846 void **this_cache, struct frame_id *this_id)
1847 {
1848 struct amd64_frame_cache *cache =
1849 amd64_sigtramp_frame_cache (this_frame, this_cache);
1850
1851 (*this_id) = frame_id_build (cache->base + 16, get_frame_pc (this_frame));
1852 }
1853
static struct value *
amd64_sigtramp_frame_prev_register (struct frame_info *this_frame,
				    void **this_cache, int regnum)
{
  /* Populate the cache, then defer to the normal-frame code, which
     reads registers from the saved_regs addresses filled in above.  */
  amd64_sigtramp_frame_cache (this_frame, this_cache);
  return amd64_frame_prev_register (this_frame, this_cache, regnum);
}
1863
1864 static int
1865 amd64_sigtramp_frame_sniffer (const struct frame_unwind *self,
1866 struct frame_info *this_frame,
1867 void **this_cache)
1868 {
1869 struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
1870
1871 /* We shouldn't even bother if we don't have a sigcontext_addr
1872 handler. */
1873 if (tdep->sigcontext_addr == NULL)
1874 return 0;
1875
1876 if (tdep->sigtramp_p != NULL)
1877 {
1878 if (tdep->sigtramp_p (this_frame))
1879 return 1;
1880 }
1881
1882 if (tdep->sigtramp_start != 0)
1883 {
1884 CORE_ADDR pc = get_frame_pc (this_frame);
1885
1886 gdb_assert (tdep->sigtramp_end != 0);
1887 if (pc >= tdep->sigtramp_start && pc < tdep->sigtramp_end)
1888 return 1;
1889 }
1890
1891 return 0;
1892 }
1893
/* Unwinder for signal trampoline frames.  */

static const struct frame_unwind amd64_sigtramp_frame_unwind =
{
  SIGTRAMP_FRAME,
  amd64_sigtramp_frame_this_id,
  amd64_sigtramp_frame_prev_register,
  NULL,
  amd64_sigtramp_frame_sniffer
};
1902 \f
1903
1904 static CORE_ADDR
1905 amd64_frame_base_address (struct frame_info *this_frame, void **this_cache)
1906 {
1907 struct amd64_frame_cache *cache =
1908 amd64_frame_cache (this_frame, this_cache);
1909
1910 return cache->base;
1911 }
1912
/* Frame base handler: the same base address is used for the frame
   base, locals and arguments.  */

static const struct frame_base amd64_frame_base =
{
  &amd64_frame_unwind,
  amd64_frame_base_address,
  amd64_frame_base_address,
  amd64_frame_base_address
};
1920
1921 /* Normal frames, but in a function epilogue. */
1922
1923 /* The epilogue is defined here as the 'ret' instruction, which will
1924 follow any instruction such as 'leave' or 'pop %ebp' that destroys
1925 the function's stack frame. */
1926
1927 static int
1928 amd64_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
1929 {
1930 gdb_byte insn;
1931
1932 if (target_read_memory (pc, &insn, 1))
1933 return 0; /* Can't read memory at pc. */
1934
1935 if (insn != 0xc3) /* 'ret' instruction. */
1936 return 0;
1937
1938 return 1;
1939 }
1940
static int
amd64_epilogue_frame_sniffer (const struct frame_unwind *self,
			      struct frame_info *this_frame,
			      void **this_prologue_cache)
{
  /* Only the innermost frame can be sitting in an epilogue.  */
  if (frame_relative_level (this_frame) != 0)
    return 0;

  return amd64_in_function_epilogue_p (get_frame_arch (this_frame),
				       get_frame_pc (this_frame));
}
1952
/* Build (or return the cached) frame cache for a frame stopped in a
   function epilogue, where the frame has already been torn down.  */

static struct amd64_frame_cache *
amd64_epilogue_frame_cache (struct frame_info *this_frame, void **this_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct amd64_frame_cache *cache;
  gdb_byte buf[8];

  if (*this_cache)
    return *this_cache;

  cache = amd64_alloc_frame_cache ();
  *this_cache = cache;

  /* Cache base will be %rsp plus cache->sp_offset (-8).  */
  get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
  cache->base = extract_unsigned_integer (buf, 8,
					  byte_order) + cache->sp_offset;

  /* Cache pc is the current PC (past the prologue, so get_frame_pc
     rather than get_frame_func).  */
  cache->pc = get_frame_pc (this_frame);

  /* The saved %rsp will be at cache->base plus 16.  */
  cache->saved_sp = cache->base + 16;

  /* The saved %rip will be at cache->base plus 8.  */
  cache->saved_regs[AMD64_RIP_REGNUM] = cache->base + 8;

  return cache;
}
1983
1984 static void
1985 amd64_epilogue_frame_this_id (struct frame_info *this_frame,
1986 void **this_cache,
1987 struct frame_id *this_id)
1988 {
1989 struct amd64_frame_cache *cache = amd64_epilogue_frame_cache (this_frame,
1990 this_cache);
1991
1992 (*this_id) = frame_id_build (cache->base + 8, cache->pc);
1993 }
1994
/* Unwinder for frames stopped in a function epilogue.  */

static const struct frame_unwind amd64_epilogue_frame_unwind =
{
  NORMAL_FRAME,
  amd64_epilogue_frame_this_id,
  amd64_frame_prev_register,
  NULL,
  amd64_epilogue_frame_sniffer
};
2003
2004 static struct frame_id
2005 amd64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
2006 {
2007 CORE_ADDR fp;
2008
2009 fp = get_frame_register_unsigned (this_frame, AMD64_RBP_REGNUM);
2010
2011 return frame_id_build (fp + 16, get_frame_pc (this_frame));
2012 }
2013
2014 /* 16 byte align the SP per frame requirements. */
2015
2016 static CORE_ADDR
2017 amd64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
2018 {
2019 return sp & -(CORE_ADDR)16;
2020 }
2021 \f
2022
2023 /* Supply register REGNUM from the buffer specified by FPREGS and LEN
2024 in the floating-point register set REGSET to register cache
2025 REGCACHE. If REGNUM is -1, do this for all registers in REGSET. */
2026
2027 static void
2028 amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
2029 int regnum, const void *fpregs, size_t len)
2030 {
2031 const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);
2032
2033 gdb_assert (len == tdep->sizeof_fpregset);
2034 amd64_supply_fxsave (regcache, regnum, fpregs);
2035 }
2036
2037 /* Collect register REGNUM from the register cache REGCACHE and store
2038 it in the buffer specified by FPREGS and LEN as described by the
2039 floating-point register set REGSET. If REGNUM is -1, do this for
2040 all registers in REGSET. */
2041
2042 static void
2043 amd64_collect_fpregset (const struct regset *regset,
2044 const struct regcache *regcache,
2045 int regnum, void *fpregs, size_t len)
2046 {
2047 const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);
2048
2049 gdb_assert (len == tdep->sizeof_fpregset);
2050 amd64_collect_fxsave (regcache, regnum, fpregs);
2051 }
2052
2053 /* Return the appropriate register set for the core section identified
2054 by SECT_NAME and SECT_SIZE. */
2055
2056 static const struct regset *
2057 amd64_regset_from_core_section (struct gdbarch *gdbarch,
2058 const char *sect_name, size_t sect_size)
2059 {
2060 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2061
2062 if (strcmp (sect_name, ".reg2") == 0 && sect_size == tdep->sizeof_fpregset)
2063 {
2064 if (tdep->fpregset == NULL)
2065 tdep->fpregset = regset_alloc (gdbarch, amd64_supply_fpregset,
2066 amd64_collect_fpregset);
2067
2068 return tdep->fpregset;
2069 }
2070
2071 return i386_regset_from_core_section (gdbarch, sect_name, sect_size);
2072 }
2073 \f
2074
2075 /* Figure out where the longjmp will land. Slurp the jmp_buf out of
2076 %rdi. We expect its value to be a pointer to the jmp_buf structure
2077 from which we extract the address that we will land at. This
2078 address is copied into PC. This routine returns non-zero on
2079 success. */
2080
2081 static int
2082 amd64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2083 {
2084 gdb_byte buf[8];
2085 CORE_ADDR jb_addr;
2086 struct gdbarch *gdbarch = get_frame_arch (frame);
2087 int jb_pc_offset = gdbarch_tdep (gdbarch)->jb_pc_offset;
2088 int len = TYPE_LENGTH (builtin_type (gdbarch)->builtin_func_ptr);
2089
2090 /* If JB_PC_OFFSET is -1, we have no way to find out where the
2091 longjmp will land. */
2092 if (jb_pc_offset == -1)
2093 return 0;
2094
2095 get_frame_register (frame, AMD64_RDI_REGNUM, buf);
2096 jb_addr= extract_typed_address
2097 (buf, builtin_type (gdbarch)->builtin_data_ptr);
2098 if (target_read_memory (jb_addr + jb_pc_offset, buf, len))
2099 return 0;
2100
2101 *pc = extract_typed_address (buf, builtin_type (gdbarch)->builtin_func_ptr);
2102
2103 return 1;
2104 }
2105
/* Register mapping installed as tdep->record_regmap in amd64_init_abi
   below, for the process record/replay support.  NOTE(review): the
   index semantics (presumably the i386 record register ordering) are
   not visible in this file — confirm against the record/replay code
   in i386-tdep.c before relying on the order.  */
static const int amd64_record_regmap[] =
{
  AMD64_RAX_REGNUM, AMD64_RCX_REGNUM, AMD64_RDX_REGNUM, AMD64_RBX_REGNUM,
  AMD64_RSP_REGNUM, AMD64_RBP_REGNUM, AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
  AMD64_R8_REGNUM, AMD64_R9_REGNUM, AMD64_R10_REGNUM, AMD64_R11_REGNUM,
  AMD64_R12_REGNUM, AMD64_R13_REGNUM, AMD64_R14_REGNUM, AMD64_R15_REGNUM,
  AMD64_RIP_REGNUM, AMD64_EFLAGS_REGNUM, AMD64_CS_REGNUM, AMD64_SS_REGNUM,
  AMD64_DS_REGNUM, AMD64_ES_REGNUM, AMD64_FS_REGNUM, AMD64_GS_REGNUM
};
2115
/* Install the common AMD64 ABI bits into GDBARCH: type sizes,
   register set, calling convention, return-value handling and frame
   unwinders.  OS-specific tdep files call this and then override
   pieces as needed.  */
void
amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
     floating-point registers.  */
  tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;

  /* AMD64 has an FPU and 16 SSE registers.  */
  tdep->st0_regnum = AMD64_ST0_REGNUM;
  tdep->num_xmm_regs = 16;

  /* This is what all the fuss is about.  */
  set_gdbarch_long_bit (gdbarch, 64);
  set_gdbarch_long_long_bit (gdbarch, 64);
  set_gdbarch_ptr_bit (gdbarch, 64);

  /* In contrast to the i386, on AMD64 a `long double' actually takes
     up 128 bits, even though it's still based on the i387 extended
     floating-point format which has only 80 significant bits.  */
  set_gdbarch_long_double_bit (gdbarch, 128);

  set_gdbarch_num_regs (gdbarch, AMD64_NUM_REGS);
  set_gdbarch_register_name (gdbarch, amd64_register_name);
  set_gdbarch_register_type (gdbarch, amd64_register_type);

  /* Register numbers of various important registers.  */
  set_gdbarch_sp_regnum (gdbarch, AMD64_RSP_REGNUM); /* %rsp */
  set_gdbarch_pc_regnum (gdbarch, AMD64_RIP_REGNUM); /* %rip */
  set_gdbarch_ps_regnum (gdbarch, AMD64_EFLAGS_REGNUM); /* %eflags */
  set_gdbarch_fp0_regnum (gdbarch, AMD64_ST0_REGNUM); /* %st(0) */

  /* The "default" register numbering scheme for AMD64 is referred to
     as the "DWARF Register Number Mapping" in the System V psABI.
     The preferred debugging format for all known AMD64 targets is
     actually DWARF2, and GCC doesn't seem to support DWARF (that is
     DWARF-1), but we provide the same mapping just in case.  This
     mapping is also used for stabs, which GCC does support.  */
  set_gdbarch_stab_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
  set_gdbarch_dwarf2_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);

  /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
     be in use on any of the supported AMD64 targets.  */

  /* Call dummy code.  */
  set_gdbarch_push_dummy_call (gdbarch, amd64_push_dummy_call);
  set_gdbarch_frame_align (gdbarch, amd64_frame_align);
  /* The System V psABI reserves 128 bytes below %rsp (the red zone)
     that the inferior may be using; keep dummy frames out of it.  */
  set_gdbarch_frame_red_zone_size (gdbarch, 128);
  tdep->call_dummy_num_integer_regs =
    ARRAY_SIZE (amd64_dummy_call_integer_regs);
  tdep->call_dummy_integer_regs = amd64_dummy_call_integer_regs;
  tdep->classify = amd64_classify;

  set_gdbarch_convert_register_p (gdbarch, i387_convert_register_p);
  set_gdbarch_register_to_value (gdbarch, i387_register_to_value);
  set_gdbarch_value_to_register (gdbarch, i387_value_to_register);

  set_gdbarch_return_value (gdbarch, amd64_return_value);

  set_gdbarch_skip_prologue (gdbarch, amd64_skip_prologue);

  /* Avoid wiring in the MMX registers for now.  */
  set_gdbarch_num_pseudo_regs (gdbarch, 0);
  tdep->mm0_regnum = -1;

  tdep->record_regmap = amd64_record_regmap;

  set_gdbarch_dummy_id (gdbarch, amd64_dummy_id);

  /* Hook the function epilogue frame unwinder.  This unwinder is
     appended to the list first, so that it supersedes the other
     unwinders in function epilogues.  */
  frame_unwind_prepend_unwinder (gdbarch, &amd64_epilogue_frame_unwind);

  /* Hook the prologue-based frame unwinders.  The order matters: the
     sigtramp unwinder is tried before the generic prologue unwinder.  */
  frame_unwind_append_unwinder (gdbarch, &amd64_sigtramp_frame_unwind);
  frame_unwind_append_unwinder (gdbarch, &amd64_frame_unwind);
  frame_base_set_default (gdbarch, &amd64_frame_base);

  /* If we have a register mapping, enable the generic core file support.  */
  if (tdep->gregset_reg_offset)
    set_gdbarch_regset_from_core_section (gdbarch,
					  amd64_regset_from_core_section);

  set_gdbarch_get_longjmp_target (gdbarch, amd64_get_longjmp_target);
}
2203 \f
2204
2205 /* The 64-bit FXSAVE format differs from the 32-bit format in the
2206 sense that the instruction pointer and data pointer are simply
2207 64-bit offsets into the code segment and the data segment instead
2208 of a selector offset pair. The functions below store the upper 32
2209 bits of these pointers (instead of just the 16-bits of the segment
2210 selector). */
2211
2212 /* Fill register REGNUM in REGCACHE with the appropriate
2213 floating-point or SSE register value from *FXSAVE. If REGNUM is
2214 -1, do this for all registers. This function masks off any of the
2215 reserved bits in *FXSAVE. */
2216
2217 void
2218 amd64_supply_fxsave (struct regcache *regcache, int regnum,
2219 const void *fxsave)
2220 {
2221 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2222 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2223
2224 i387_supply_fxsave (regcache, regnum, fxsave);
2225
2226 if (fxsave && gdbarch_ptr_bit (gdbarch) == 64)
2227 {
2228 const gdb_byte *regs = fxsave;
2229
2230 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
2231 regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
2232 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
2233 regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
2234 }
2235 }
2236
2237 /* Fill register REGNUM (if it is a floating-point or SSE register) in
2238 *FXSAVE with the value from REGCACHE. If REGNUM is -1, do this for
2239 all registers. This function doesn't touch any of the reserved
2240 bits in *FXSAVE. */
2241
2242 void
2243 amd64_collect_fxsave (const struct regcache *regcache, int regnum,
2244 void *fxsave)
2245 {
2246 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2247 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2248 gdb_byte *regs = fxsave;
2249
2250 i387_collect_fxsave (regcache, regnum, fxsave);
2251
2252 if (gdbarch_ptr_bit (gdbarch) == 64)
2253 {
2254 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
2255 regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
2256 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
2257 regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
2258 }
2259 }