gdb/amd64-tdep.c
e53bef9f 1/* Target-dependent code for AMD64.
ce0eebec 2
0fb0cc75 3 Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
5ae96ec1
MK
4 Free Software Foundation, Inc.
5
6 Contributed by Jiri Smid, SuSE Labs.
53e95fcf
JS
7
8 This file is part of GDB.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
a9762ec7 12 the Free Software Foundation; either version 3 of the License, or
53e95fcf
JS
13 (at your option) any later version.
14
15 This program is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License
a9762ec7 21 along with this program. If not, see <http://www.gnu.org/licenses/>. */
53e95fcf
JS
22
23#include "defs.h"
35669430
DE
24#include "opcode/i386.h"
25#include "dis-asm.h"
c4f35dd8
MK
26#include "arch-utils.h"
27#include "block.h"
28#include "dummy-frame.h"
29#include "frame.h"
30#include "frame-base.h"
31#include "frame-unwind.h"
53e95fcf 32#include "inferior.h"
53e95fcf 33#include "gdbcmd.h"
c4f35dd8
MK
34#include "gdbcore.h"
35#include "objfiles.h"
53e95fcf 36#include "regcache.h"
2c261fae 37#include "regset.h"
53e95fcf 38#include "symfile.h"
c4f35dd8 39
82dbc5f7 40#include "gdb_assert.h"
c4f35dd8 41
9c1488cb 42#include "amd64-tdep.h"
c4f35dd8 43#include "i387-tdep.h"
53e95fcf 44
e53bef9f
MK
45/* Note that the AMD64 architecture was previously known as x86-64.
46 The latter is (forever) engraved into the canonical system name as
90f90721 47 returned by config.guess, and used as the name for the AMD64 port
e53bef9f
MK
48 of GNU/Linux. The BSDs have renamed their ports to amd64; they
49 don't like to shout. For GDB we prefer the amd64_-prefix over the
50 x86_64_-prefix since it's so much easier to type. */
51
402ecd56 52/* Register information. */
c4f35dd8 53
6707b003 54static const char *amd64_register_names[] =
de220d0f 55{
6707b003 56 "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",
c4f35dd8
MK
57
58 /* %r8 is indeed register number 8. */
6707b003
UW
59 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
60 "rip", "eflags", "cs", "ss", "ds", "es", "fs", "gs",
c4f35dd8 61
af233647 62 /* %st0 is register number 24. */
6707b003
UW
63 "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7",
64 "fctrl", "fstat", "ftag", "fiseg", "fioff", "foseg", "fooff", "fop",
c4f35dd8 65
af233647 66 /* %xmm0 is register number 40. */
6707b003
UW
67 "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
68 "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
69 "mxcsr",
0e04a514
ML
70};
71
c4f35dd8 72/* Total number of registers. */
6707b003 73#define AMD64_NUM_REGS ARRAY_SIZE (amd64_register_names)
de220d0f 74
c4f35dd8 75/* Return the name of register REGNUM. */
b6779aa2 76
8695c747 77const char *
d93859e2 78amd64_register_name (struct gdbarch *gdbarch, int regnum)
53e95fcf 79{
e53bef9f 80 if (regnum >= 0 && regnum < AMD64_NUM_REGS)
6707b003 81 return amd64_register_names[regnum];
53e95fcf 82
c4f35dd8 83 return NULL;
53e95fcf
JS
84}
85
86/* Return the GDB type object for the "standard" data type of data in
c4f35dd8 87 register REGNUM. */
53e95fcf 88
8695c747 89struct type *
e53bef9f 90amd64_register_type (struct gdbarch *gdbarch, int regnum)
53e95fcf 91{
6707b003
UW
92 if (regnum >= AMD64_RAX_REGNUM && regnum <= AMD64_RDI_REGNUM)
93 return builtin_type_int64;
94 if (regnum == AMD64_RBP_REGNUM || regnum == AMD64_RSP_REGNUM)
0dfff4cb 95 return builtin_type (gdbarch)->builtin_data_ptr;
6707b003
UW
96 if (regnum >= AMD64_R8_REGNUM && regnum <= AMD64_R15_REGNUM)
97 return builtin_type_int64;
98 if (regnum == AMD64_RIP_REGNUM)
0dfff4cb 99 return builtin_type (gdbarch)->builtin_func_ptr;
6707b003
UW
100 if (regnum == AMD64_EFLAGS_REGNUM)
101 return i386_eflags_type;
102 if (regnum >= AMD64_CS_REGNUM && regnum <= AMD64_GS_REGNUM)
103 return builtin_type_int32;
104 if (regnum >= AMD64_ST0_REGNUM && regnum <= AMD64_ST0_REGNUM + 7)
105 return builtin_type_i387_ext;
106 if (regnum >= AMD64_FCTRL_REGNUM && regnum <= AMD64_FCTRL_REGNUM + 7)
107 return builtin_type_int32;
108 if (regnum >= AMD64_XMM0_REGNUM && regnum <= AMD64_XMM0_REGNUM + 15)
794ac428 109 return i386_sse_type (gdbarch);
6707b003
UW
110 if (regnum == AMD64_MXCSR_REGNUM)
111 return i386_mxcsr_type;
112
113 internal_error (__FILE__, __LINE__, _("invalid regnum"));
53e95fcf
JS
114}
115
c4f35dd8
MK
116/* DWARF Register Number Mapping as defined in the System V psABI,
117 section 3.6. */
53e95fcf 118
e53bef9f 119static int amd64_dwarf_regmap[] =
0e04a514 120{
c4f35dd8 121 /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI. */
90f90721
MK
122 AMD64_RAX_REGNUM, AMD64_RDX_REGNUM,
123 AMD64_RCX_REGNUM, AMD64_RBX_REGNUM,
124 AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
c4f35dd8
MK
125
126 /* Frame Pointer Register RBP. */
90f90721 127 AMD64_RBP_REGNUM,
c4f35dd8
MK
128
129 /* Stack Pointer Register RSP. */
90f90721 130 AMD64_RSP_REGNUM,
c4f35dd8
MK
131
132 /* Extended Integer Registers 8 - 15. */
133 8, 9, 10, 11, 12, 13, 14, 15,
134
59207364 135 /* Return Address RA. Mapped to RIP. */
90f90721 136 AMD64_RIP_REGNUM,
c4f35dd8
MK
137
138 /* SSE Registers 0 - 7. */
90f90721
MK
139 AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
140 AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
141 AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
142 AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
c4f35dd8
MK
143
144 /* Extended SSE Registers 8 - 15. */
90f90721
MK
145 AMD64_XMM0_REGNUM + 8, AMD64_XMM0_REGNUM + 9,
146 AMD64_XMM0_REGNUM + 10, AMD64_XMM0_REGNUM + 11,
147 AMD64_XMM0_REGNUM + 12, AMD64_XMM0_REGNUM + 13,
148 AMD64_XMM0_REGNUM + 14, AMD64_XMM0_REGNUM + 15,
c4f35dd8
MK
149
150 /* Floating Point Registers 0-7. */
90f90721
MK
151 AMD64_ST0_REGNUM + 0, AMD64_ST0_REGNUM + 1,
152 AMD64_ST0_REGNUM + 2, AMD64_ST0_REGNUM + 3,
153 AMD64_ST0_REGNUM + 4, AMD64_ST0_REGNUM + 5,
c6f4c129
JB
154 AMD64_ST0_REGNUM + 6, AMD64_ST0_REGNUM + 7,
155
156 /* Control and Status Flags Register. */
157 AMD64_EFLAGS_REGNUM,
158
159 /* Selector Registers. */
160 AMD64_ES_REGNUM,
161 AMD64_CS_REGNUM,
162 AMD64_SS_REGNUM,
163 AMD64_DS_REGNUM,
164 AMD64_FS_REGNUM,
165 AMD64_GS_REGNUM,
166 -1,
167 -1,
168
169 /* Segment Base Address Registers. */
170 -1,
171 -1,
172 -1,
173 -1,
174
175 /* Special Selector Registers. */
176 -1,
177 -1,
178
179 /* Floating Point Control Registers. */
180 AMD64_MXCSR_REGNUM,
181 AMD64_FCTRL_REGNUM,
182 AMD64_FSTAT_REGNUM
c4f35dd8 183};
0e04a514 184
e53bef9f
MK
185static const int amd64_dwarf_regmap_len =
186 (sizeof (amd64_dwarf_regmap) / sizeof (amd64_dwarf_regmap[0]));
0e04a514 187
c4f35dd8
MK
188/* Convert DWARF register number REG to the appropriate register
189 number used by GDB. */
26abbdc4 190
c4f35dd8 191static int
d3f73121 192amd64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
53e95fcf 193{
c4f35dd8 194 int regnum = -1;
53e95fcf 195
16aff9a6 196 if (reg >= 0 && reg < amd64_dwarf_regmap_len)
e53bef9f 197 regnum = amd64_dwarf_regmap[reg];
53e95fcf 198
c4f35dd8 199 if (regnum == -1)
8a3fe4f8 200 warning (_("Unmapped DWARF Register #%d encountered."), reg);
c4f35dd8
MK
201
202 return regnum;
53e95fcf 203}
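/* Worked example (editor's illustration, not part of the original
   source): with the map above, DWARF register 0 resolves to
   AMD64_RAX_REGNUM, DWARF register 16 (the return-address column) to
   AMD64_RIP_REGNUM, and DWARF register 17 to AMD64_XMM0_REGNUM.  An
   unmapped entry such as DWARF register 48 yields -1 and triggers the
   warning above.  */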
d532c08f 204
35669430
DE
205/* Map architectural register numbers to gdb register numbers. */
206
207static const int amd64_arch_regmap[16] =
208{
209 AMD64_RAX_REGNUM, /* %rax */
210 AMD64_RCX_REGNUM, /* %rcx */
211 AMD64_RDX_REGNUM, /* %rdx */
212 AMD64_RBX_REGNUM, /* %rbx */
213 AMD64_RSP_REGNUM, /* %rsp */
214 AMD64_RBP_REGNUM, /* %rbp */
215 AMD64_RSI_REGNUM, /* %rsi */
216 AMD64_RDI_REGNUM, /* %rdi */
217 AMD64_R8_REGNUM, /* %r8 */
218 AMD64_R9_REGNUM, /* %r9 */
219 AMD64_R10_REGNUM, /* %r10 */
220 AMD64_R11_REGNUM, /* %r11 */
221 AMD64_R12_REGNUM, /* %r12 */
222 AMD64_R13_REGNUM, /* %r13 */
223 AMD64_R14_REGNUM, /* %r14 */
224 AMD64_R15_REGNUM /* %r15 */
225};
226
227static const int amd64_arch_regmap_len =
228 (sizeof (amd64_arch_regmap) / sizeof (amd64_arch_regmap[0]));
229
230/* Convert architectural register number REG to the appropriate register
231 number used by GDB. */
232
233static int
234amd64_arch_reg_to_regnum (int reg)
235{
236 gdb_assert (reg >= 0 && reg < amd64_arch_regmap_len);
237
238 return amd64_arch_regmap[reg];
239}
240
53e95fcf
JS
241\f
242
efb1c01c
MK
243/* Register classes as defined in the psABI. */
244
245enum amd64_reg_class
246{
247 AMD64_INTEGER,
248 AMD64_SSE,
249 AMD64_SSEUP,
250 AMD64_X87,
251 AMD64_X87UP,
252 AMD64_COMPLEX_X87,
253 AMD64_NO_CLASS,
254 AMD64_MEMORY
255};
256
257/* Return the union class of CLASS1 and CLASS2. See the psABI for
258 details. */
259
260static enum amd64_reg_class
261amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
262{
263 /* Rule (a): If both classes are equal, this is the resulting class. */
264 if (class1 == class2)
265 return class1;
266
267 /* Rule (b): If one of the classes is NO_CLASS, the resulting class
268 is the other class. */
269 if (class1 == AMD64_NO_CLASS)
270 return class2;
271 if (class2 == AMD64_NO_CLASS)
272 return class1;
273
274 /* Rule (c): If one of the classes is MEMORY, the result is MEMORY. */
275 if (class1 == AMD64_MEMORY || class2 == AMD64_MEMORY)
276 return AMD64_MEMORY;
277
278 /* Rule (d): If one of the classes is INTEGER, the result is INTEGER. */
279 if (class1 == AMD64_INTEGER || class2 == AMD64_INTEGER)
280 return AMD64_INTEGER;
281
282 /* Rule (e): If one of the classes is X87, X87UP or COMPLEX_X87,
283 MEMORY is used as the class. */
284 if (class1 == AMD64_X87 || class1 == AMD64_X87UP
285 || class1 == AMD64_COMPLEX_X87 || class2 == AMD64_X87
286 || class2 == AMD64_X87UP || class2 == AMD64_COMPLEX_X87)
287 return AMD64_MEMORY;
288
289 /* Rule (f): Otherwise class SSE is used. */
290 return AMD64_SSE;
291}
292
293static void amd64_classify (struct type *type, enum amd64_reg_class class[2]);
294
79b1ab3d
MK
295/* Return non-zero if TYPE is a non-POD structure or union type. */
296
297static int
298amd64_non_pod_p (struct type *type)
299{
300 /* ??? A class with a base class certainly isn't POD, but does this
301 catch all non-POD structure types? */
302 if (TYPE_CODE (type) == TYPE_CODE_STRUCT && TYPE_N_BASECLASSES (type) > 0)
303 return 1;
304
305 return 0;
306}
307
efb1c01c
MK
308/* Classify TYPE according to the rules for aggregate (structures and
309 arrays) and union types, and store the result in CLASS. */
c4f35dd8
MK
310
311static void
efb1c01c 312amd64_classify_aggregate (struct type *type, enum amd64_reg_class class[2])
53e95fcf
JS
313{
314 int len = TYPE_LENGTH (type);
315
efb1c01c
MK
316 /* 1. If the size of an object is larger than two eightbytes, or in
317 C++, is a non-POD structure or union type, or contains
318 unaligned fields, it has class memory. */
79b1ab3d 319 if (len > 16 || amd64_non_pod_p (type))
53e95fcf 320 {
efb1c01c
MK
321 class[0] = class[1] = AMD64_MEMORY;
322 return;
53e95fcf 323 }
efb1c01c
MK
324
325 /* 2. Both eightbytes get initialized to class NO_CLASS. */
326 class[0] = class[1] = AMD64_NO_CLASS;
327
328 /* 3. Each field of an object is classified recursively so that
329 always two fields are considered. The resulting class is
330 calculated according to the classes of the fields in the
331 eightbyte: */
332
333 if (TYPE_CODE (type) == TYPE_CODE_ARRAY)
8ffd9b1b 334 {
efb1c01c
MK
335 struct type *subtype = check_typedef (TYPE_TARGET_TYPE (type));
336
337 /* All fields in an array have the same type. */
338 amd64_classify (subtype, class);
339 if (len > 8 && class[1] == AMD64_NO_CLASS)
340 class[1] = class[0];
8ffd9b1b 341 }
53e95fcf
JS
342 else
343 {
efb1c01c 344 int i;
53e95fcf 345
efb1c01c
MK
346 /* Structure or union. */
347 gdb_assert (TYPE_CODE (type) == TYPE_CODE_STRUCT
348 || TYPE_CODE (type) == TYPE_CODE_UNION);
349
350 for (i = 0; i < TYPE_NFIELDS (type); i++)
53e95fcf 351 {
efb1c01c
MK
352 struct type *subtype = check_typedef (TYPE_FIELD_TYPE (type, i));
353 int pos = TYPE_FIELD_BITPOS (type, i) / 64;
354 enum amd64_reg_class subclass[2];
355
562c50c2 356 /* Ignore static fields. */
d6a843b5 357 if (field_is_static (&TYPE_FIELD (type, i)))
562c50c2
MK
358 continue;
359
efb1c01c
MK
360 gdb_assert (pos == 0 || pos == 1);
361
362 amd64_classify (subtype, subclass);
363 class[pos] = amd64_merge_classes (class[pos], subclass[0]);
364 if (pos == 0)
365 class[1] = amd64_merge_classes (class[1], subclass[1]);
53e95fcf 366 }
53e95fcf 367 }
efb1c01c
MK
368
369 /* 4. Then a post merger cleanup is done: */
370
371 /* Rule (a): If one of the classes is MEMORY, the whole argument is
372 passed in memory. */
373 if (class[0] == AMD64_MEMORY || class[1] == AMD64_MEMORY)
374 class[0] = class[1] = AMD64_MEMORY;
375
376 /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
377 SSE. */
378 if (class[0] == AMD64_SSEUP)
379 class[0] = AMD64_SSE;
380 if (class[1] == AMD64_SSEUP && class[0] != AMD64_SSE)
381 class[1] = AMD64_SSE;
382}
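/* Worked example (editor's illustration, not part of the original
   source): for `struct { double d; long l; }' the length is 16, so
   the memory rule in step 1 does not apply.  Field `d' sits in
   eightbyte 0 and classifies as SSE; field `l' sits in eightbyte 1
   and classifies as INTEGER.  The result is class[0] = AMD64_SSE,
   class[1] = AMD64_INTEGER, i.e. the value occupies one SSE register
   and one integer register.  */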
383
384/* Classify TYPE, and store the result in CLASS. */
385
386static void
387amd64_classify (struct type *type, enum amd64_reg_class class[2])
388{
389 enum type_code code = TYPE_CODE (type);
390 int len = TYPE_LENGTH (type);
391
392 class[0] = class[1] = AMD64_NO_CLASS;
393
394 /* Arguments of types (signed and unsigned) _Bool, char, short, int,
5a7225ed
JB
395 long, long long, and pointers are in the INTEGER class. Similarly,
396 range types, used by languages such as Ada, are also in the INTEGER
397 class. */
efb1c01c 398 if ((code == TYPE_CODE_INT || code == TYPE_CODE_ENUM
b929c77f 399 || code == TYPE_CODE_BOOL || code == TYPE_CODE_RANGE
9db13498 400 || code == TYPE_CODE_CHAR
efb1c01c
MK
401 || code == TYPE_CODE_PTR || code == TYPE_CODE_REF)
402 && (len == 1 || len == 2 || len == 4 || len == 8))
403 class[0] = AMD64_INTEGER;
404
5daa78cc
TJB
405 /* Arguments of types float, double, _Decimal32, _Decimal64 and __m64
406 are in class SSE. */
407 else if ((code == TYPE_CODE_FLT || code == TYPE_CODE_DECFLOAT)
408 && (len == 4 || len == 8))
efb1c01c
MK
409 /* FIXME: __m64 . */
410 class[0] = AMD64_SSE;
411
5daa78cc
TJB
412 /* Arguments of types __float128, _Decimal128 and __m128 are split into
413 two halves. The least significant ones belong to class SSE, the most
efb1c01c 414 significant one to class SSEUP. */
5daa78cc
TJB
415 else if (code == TYPE_CODE_DECFLOAT && len == 16)
416 /* FIXME: __float128, __m128. */
417 class[0] = AMD64_SSE, class[1] = AMD64_SSEUP;
efb1c01c
MK
418
419 /* The 64-bit mantissa of arguments of type long double belongs to
420 class X87, the 16-bit exponent plus 6 bytes of padding belongs to
421 class X87UP. */
422 else if (code == TYPE_CODE_FLT && len == 16)
423 /* Class X87 and X87UP. */
424 class[0] = AMD64_X87, class[1] = AMD64_X87UP;
425
426 /* Aggregates. */
427 else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
428 || code == TYPE_CODE_UNION)
429 amd64_classify_aggregate (type, class);
430}
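/* Scalar examples (editor's illustration, not part of the original
   source): an `int' (length 4) classifies as { INTEGER, NO_CLASS },
   a `double' (length 8) as { SSE, NO_CLASS }, and a 16-byte
   `long double' as { X87, X87UP }.  */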
431
432static enum return_value_convention
c055b101
CV
433amd64_return_value (struct gdbarch *gdbarch, struct type *func_type,
434 struct type *type, struct regcache *regcache,
42835c2b 435 gdb_byte *readbuf, const gdb_byte *writebuf)
efb1c01c
MK
436{
437 enum amd64_reg_class class[2];
438 int len = TYPE_LENGTH (type);
90f90721
MK
439 static int integer_regnum[] = { AMD64_RAX_REGNUM, AMD64_RDX_REGNUM };
440 static int sse_regnum[] = { AMD64_XMM0_REGNUM, AMD64_XMM1_REGNUM };
efb1c01c
MK
441 int integer_reg = 0;
442 int sse_reg = 0;
443 int i;
444
445 gdb_assert (!(readbuf && writebuf));
446
447 /* 1. Classify the return type with the classification algorithm. */
448 amd64_classify (type, class);
449
450 /* 2. If the type has class MEMORY, then the caller provides space
6fa57a7d
MK
451 for the return value and passes the address of this storage in
452 %rdi as if it were the first argument to the function. In effect,
453 this address becomes a hidden first argument.
454
455 On return %rax will contain the address that has been passed in
456 by the caller in %rdi. */
efb1c01c 457 if (class[0] == AMD64_MEMORY)
6fa57a7d
MK
458 {
459 /* As indicated by the comment above, the ABI guarantees that we
460 can always find the return value just after the function has
461 returned. */
462
463 if (readbuf)
464 {
465 ULONGEST addr;
466
467 regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
468 read_memory (addr, readbuf, TYPE_LENGTH (type));
469 }
470
471 return RETURN_VALUE_ABI_RETURNS_ADDRESS;
472 }
efb1c01c
MK
473
474 gdb_assert (class[1] != AMD64_MEMORY);
475 gdb_assert (len <= 16);
476
477 for (i = 0; len > 0; i++, len -= 8)
478 {
479 int regnum = -1;
480 int offset = 0;
481
482 switch (class[i])
483 {
484 case AMD64_INTEGER:
485 /* 3. If the class is INTEGER, the next available register
486 of the sequence %rax, %rdx is used. */
487 regnum = integer_regnum[integer_reg++];
488 break;
489
490 case AMD64_SSE:
491 /* 4. If the class is SSE, the next available SSE register
492 of the sequence %xmm0, %xmm1 is used. */
493 regnum = sse_regnum[sse_reg++];
494 break;
495
496 case AMD64_SSEUP:
497 /* 5. If the class is SSEUP, the eightbyte is passed in the
498 upper half of the last used SSE register. */
499 gdb_assert (sse_reg > 0);
500 regnum = sse_regnum[sse_reg - 1];
501 offset = 8;
502 break;
503
504 case AMD64_X87:
505 /* 6. If the class is X87, the value is returned on the X87
506 stack in %st0 as an 80-bit x87 number. */
90f90721 507 regnum = AMD64_ST0_REGNUM;
efb1c01c
MK
508 if (writebuf)
509 i387_return_value (gdbarch, regcache);
510 break;
511
512 case AMD64_X87UP:
513 /* 7. If the class is X87UP, the value is returned together
514 with the previous X87 value in %st0. */
515 gdb_assert (i > 0 && class[0] == AMD64_X87);
90f90721 516 regnum = AMD64_ST0_REGNUM;
efb1c01c
MK
517 offset = 8;
518 len = 2;
519 break;
520
521 case AMD64_NO_CLASS:
522 continue;
523
524 default:
525 gdb_assert (!"Unexpected register class.");
526 }
527
528 gdb_assert (regnum != -1);
529
530 if (readbuf)
531 regcache_raw_read_part (regcache, regnum, offset, min (len, 8),
42835c2b 532 readbuf + i * 8);
efb1c01c
MK
533 if (writebuf)
534 regcache_raw_write_part (regcache, regnum, offset, min (len, 8),
42835c2b 535 writebuf + i * 8);
efb1c01c
MK
536 }
537
538 return RETURN_VALUE_REGISTER_CONVENTION;
53e95fcf
JS
539}
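/* Worked example (editor's illustration, not part of the original
   source): a returned `struct { long l; double d; }' classifies as
   { INTEGER, SSE }, so the first eightbyte is read from or written to
   %rax and the second to %xmm0.  A 32-byte structure classifies as
   MEMORY; the caller passes a buffer address in %rdi, the callee
   leaves that address in %rax, and GDB reports
   RETURN_VALUE_ABI_RETURNS_ADDRESS.  */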
540\f
541
720aa428
MK
542static CORE_ADDR
543amd64_push_arguments (struct regcache *regcache, int nargs,
6470d250 544 struct value **args, CORE_ADDR sp, int struct_return)
720aa428
MK
545{
546 static int integer_regnum[] =
547 {
90f90721
MK
548 AMD64_RDI_REGNUM, /* %rdi */
549 AMD64_RSI_REGNUM, /* %rsi */
550 AMD64_RDX_REGNUM, /* %rdx */
551 AMD64_RCX_REGNUM, /* %rcx */
552 8, /* %r8 */
553 9 /* %r9 */
720aa428
MK
554 };
555 static int sse_regnum[] =
556 {
557 /* %xmm0 ... %xmm7 */
90f90721
MK
558 AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
559 AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
560 AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
561 AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
720aa428
MK
562 };
563 struct value **stack_args = alloca (nargs * sizeof (struct value *));
564 int num_stack_args = 0;
565 int num_elements = 0;
566 int element = 0;
567 int integer_reg = 0;
568 int sse_reg = 0;
569 int i;
570
6470d250
MK
571 /* Reserve a register for the "hidden" argument. */
572 if (struct_return)
573 integer_reg++;
574
720aa428
MK
575 for (i = 0; i < nargs; i++)
576 {
4991999e 577 struct type *type = value_type (args[i]);
720aa428
MK
578 int len = TYPE_LENGTH (type);
579 enum amd64_reg_class class[2];
580 int needed_integer_regs = 0;
581 int needed_sse_regs = 0;
582 int j;
583
584 /* Classify argument. */
585 amd64_classify (type, class);
586
587 /* Calculate the number of integer and SSE registers needed for
588 this argument. */
589 for (j = 0; j < 2; j++)
590 {
591 if (class[j] == AMD64_INTEGER)
592 needed_integer_regs++;
593 else if (class[j] == AMD64_SSE)
594 needed_sse_regs++;
595 }
596
597 /* Check whether enough registers are available, and if the
598 argument should be passed in registers at all. */
599 if (integer_reg + needed_integer_regs > ARRAY_SIZE (integer_regnum)
600 || sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum)
601 || (needed_integer_regs == 0 && needed_sse_regs == 0))
602 {
603 /* The argument will be passed on the stack. */
604 num_elements += ((len + 7) / 8);
605 stack_args[num_stack_args++] = args[i];
606 }
607 else
608 {
609 /* The argument will be passed in registers. */
d8de1ef7
MK
610 const gdb_byte *valbuf = value_contents (args[i]);
611 gdb_byte buf[8];
720aa428
MK
612
613 gdb_assert (len <= 16);
614
615 for (j = 0; len > 0; j++, len -= 8)
616 {
617 int regnum = -1;
618 int offset = 0;
619
620 switch (class[j])
621 {
622 case AMD64_INTEGER:
623 regnum = integer_regnum[integer_reg++];
624 break;
625
626 case AMD64_SSE:
627 regnum = sse_regnum[sse_reg++];
628 break;
629
630 case AMD64_SSEUP:
631 gdb_assert (sse_reg > 0);
632 regnum = sse_regnum[sse_reg - 1];
633 offset = 8;
634 break;
635
636 default:
637 gdb_assert (!"Unexpected register class.");
638 }
639
640 gdb_assert (regnum != -1);
641 memset (buf, 0, sizeof buf);
642 memcpy (buf, valbuf + j * 8, min (len, 8));
643 regcache_raw_write_part (regcache, regnum, offset, 8, buf);
644 }
645 }
646 }
647
648 /* Allocate space for the arguments on the stack. */
649 sp -= num_elements * 8;
650
651 /* The psABI says that "The end of the input argument area shall be
652 aligned on a 16 byte boundary." */
653 sp &= ~0xf;
654
655 /* Write out the arguments to the stack. */
656 for (i = 0; i < num_stack_args; i++)
657 {
4991999e 658 struct type *type = value_type (stack_args[i]);
d8de1ef7 659 const gdb_byte *valbuf = value_contents (stack_args[i]);
720aa428
MK
660 int len = TYPE_LENGTH (type);
661
662 write_memory (sp + element * 8, valbuf, len);
663 element += ((len + 7) / 8);
664 }
665
666 /* The psABI says that "For calls that may call functions that use
667 varargs or stdargs (prototype-less calls or calls to functions
668 containing ellipsis (...) in the declaration) %al is used as
669 hidden argument to specify the number of SSE registers used." */
90f90721 670 regcache_raw_write_unsigned (regcache, AMD64_RAX_REGNUM, sse_reg);
720aa428
MK
671 return sp;
672}
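/* Worked example (editor's illustration, not part of the original
   source): for a call such as `f (1, 2.5, p)' with no struct return,
   the `int' goes in %rdi, the `double' in %xmm0 and the pointer in
   %rsi; %rax is then loaded with 1, the number of SSE registers used,
   for the benefit of varargs callees.  If a structure is returned in
   memory, %rdi is reserved for the hidden return-value pointer and
   the integer arguments start at %rsi instead.  */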
673
c4f35dd8 674static CORE_ADDR
7d9b040b 675amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
e53bef9f
MK
676 struct regcache *regcache, CORE_ADDR bp_addr,
677 int nargs, struct value **args, CORE_ADDR sp,
678 int struct_return, CORE_ADDR struct_addr)
53e95fcf 679{
d8de1ef7 680 gdb_byte buf[8];
c4f35dd8
MK
681
682 /* Pass arguments. */
6470d250 683 sp = amd64_push_arguments (regcache, nargs, args, sp, struct_return);
c4f35dd8
MK
684
685 /* Pass the "hidden" argument. */
686 if (struct_return)
687 {
688 store_unsigned_integer (buf, 8, struct_addr);
90f90721 689 regcache_cooked_write (regcache, AMD64_RDI_REGNUM, buf);
c4f35dd8
MK
690 }
691
692 /* Store return address. */
693 sp -= 8;
10f93086 694 store_unsigned_integer (buf, 8, bp_addr);
c4f35dd8
MK
695 write_memory (sp, buf, 8);
696
697 /* Finally, update the stack pointer... */
698 store_unsigned_integer (buf, 8, sp);
90f90721 699 regcache_cooked_write (regcache, AMD64_RSP_REGNUM, buf);
c4f35dd8
MK
700
701 /* ...and fake a frame pointer. */
90f90721 702 regcache_cooked_write (regcache, AMD64_RBP_REGNUM, buf);
c4f35dd8 703
3e210248 704 return sp + 16;
53e95fcf 705}
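/* Editor's note (not part of the original source): after this
   function runs, the return address BP_ADDR sits at the new %rsp,
   %rbp has been given the same value as %rsp to fake a frame, and the
   returned CORE_ADDR (%rsp + 16) matches the frame base that
   amd64_frame_cache later reconstructs for this dummy frame.  */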
c4f35dd8 706\f
35669430
DE
707/* Displaced instruction handling. */
708
709/* A partially decoded instruction.
710 This contains enough details for displaced stepping purposes. */
711
712struct amd64_insn
713{
714 /* The number of opcode bytes. */
715 int opcode_len;
716 /* The offset of the rex prefix or -1 if not present. */
717 int rex_offset;
718 /* The offset to the first opcode byte. */
719 int opcode_offset;
720 /* The offset to the modrm byte or -1 if not present. */
721 int modrm_offset;
722
723 /* The raw instruction. */
724 gdb_byte *raw_insn;
725};
726
727struct displaced_step_closure
728{
729 /* For rip-relative insns, saved copy of the reg we use instead of %rip. */
730 int tmp_used;
731 int tmp_regno;
732 ULONGEST tmp_save;
733
734 /* Details of the instruction. */
735 struct amd64_insn insn_details;
736
737 /* Amount of space allocated to insn_buf. */
738 int max_len;
739
740 /* The possibly modified insn.
741 This is a variable-length field. */
742 gdb_byte insn_buf[1];
743};
744
745/* WARNING: Keep onebyte_has_modrm, twobyte_has_modrm in sync with
746 ../opcodes/i386-dis.c (until libopcodes exports them, or an alternative,
747 at which point delete these in favor of libopcodes' versions). */
748
749static const unsigned char onebyte_has_modrm[256] = {
750 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
751 /* ------------------------------- */
752 /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
753 /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
754 /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
755 /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
756 /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
757 /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
758 /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
759 /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
760 /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
761 /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
762 /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
763 /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
764 /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
765 /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
766 /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
767 /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1 /* f0 */
768 /* ------------------------------- */
769 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
770};
771
772static const unsigned char twobyte_has_modrm[256] = {
773 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
774 /* ------------------------------- */
775 /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
776 /* 10 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 1f */
777 /* 20 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 2f */
778 /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
779 /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
780 /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
781 /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
782 /* 70 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 7f */
783 /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
784 /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
785 /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
786 /* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */
787 /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
788 /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
789 /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
790 /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0 /* ff */
791 /* ------------------------------- */
792 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
793};
794
795static int amd64_syscall_p (const struct amd64_insn *insn, int *lengthp);
796
797static int
798rex_prefix_p (gdb_byte pfx)
799{
800 return REX_PREFIX_P (pfx);
801}
802
803/* Skip the legacy instruction prefixes in INSN.
804 We assume INSN is properly sentineled so we don't have to worry
805 about falling off the end of the buffer. */
806
807static gdb_byte *
808skip_prefixes (gdb_byte *insn)
809{
810 while (1)
811 {
812 switch (*insn)
813 {
814 case DATA_PREFIX_OPCODE:
815 case ADDR_PREFIX_OPCODE:
816 case CS_PREFIX_OPCODE:
817 case DS_PREFIX_OPCODE:
818 case ES_PREFIX_OPCODE:
819 case FS_PREFIX_OPCODE:
820 case GS_PREFIX_OPCODE:
821 case SS_PREFIX_OPCODE:
822 case LOCK_PREFIX_OPCODE:
823 case REPE_PREFIX_OPCODE:
824 case REPNE_PREFIX_OPCODE:
825 ++insn;
826 continue;
827 default:
828 break;
829 }
830 break;
831 }
832
833 return insn;
834}
835
836/* fprintf-function for amd64_insn_length.
837 This function is a nop; we don't want to print anything, we just want to
838 compute the length of the insn. */
839
840static int ATTR_FORMAT (printf, 2, 3)
841amd64_insn_length_fprintf (void *stream, const char *format, ...)
842{
843 return 0;
844}
845
846/* Initialize a struct disassemble_info for amd64_insn_length. */
847
848static void
849amd64_insn_length_init_dis (struct gdbarch *gdbarch,
850 struct disassemble_info *di,
851 const gdb_byte *insn, int max_len,
852 CORE_ADDR addr)
853{
854 init_disassemble_info (di, NULL, amd64_insn_length_fprintf);
855
856 /* init_disassemble_info installs buffer_read_memory, etc.
857 so we don't need to do that here.
858 The cast is necessary until disassemble_info is const-ified. */
859 di->buffer = (gdb_byte *) insn;
860 di->buffer_length = max_len;
861 di->buffer_vma = addr;
862
863 di->arch = gdbarch_bfd_arch_info (gdbarch)->arch;
864 di->mach = gdbarch_bfd_arch_info (gdbarch)->mach;
865 di->endian = gdbarch_byte_order (gdbarch);
866 di->endian_code = gdbarch_byte_order_for_code (gdbarch);
867
868 disassemble_init_for_target (di);
869}
870
871/* Return the length in bytes of INSN.
872 MAX_LEN is the size of the buffer containing INSN.
873 libopcodes currently doesn't export a utility to compute the
874 instruction length, so use the disassembler until then. */
875
876static int
877amd64_insn_length (struct gdbarch *gdbarch,
878 const gdb_byte *insn, int max_len, CORE_ADDR addr)
879{
880 struct disassemble_info di;
881
882 amd64_insn_length_init_dis (gdbarch, &di, insn, max_len, addr);
883
884 return gdbarch_print_insn (gdbarch, addr, &di);
885}
886
887/* Return an integer register (other than RSP) that is unused as an input
888 operand in INSN.
889 In order to not require adding a rex prefix if the insn doesn't already
890 have one, the result is restricted to RAX ... RDI, sans RSP.
891 The register numbering of the result follows architecture ordering,
892 e.g. RDI = 7. */
893
894static int
895amd64_get_unused_input_int_reg (const struct amd64_insn *details)
896{
897 /* 1 bit for each reg */
898 int used_regs_mask = 0;
899
900 /* There can be at most 3 int regs used as inputs in an insn, and we have
901 7 to choose from (RAX ... RDI, sans RSP).
902 This allows us to take a conservative approach and keep things simple.
903 E.g. By avoiding RAX, we don't have to specifically watch for opcodes
904 that implicitly specify RAX. */
905
906 /* Avoid RAX. */
907 used_regs_mask |= 1 << EAX_REG_NUM;
908 /* Similarly avoid RDX, an implicit operand in divides. */
909 used_regs_mask |= 1 << EDX_REG_NUM;
910 /* Avoid RSP. */
911 used_regs_mask |= 1 << ESP_REG_NUM;
912
913 /* If the opcode is one byte long and there's no ModRM byte,
914 assume the opcode specifies a register. */
915 if (details->opcode_len == 1 && details->modrm_offset == -1)
916 used_regs_mask |= 1 << (details->raw_insn[details->opcode_offset] & 7);
917
918 /* Mark used regs in the modrm/sib bytes. */
919 if (details->modrm_offset != -1)
920 {
921 int modrm = details->raw_insn[details->modrm_offset];
922 int mod = MODRM_MOD_FIELD (modrm);
923 int reg = MODRM_REG_FIELD (modrm);
924 int rm = MODRM_RM_FIELD (modrm);
925 int have_sib = mod != 3 && rm == 4;
926
927 /* Assume the reg field of the modrm byte specifies a register. */
928 used_regs_mask |= 1 << reg;
929
930 if (have_sib)
931 {
932 int base = SIB_BASE_FIELD (details->raw_insn[details->modrm_offset + 1]);
933 int index = SIB_INDEX_FIELD (details->raw_insn[details->modrm_offset + 1]);
934 used_regs_mask |= 1 << base;
935 used_regs_mask |= 1 << index;
936 }
937 else
938 {
939 used_regs_mask |= 1 << rm;
940 }
941 }
942
943 gdb_assert (used_regs_mask < 256);
944 gdb_assert (used_regs_mask != 255);
945
946 /* Finally, find a free reg. */
947 {
948 int i;
949
950 for (i = 0; i < 8; ++i)
951 {
952 if (! (used_regs_mask & (1 << i)))
953 return i;
954 }
955
956 /* We shouldn't get here. */
957 internal_error (__FILE__, __LINE__, _("unable to find free reg"));
958 }
959}
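/* Worked example (editor's illustration, not part of the original
   source): for `add %rsi,(%rbx)' (bytes 48 01 33) the ModRM byte 0x33
   marks %rsi (reg field) and %rbx (r/m field) as used; %rax, %rdx and
   %rsp are always avoided, so the mask covers registers 0, 2, 3, 4
   and 6 and the first free choice is register 1, i.e. %rcx.  */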
960
961/* Extract the details of INSN that we need. */
962
963static void
964amd64_get_insn_details (gdb_byte *insn, struct amd64_insn *details)
965{
966 gdb_byte *start = insn;
967 int need_modrm;
968
969 details->raw_insn = insn;
970
971 details->opcode_len = -1;
972 details->rex_offset = -1;
973 details->opcode_offset = -1;
974 details->modrm_offset = -1;
975
976 /* Skip legacy instruction prefixes. */
977 insn = skip_prefixes (insn);
978
979 /* Skip REX instruction prefix. */
980 if (rex_prefix_p (*insn))
981 {
982 details->rex_offset = insn - start;
983 ++insn;
984 }
985
986 details->opcode_offset = insn - start;
987
988 if (*insn == TWO_BYTE_OPCODE_ESCAPE)
989 {
990 /* Two or three-byte opcode. */
991 ++insn;
992 need_modrm = twobyte_has_modrm[*insn];
993
994 /* Check for three-byte opcode. */
995 if (*insn == 0x38 || *insn == 0x3a)
996 {
997 ++insn;
998 details->opcode_len = 3;
999 }
1000 else
1001 details->opcode_len = 2;
1002 }
1003 else
1004 {
1005 /* One-byte opcode. */
1006 need_modrm = onebyte_has_modrm[*insn];
1007 details->opcode_len = 1;
1008 }
1009
1010 if (need_modrm)
1011 {
1012 ++insn;
1013 details->modrm_offset = insn - start;
1014 }
1015}
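/* Worked example (editor's illustration, not part of the original
   source): for `mov 0x1d3(%rip),%rax' (bytes 48 8b 05 d3 01 00 00)
   the details come out as rex_offset = 0, opcode_offset = 1,
   opcode_len = 1 and modrm_offset = 2, since 0x8b is a one-byte
   opcode that takes a ModRM byte.  */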
1016
1017/* Update %rip-relative addressing in INSN.
1018
1019 %rip-relative addressing only uses a 32-bit displacement.
1020 32 bits is not enough to be guaranteed to cover the distance between where
1021 the real instruction is and where its copy is.
1022 Convert the insn to use base+disp addressing.
1023 We set base = pc + insn_length so we can leave disp unchanged. */
c4f35dd8 1024
35669430
DE
1025static void
1026fixup_riprel (struct gdbarch *gdbarch, struct displaced_step_closure *dsc,
1027 CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
1028{
1029 const struct amd64_insn *insn_details = &dsc->insn_details;
1030 int modrm_offset = insn_details->modrm_offset;
1031 gdb_byte *insn = insn_details->raw_insn + modrm_offset;
1032 CORE_ADDR rip_base;
1033 int32_t disp;
1034 int insn_length;
1035 int arch_tmp_regno, tmp_regno;
1036 ULONGEST orig_value;
1037
1038 /* %rip+disp32 addressing mode, displacement follows ModRM byte. */
1039 ++insn;
1040
1041 /* Compute the rip-relative address. */
1042 disp = extract_signed_integer (insn, sizeof (int32_t));
1043 insn_length = amd64_insn_length (gdbarch, dsc->insn_buf, dsc->max_len, from);
1044 rip_base = from + insn_length;
1045
1046 /* We need a register to hold the address.
1047 Pick one not used in the insn.
1048 NOTE: arch_tmp_regno uses architecture ordering, e.g. RDI = 7. */
1049 arch_tmp_regno = amd64_get_unused_input_int_reg (insn_details);
1050 tmp_regno = amd64_arch_reg_to_regnum (arch_tmp_regno);
1051
1052 /* REX.B should be unset as we were using rip-relative addressing,
1053 but ensure it's unset anyway; tmp_regno is not r8-r15. */
1054 if (insn_details->rex_offset != -1)
1055 dsc->insn_buf[insn_details->rex_offset] &= ~REX_B;
1056
1057 regcache_cooked_read_unsigned (regs, tmp_regno, &orig_value);
1058 dsc->tmp_regno = tmp_regno;
1059 dsc->tmp_save = orig_value;
1060 dsc->tmp_used = 1;
1061
1062 /* Convert the ModRM field to be base+disp. */
1063 dsc->insn_buf[modrm_offset] &= ~0xc7;
1064 dsc->insn_buf[modrm_offset] |= 0x80 + arch_tmp_regno;
1065
1066 regcache_cooked_write_unsigned (regs, tmp_regno, rip_base);
1067
1068 if (debug_displaced)
1069 fprintf_unfiltered (gdb_stdlog, "displaced: %%rip-relative addressing used.\n"
1070 "displaced: using temp reg %d, old value 0x%s, new value 0x%s\n",
1071 dsc->tmp_regno, paddr_nz (dsc->tmp_save),
1072 paddr_nz (rip_base));
1073}
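/* Worked example (editor's illustration, not part of the original
   source): for `mov 0x0(%rip),%eax' (bytes 8b 05 00 00 00 00) the
   ModRM byte 0x05 selects %rip-relative addressing.  The
   unused-register search picks %rcx, the ModRM byte is rewritten to
   0x81 (mod = 10, reg = %eax, r/m = %rcx), %rcx is loaded with
   FROM + 6 (the length of the insn), and its original value is saved
   in DSC so amd64_displaced_step_fixup can restore it.  */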
1074
1075static void
1076fixup_displaced_copy (struct gdbarch *gdbarch,
1077 struct displaced_step_closure *dsc,
1078 CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
1079{
1080 const struct amd64_insn *details = &dsc->insn_details;
1081
1082 if (details->modrm_offset != -1)
1083 {
1084 gdb_byte modrm = details->raw_insn[details->modrm_offset];
1085
1086 if ((modrm & 0xc7) == 0x05)
1087 {
1088 /* The insn uses rip-relative addressing.
1089 Deal with it. */
1090 fixup_riprel (gdbarch, dsc, from, to, regs);
1091 }
1092 }
1093}
1094
1095struct displaced_step_closure *
1096amd64_displaced_step_copy_insn (struct gdbarch *gdbarch,
1097 CORE_ADDR from, CORE_ADDR to,
1098 struct regcache *regs)
1099{
1100 int len = gdbarch_max_insn_length (gdbarch);
1101 /* Extra space for sentinels so fixup_{riprel,displaced_copy} don't have to
1102 continually watch for running off the end of the buffer. */
1103 int fixup_sentinel_space = len;
1104 struct displaced_step_closure *dsc =
1105 xmalloc (sizeof (*dsc) + len + fixup_sentinel_space);
1106 gdb_byte *buf = &dsc->insn_buf[0];
1107 struct amd64_insn *details = &dsc->insn_details;
1108
1109 dsc->tmp_used = 0;
1110 dsc->max_len = len + fixup_sentinel_space;
1111
1112 read_memory (from, buf, len);
1113
1114 /* Set up the sentinel space so we don't have to worry about running
1115 off the end of the buffer. An excessive number of leading prefixes
1116 could otherwise cause this. */
1117 memset (buf + len, 0, fixup_sentinel_space);
1118
1119 amd64_get_insn_details (buf, details);
1120
1121 /* GDB may get control back after the insn after the syscall.
1122 Presumably this is a kernel bug.
1123 If this is a syscall, make sure there's a nop afterwards. */
1124 {
1125 int syscall_length;
1126
1127 if (amd64_syscall_p (details, &syscall_length))
1128 buf[details->opcode_offset + syscall_length] = NOP_OPCODE;
1129 }
1130
1131 /* Modify the insn to cope with the address where it will be executed from.
1132 In particular, handle any rip-relative addressing. */
1133 fixup_displaced_copy (gdbarch, dsc, from, to, regs);
1134
1135 write_memory (to, buf, len);
1136
1137 if (debug_displaced)
1138 {
1139 fprintf_unfiltered (gdb_stdlog, "displaced: copy 0x%s->0x%s: ",
1140 paddr_nz (from), paddr_nz (to));
1141 displaced_step_dump_bytes (gdb_stdlog, buf, len);
1142 }
1143
1144 return dsc;
1145}
1146
1147static int
1148amd64_absolute_jmp_p (const struct amd64_insn *details)
1149{
1150 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1151
1152 if (insn[0] == 0xff)
1153 {
1154 /* jump near, absolute indirect (/4) */
1155 if ((insn[1] & 0x38) == 0x20)
1156 return 1;
1157
1158 /* jump far, absolute indirect (/5) */
1159 if ((insn[1] & 0x38) == 0x28)
1160 return 1;
1161 }
1162
1163 return 0;
1164}
1165
1166static int
1167amd64_absolute_call_p (const struct amd64_insn *details)
1168{
1169 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1170
1171 if (insn[0] == 0xff)
1172 {
1173 /* Call near, absolute indirect (/2) */
1174 if ((insn[1] & 0x38) == 0x10)
1175 return 1;
1176
1177 /* Call far, absolute indirect (/3) */
1178 if ((insn[1] & 0x38) == 0x18)
1179 return 1;
1180 }
1181
1182 return 0;
1183}
1184
1185static int
1186amd64_ret_p (const struct amd64_insn *details)
1187{
1188 /* NOTE: gcc can emit "repz ; ret". */
1189 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1190
1191 switch (insn[0])
1192 {
1193 case 0xc2: /* ret near, pop N bytes */
1194 case 0xc3: /* ret near */
1195 case 0xca: /* ret far, pop N bytes */
1196 case 0xcb: /* ret far */
1197 case 0xcf: /* iret */
1198 return 1;
1199
1200 default:
1201 return 0;
1202 }
1203}
1204
1205static int
1206amd64_call_p (const struct amd64_insn *details)
1207{
1208 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1209
1210 if (amd64_absolute_call_p (details))
1211 return 1;
1212
1213 /* call near, relative */
1214 if (insn[0] == 0xe8)
1215 return 1;
1216
1217 return 0;
1218}
1219
1220static int
1221amd64_breakpoint_p (const struct amd64_insn *details)
1222{
1223 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1224
1225 return insn[0] == 0xcc; /* int 3 */
1226}
1227
1228/* Return non-zero if INSN is a system call, and set *LENGTHP to its
1229 length in bytes. Otherwise, return zero. */
1230
1231static int
1232amd64_syscall_p (const struct amd64_insn *details, int *lengthp)
1233{
1234 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1235
1236 if (insn[0] == 0x0f && insn[1] == 0x05)
1237 {
1238 *lengthp = 2;
1239 return 1;
1240 }
1241
1242 return 0;
1243}
1244
1245/* Fix up the state of registers and memory after having single-stepped
1246 a displaced instruction. */
1247
1248void
1249amd64_displaced_step_fixup (struct gdbarch *gdbarch,
1250 struct displaced_step_closure *dsc,
1251 CORE_ADDR from, CORE_ADDR to,
1252 struct regcache *regs)
1253{
1254 /* The offset we applied to the instruction's address. */
1255 ULONGEST insn_offset = to - from;
1256 gdb_byte *insn = dsc->insn_buf;
1257 const struct amd64_insn *insn_details = &dsc->insn_details;
1258
1259 if (debug_displaced)
1260 fprintf_unfiltered (gdb_stdlog,
1261 "displaced: fixup (0x%s, 0x%s), "
1262 "insn = 0x%02x 0x%02x ...\n",
1263 paddr_nz (from), paddr_nz (to), insn[0], insn[1]);
1264
1265 /* If we used a tmp reg, restore it. */
1266
1267 if (dsc->tmp_used)
1268 {
1269 if (debug_displaced)
1270 fprintf_unfiltered (gdb_stdlog, "displaced: restoring reg %d to 0x%s\n",
1271 dsc->tmp_regno, paddr_nz (dsc->tmp_save));
1272 regcache_cooked_write_unsigned (regs, dsc->tmp_regno, dsc->tmp_save);
1273 }
1274
1275 /* The list of issues to contend with here is taken from
1276 resume_execution in arch/x86/kernel/kprobes.c, Linux 2.6.28.
1277 Yay for Free Software! */
1278
1279 /* Relocate the %rip back to the program's instruction stream,
1280 if necessary. */
1281
1282 /* Except in the case of absolute or indirect jump or call
1283 instructions, or a return instruction, the new rip is relative to
1284 the displaced instruction; make it relative to the original insn.
1285 Well, signal handler returns don't need relocation either, but we use the
1286 value of %rip to recognize those; see below. */
1287 if (! amd64_absolute_jmp_p (insn_details)
1288 && ! amd64_absolute_call_p (insn_details)
1289 && ! amd64_ret_p (insn_details))
1290 {
1291 ULONGEST orig_rip;
1292 int insn_len;
1293
1294 regcache_cooked_read_unsigned (regs, AMD64_RIP_REGNUM, &orig_rip);
1295
1296 /* A signal trampoline system call changes the %rip, resuming
1297 execution of the main program after the signal handler has
1298 returned. That makes them like 'return' instructions; we
1299 shouldn't relocate %rip.
1300
1301 But most system calls don't, and we do need to relocate %rip.
1302
1303 Our heuristic for distinguishing these cases: if stepping
1304 over the system call instruction left control directly after
1305 the instruction, then we relocate --- control almost certainly
1306 doesn't belong in the displaced copy. Otherwise, we assume
1307 the instruction has put control where it belongs, and leave
1308 it unrelocated. Goodness help us if there are PC-relative
1309 system calls. */
1310 if (amd64_syscall_p (insn_details, &insn_len)
1311 && orig_rip != to + insn_len
1312 /* GDB can get control back after the insn after the syscall.
1313 Presumably this is a kernel bug.
1314 Fixup ensures it's a nop; we add one to the length for it. */
1315 && orig_rip != to + insn_len + 1)
1316 {
1317 if (debug_displaced)
1318 fprintf_unfiltered (gdb_stdlog,
1319 "displaced: syscall changed %%rip; "
1320 "not relocating\n");
1321 }
1322 else
1323 {
1324 ULONGEST rip = orig_rip - insn_offset;
1325
1326 /* If we have stepped over a breakpoint, set %rip to
1327 point at the breakpoint instruction itself.
1328
1329 (gdbarch_decr_pc_after_break was never something the core
1330 of GDB should have been concerned with; arch-specific
1331 code should be making PC values consistent before
1332 presenting them to GDB.) */
1333 if (amd64_breakpoint_p (insn_details))
1334 {
1335 if (debug_displaced)
1336 fprintf_unfiltered (gdb_stdlog,
1337 "displaced: stepped breakpoint\n");
1338 rip--;
1339 }
1340
1341 regcache_cooked_write_unsigned (regs, AMD64_RIP_REGNUM, rip);
1342
1343 if (debug_displaced)
1344 fprintf_unfiltered (gdb_stdlog,
1345 "displaced: "
1346 "relocated %%rip from 0x%s to 0x%s\n",
1347 paddr_nz (orig_rip), paddr_nz (rip));
1348 }
1349 }
1350
1351 /* If the instruction was PUSHFL, then the TF bit will be set in the
1352 pushed value, and should be cleared. We'll leave this for later,
1353 since GDB already messes up the TF flag when stepping over a
1354 pushfl. */
1355
1356 /* If the instruction was a call, the return address now atop the
1357 stack is the address following the copied instruction. We need
1358 to make it the address following the original instruction. */
1359 if (amd64_call_p (insn_details))
1360 {
1361 ULONGEST rsp;
1362 ULONGEST retaddr;
1363 const ULONGEST retaddr_len = 8;
1364
1365 regcache_cooked_read_unsigned (regs, AMD64_RSP_REGNUM, &rsp);
1366 retaddr = read_memory_unsigned_integer (rsp, retaddr_len);
1367 retaddr = (retaddr - insn_offset) & 0xffffffffUL;
1368 write_memory_unsigned_integer (rsp, retaddr_len, retaddr);
1369
1370 if (debug_displaced)
1371 fprintf_unfiltered (gdb_stdlog,
1372 "displaced: relocated return addr at 0x%s "
1373 "to 0x%s\n",
1374 paddr_nz (rsp),
1375 paddr_nz (retaddr));
1376 }
1377}
1378\f
c4f35dd8 1379/* The maximum number of saved registers. This should include %rip. */
90f90721 1380#define AMD64_NUM_SAVED_REGS AMD64_NUM_GREGS
c4f35dd8 1381
e53bef9f 1382struct amd64_frame_cache
c4f35dd8
MK
1383{
1384 /* Base address. */
1385 CORE_ADDR base;
1386 CORE_ADDR sp_offset;
1387 CORE_ADDR pc;
1388
1389 /* Saved registers. */
e53bef9f 1390 CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
c4f35dd8 1391 CORE_ADDR saved_sp;
e0c62198 1392 int saved_sp_reg;
c4f35dd8
MK
1393
1394 /* Do we have a frame? */
1395 int frameless_p;
1396};
8dda9770 1397
d2449ee8 1398/* Initialize a frame cache. */
c4f35dd8 1399
d2449ee8
DJ
1400static void
1401amd64_init_frame_cache (struct amd64_frame_cache *cache)
8dda9770 1402{
c4f35dd8
MK
1403 int i;
1404
c4f35dd8
MK
1405 /* Base address. */
1406 cache->base = 0;
1407 cache->sp_offset = -8;
1408 cache->pc = 0;
1409
1410 /* Saved registers. We initialize these to -1 since zero is a valid
1411 offset (that's where %rbp is supposed to be stored). */
e53bef9f 1412 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
c4f35dd8
MK
1413 cache->saved_regs[i] = -1;
1414 cache->saved_sp = 0;
e0c62198 1415 cache->saved_sp_reg = -1;
c4f35dd8
MK
1416
1417 /* Frameless until proven otherwise. */
1418 cache->frameless_p = 1;
d2449ee8 1419}
c4f35dd8 1420
d2449ee8
DJ
1421/* Allocate and initialize a frame cache. */
1422
1423static struct amd64_frame_cache *
1424amd64_alloc_frame_cache (void)
1425{
1426 struct amd64_frame_cache *cache;
1427
1428 cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);
1429 amd64_init_frame_cache (cache);
c4f35dd8 1430 return cache;
8dda9770 1431}
53e95fcf 1432
e0c62198
L
1433/* GCC 4.4 and later can put code in the prologue to realign the
1434 stack pointer. Check whether PC points to such code, and update
1435 CACHE accordingly. Return the first instruction after the code
1436 sequence or CURRENT_PC, whichever is smaller. If we don't
1437 recognize the code, return PC. */
1438
1439static CORE_ADDR
1440amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
1441 struct amd64_frame_cache *cache)
1442{
1443 /* There are 2 code sequences to re-align stack before the frame
1444 gets set up:
1445
1446 1. Use a caller-saved register:
1447
1448 leaq 8(%rsp), %reg
1449 andq $-XXX, %rsp
1450 pushq -8(%reg)
1451
1452 2. Use a callee-saved register:
1453
1454 pushq %reg
1455 leaq 16(%rsp), %reg
1456 andq $-XXX, %rsp
1457 pushq -8(%reg)
1458
1459 "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:
1460
1461 0x48 0x83 0xe4 0xf0 andq $-16, %rsp
1462 0x48 0x81 0xe4 0x00 0xff 0xff 0xff andq $-256, %rsp
1463 */
1464
1465 gdb_byte buf[18];
1466 int reg, r;
1467 int offset, offset_and;
e0c62198
L
1468
1469 if (target_read_memory (pc, buf, sizeof buf))
1470 return pc;
1471
1472 /* Check the caller-saved register case. The first instruction has
1473 to be "leaq 8(%rsp), %reg". */
1474 if ((buf[0] & 0xfb) == 0x48
1475 && buf[1] == 0x8d
1476 && buf[3] == 0x24
1477 && buf[4] == 0x8)
1478 {
1479 /* MOD must be binary 10 and R/M must be binary 100. */
1480 if ((buf[2] & 0xc7) != 0x44)
1481 return pc;
1482
1483 /* REG has register number. */
1484 reg = (buf[2] >> 3) & 7;
1485
1486 /* Check the REX.R bit. */
1487 if (buf[0] == 0x4c)
1488 reg += 8;
1489
1490 offset = 5;
1491 }
1492 else
1493 {
1494 /* Check the callee-saved register case. The first instruction
1495 has to be "pushq %reg". */
1496 reg = 0;
1497 if ((buf[0] & 0xf8) == 0x50)
1498 offset = 0;
1499 else if ((buf[0] & 0xf6) == 0x40
1500 && (buf[1] & 0xf8) == 0x50)
1501 {
1502 /* Check the REX.B bit. */
1503 if ((buf[0] & 1) != 0)
1504 reg = 8;
1505
1506 offset = 1;
1507 }
1508 else
1509 return pc;
1510
1511 /* Get register. */
1512 reg += buf[offset] & 0x7;
1513
1514 offset++;
1515
1516 /* The next instruction has to be "leaq 16(%rsp), %reg". */
1517 if ((buf[offset] & 0xfb) != 0x48
1518 || buf[offset + 1] != 0x8d
1519 || buf[offset + 3] != 0x24
1520 || buf[offset + 4] != 0x10)
1521 return pc;
1522
1523 /* MOD must be binary 10 and R/M must be binary 100. */
1524 if ((buf[offset + 2] & 0xc7) != 0x44)
1525 return pc;
1526
1527 /* REG has register number. */
1528 r = (buf[offset + 2] >> 3) & 7;
1529
1530 /* Check the REX.R bit. */
1531 if (buf[offset] == 0x4c)
1532 r += 8;
1533
1534 /* Registers in pushq and leaq have to be the same. */
1535 if (reg != r)
1536 return pc;
1537
1538 offset += 5;
1539 }
1540
1541 /* The register can't be %rsp or %rbp. */
1542 if (reg == 4 || reg == 5)
1543 return pc;
1544
1545 /* The next instruction has to be "andq $-XXX, %rsp". */
1546 if (buf[offset] != 0x48
1547 || buf[offset + 2] != 0xe4
1548 || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
1549 return pc;
1550
1551 offset_and = offset;
1552 offset += buf[offset + 1] == 0x81 ? 7 : 4;
1553
1554 /* The next instruction has to be "pushq -8(%reg)". */
1555 r = 0;
1556 if (buf[offset] == 0xff)
1557 offset++;
1558 else if ((buf[offset] & 0xf6) == 0x40
1559 && buf[offset + 1] == 0xff)
1560 {
1561 /* Check the REX.B bit. */
1562 if ((buf[offset] & 0x1) != 0)
1563 r = 8;
1564 offset += 2;
1565 }
1566 else
1567 return pc;
1568
1569 /* 8bit -8 is 0xf8. REG must be binary 110 and MOD must be binary
1570 01. */
1571 if (buf[offset + 1] != 0xf8
1572 || (buf[offset] & 0xf8) != 0x70)
1573 return pc;
1574
1575 /* R/M has register. */
1576 r += buf[offset] & 7;
1577
1578 /* Registers in leaq and pushq have to be the same. */
1579 if (reg != r)
1580 return pc;
1581
1582 if (current_pc > pc + offset_and)
35669430 1583 cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);
e0c62198
L
1584
1585 return min (pc + offset + 2, current_pc);
1586}
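/* Worked example (editor's illustration, not part of the original
   source): GCC may emit the caller-saved variant as

     4c 8d 54 24 08          leaq   0x8(%rsp),%r10
     48 83 e4 f0             andq   $-16,%rsp
     41 ff 72 f8             pushq  -0x8(%r10)

   Scanning it, REG comes out as 10 from the leaq and matches the
   pushq; once CURRENT_PC is beyond the start of the andq, the cache
   records saved_sp_reg = AMD64_R10_REGNUM, so the original %rsp can
   later be recovered from %r10.  */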
1587
c4f35dd8
MK
1588/* Do a limited analysis of the prologue at PC and update CACHE
1589 accordingly. Bail out early if CURRENT_PC is reached. Return the
1590 address where the analysis stopped.
1591
1592 We will handle only functions beginning with:
1593
1594 pushq %rbp 0x55
1595 movq %rsp, %rbp 0x48 0x89 0xe5
1596
1597 Any function that doesn't start with this sequence will be assumed
1598 to have no prologue and thus no valid frame pointer in %rbp. */
1599
1600static CORE_ADDR
e53bef9f
MK
1601amd64_analyze_prologue (CORE_ADDR pc, CORE_ADDR current_pc,
1602 struct amd64_frame_cache *cache)
53e95fcf 1603{
d8de1ef7
MK
1604 static gdb_byte proto[3] = { 0x48, 0x89, 0xe5 }; /* movq %rsp, %rbp */
1605 gdb_byte buf[3];
1606 gdb_byte op;
c4f35dd8
MK
1607
1608 if (current_pc <= pc)
1609 return current_pc;
1610
e0c62198
L
1611 pc = amd64_analyze_stack_align (pc, current_pc, cache);
1612
c4f35dd8
MK
1613 op = read_memory_unsigned_integer (pc, 1);
1614
1615 if (op == 0x55) /* pushq %rbp */
1616 {
1617 /* Take into account that we've executed the `pushq %rbp' that
1618 starts this instruction sequence. */
90f90721 1619 cache->saved_regs[AMD64_RBP_REGNUM] = 0;
c4f35dd8
MK
1620 cache->sp_offset += 8;
1621
1622 /* If that's all, return now. */
1623 if (current_pc <= pc + 1)
1624 return current_pc;
1625
1626 /* Check for `movq %rsp, %rbp'. */
1627 read_memory (pc + 1, buf, 3);
1628 if (memcmp (buf, proto, 3) != 0)
1629 return pc + 1;
1630
1631 /* OK, we actually have a frame. */
1632 cache->frameless_p = 0;
1633 return pc + 4;
1634 }
1635
1636 return pc;
53e95fcf
JS
1637}
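/* Worked example (editor's illustration, not part of the original
   source): for a function starting with

     55                      pushq  %rbp
     48 89 e5                movq   %rsp,%rbp

   and CURRENT_PC beyond both instructions, the cache records the
   %rbp save slot at offset 0, clears frameless_p, and the analysis
   returns PC + 4.  A function that does not begin with this sequence
   is treated as having no valid frame pointer in %rbp.  */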
1638
c4f35dd8
MK
1639/* Return PC of first real instruction. */
1640
1641static CORE_ADDR
6093d2eb 1642amd64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR start_pc)
53e95fcf 1643{
e53bef9f 1644 struct amd64_frame_cache cache;
c4f35dd8
MK
1645 CORE_ADDR pc;
1646
d2449ee8 1647 amd64_init_frame_cache (&cache);
594706e6 1648 pc = amd64_analyze_prologue (start_pc, 0xffffffffffffffffLL, &cache);
c4f35dd8
MK
1649 if (cache.frameless_p)
1650 return start_pc;
1651
1652 return pc;
53e95fcf 1653}
c4f35dd8 1654\f
53e95fcf 1655
c4f35dd8
MK
1656/* Normal frames. */
1657
e53bef9f 1658static struct amd64_frame_cache *
10458914 1659amd64_frame_cache (struct frame_info *this_frame, void **this_cache)
6d686a84 1660{
e53bef9f 1661 struct amd64_frame_cache *cache;
d8de1ef7 1662 gdb_byte buf[8];
6d686a84 1663 int i;
6d686a84 1664
c4f35dd8
MK
1665 if (*this_cache)
1666 return *this_cache;
6d686a84 1667
e53bef9f 1668 cache = amd64_alloc_frame_cache ();
c4f35dd8
MK
1669 *this_cache = cache;
1670
10458914 1671 cache->pc = get_frame_func (this_frame);
c4f35dd8 1672 if (cache->pc != 0)
10458914 1673 amd64_analyze_prologue (cache->pc, get_frame_pc (this_frame), cache);
c4f35dd8 1674
e0c62198
L
1675 if (cache->saved_sp_reg != -1)
1676 {
1677 /* Stack pointer has been saved. */
1678 get_frame_register (this_frame, cache->saved_sp_reg, buf);
1679 cache->saved_sp = extract_unsigned_integer (buf, 8);
1680 }
1681
c4f35dd8
MK
1682 if (cache->frameless_p)
1683 {
4a28816e
MK
1684 /* We didn't find a valid frame. If we're at the start of a
1685 function, or somewhere half-way through its prologue, the function's
1686 frame probably hasn't been fully set up yet. Try to
1687 reconstruct the base address for the stack frame by looking
1688 at the stack pointer. For truly "frameless" functions this
1689 might work too. */
c4f35dd8 1690
e0c62198
L
1691 if (cache->saved_sp_reg != -1)
1692 {
1693 /* We're halfway aligning the stack. */
1694 cache->base = ((cache->saved_sp - 8) & 0xfffffffffffffff0LL) - 8;
1695 cache->saved_regs[AMD64_RIP_REGNUM] = cache->saved_sp - 8;
1696
1697 /* This will be added back below. */
1698 cache->saved_regs[AMD64_RIP_REGNUM] -= cache->base;
1699 }
1700 else
1701 {
1702 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
1703 cache->base = extract_unsigned_integer (buf, 8) + cache->sp_offset;
1704 }
c4f35dd8 1705 }
35883a3f
MK
1706 else
1707 {
10458914 1708 get_frame_register (this_frame, AMD64_RBP_REGNUM, buf);
35883a3f
MK
1709 cache->base = extract_unsigned_integer (buf, 8);
1710 }
c4f35dd8
MK
1711
1712 /* Now that we have the base address for the stack frame we can
1713 calculate the value of %rsp in the calling frame. */
1714 cache->saved_sp = cache->base + 16;
1715
35883a3f
MK
1716 /* For normal frames, %rip is stored at 8(%rbp). If we don't have a
1717 frame we find it at the same offset from the reconstructed base
e0c62198
L
1718 address. If we're halfway aligning the stack, %rip is handled
1719 differently (see above). */
1720 if (!cache->frameless_p || cache->saved_sp_reg == -1)
1721 cache->saved_regs[AMD64_RIP_REGNUM] = 8;
35883a3f 1722
c4f35dd8
MK
1723 /* Adjust all the saved registers such that they contain addresses
1724 instead of offsets. */
e53bef9f 1725 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
c4f35dd8
MK
1726 if (cache->saved_regs[i] != -1)
1727 cache->saved_regs[i] += cache->base;
1728
1729 return cache;
6d686a84
ML
1730}
1731
c4f35dd8 1732static void
10458914 1733amd64_frame_this_id (struct frame_info *this_frame, void **this_cache,
e53bef9f 1734 struct frame_id *this_id)
c4f35dd8 1735{
e53bef9f 1736 struct amd64_frame_cache *cache =
10458914 1737 amd64_frame_cache (this_frame, this_cache);
c4f35dd8
MK
1738
1739 /* This marks the outermost frame. */
1740 if (cache->base == 0)
1741 return;
1742
1743 (*this_id) = frame_id_build (cache->base + 16, cache->pc);
1744}
e76e1718 1745
10458914
DJ
1746static struct value *
1747amd64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
1748 int regnum)
53e95fcf 1749{
10458914 1750 struct gdbarch *gdbarch = get_frame_arch (this_frame);
e53bef9f 1751 struct amd64_frame_cache *cache =
10458914 1752 amd64_frame_cache (this_frame, this_cache);
e76e1718 1753
c4f35dd8 1754 gdb_assert (regnum >= 0);
b1ab997b 1755
2ae02b47 1756 if (regnum == gdbarch_sp_regnum (gdbarch) && cache->saved_sp)
10458914 1757 return frame_unwind_got_constant (this_frame, regnum, cache->saved_sp);
e76e1718 1758
e53bef9f 1759 if (regnum < AMD64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
10458914
DJ
1760 return frame_unwind_got_memory (this_frame, regnum,
1761 cache->saved_regs[regnum]);
e76e1718 1762
10458914 1763 return frame_unwind_got_register (this_frame, regnum, regnum);
c4f35dd8 1764}
e76e1718 1765
e53bef9f 1766static const struct frame_unwind amd64_frame_unwind =
c4f35dd8
MK
1767{
1768 NORMAL_FRAME,
e53bef9f 1769 amd64_frame_this_id,
10458914
DJ
1770 amd64_frame_prev_register,
1771 NULL,
1772 default_frame_sniffer
c4f35dd8 1773};
c4f35dd8 1774\f
e76e1718 1775
c4f35dd8
MK
1776/* Signal trampolines. */
1777
1778/* FIXME: kettenis/20030419: Perhaps, we can unify the 32-bit and
1779 64-bit variants. This would require using identical frame caches
1780 on both platforms. */
1781
e53bef9f 1782static struct amd64_frame_cache *
10458914 1783amd64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
c4f35dd8 1784{
e53bef9f 1785 struct amd64_frame_cache *cache;
10458914 1786 struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
c4f35dd8 1787 CORE_ADDR addr;
d8de1ef7 1788 gdb_byte buf[8];
2b5e0749 1789 int i;
c4f35dd8
MK
1790
1791 if (*this_cache)
1792 return *this_cache;
1793
e53bef9f 1794 cache = amd64_alloc_frame_cache ();
c4f35dd8 1795
10458914 1796 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
c4f35dd8
MK
1797 cache->base = extract_unsigned_integer (buf, 8) - 8;
1798
10458914 1799 addr = tdep->sigcontext_addr (this_frame);
2b5e0749 1800 gdb_assert (tdep->sc_reg_offset);
e53bef9f 1801 gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
2b5e0749
MK
1802 for (i = 0; i < tdep->sc_num_regs; i++)
1803 if (tdep->sc_reg_offset[i] != -1)
1804 cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];
c4f35dd8
MK
1805
1806 *this_cache = cache;
1807 return cache;
53e95fcf
JS
1808}
1809
c4f35dd8 1810static void
10458914 1811amd64_sigtramp_frame_this_id (struct frame_info *this_frame,
e53bef9f 1812 void **this_cache, struct frame_id *this_id)
c4f35dd8 1813{
e53bef9f 1814 struct amd64_frame_cache *cache =
10458914 1815 amd64_sigtramp_frame_cache (this_frame, this_cache);
c4f35dd8 1816
10458914 1817 (*this_id) = frame_id_build (cache->base + 16, get_frame_pc (this_frame));
c4f35dd8
MK
1818}
1819
10458914
DJ
1820static struct value *
1821amd64_sigtramp_frame_prev_register (struct frame_info *this_frame,
1822 void **this_cache, int regnum)
c4f35dd8
MK
1823{
1824 /* Make sure we've initialized the cache. */
10458914 1825 amd64_sigtramp_frame_cache (this_frame, this_cache);
c4f35dd8 1826
10458914 1827 return amd64_frame_prev_register (this_frame, this_cache, regnum);
c4f35dd8
MK
1828}
1829
10458914
DJ
1830static int
1831amd64_sigtramp_frame_sniffer (const struct frame_unwind *self,
1832 struct frame_info *this_frame,
1833 void **this_cache)
c4f35dd8 1834{
10458914 1835 struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
911bc6ee
MK
1836
1837 /* We shouldn't even bother if we don't have a sigcontext_addr
1838 handler. */
1839 if (tdep->sigcontext_addr == NULL)
10458914 1840 return 0;
911bc6ee
MK
1841
1842 if (tdep->sigtramp_p != NULL)
1843 {
10458914
DJ
1844 if (tdep->sigtramp_p (this_frame))
1845 return 1;
911bc6ee 1846 }
c4f35dd8 1847
911bc6ee 1848 if (tdep->sigtramp_start != 0)
1c3545ae 1849 {
10458914 1850 CORE_ADDR pc = get_frame_pc (this_frame);
1c3545ae 1851
911bc6ee
MK
1852 gdb_assert (tdep->sigtramp_end != 0);
1853 if (pc >= tdep->sigtramp_start && pc < tdep->sigtramp_end)
10458914 1854 return 1;
1c3545ae 1855 }
c4f35dd8 1856
10458914 1857 return 0;
c4f35dd8 1858}
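/* How a port typically hooks into the sniffer above (descriptive note,
   not tied to any particular target): the OS-ABI handler either
   installs a tdep->sigtramp_p predicate that recognizes the OS's
   signal-return code at the frame's PC, or, if the trampoline lives at
   a fixed address range, fills in tdep->sigtramp_start and
   tdep->sigtramp_end.  In both cases it must also provide
   tdep->sigcontext_addr and tdep->sc_reg_offset so that
   amd64_sigtramp_frame_cache can locate the saved registers.  */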
10458914
DJ
1859
1860static const struct frame_unwind amd64_sigtramp_frame_unwind =
1861{
1862 SIGTRAMP_FRAME,
1863 amd64_sigtramp_frame_this_id,
1864 amd64_sigtramp_frame_prev_register,
1865 NULL,
1866 amd64_sigtramp_frame_sniffer
1867};
c4f35dd8
MK
1868\f
1869
1870static CORE_ADDR
10458914 1871amd64_frame_base_address (struct frame_info *this_frame, void **this_cache)
c4f35dd8 1872{
e53bef9f 1873 struct amd64_frame_cache *cache =
10458914 1874 amd64_frame_cache (this_frame, this_cache);
c4f35dd8
MK
1875
1876 return cache->base;
1877}
1878
e53bef9f 1879static const struct frame_base amd64_frame_base =
c4f35dd8 1880{
e53bef9f
MK
1881 &amd64_frame_unwind,
1882 amd64_frame_base_address,
1883 amd64_frame_base_address,
1884 amd64_frame_base_address
c4f35dd8
MK
1885};
1886
166f4c7b 1887static struct frame_id
10458914 1888amd64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
166f4c7b 1889{
c4f35dd8
MK
1890 CORE_ADDR fp;
1891
10458914 1892 fp = get_frame_register_unsigned (this_frame, AMD64_RBP_REGNUM);
c4f35dd8 1893
10458914 1894 return frame_id_build (fp + 16, get_frame_pc (this_frame));
166f4c7b
ML
1895}
1896
8b148df9
AC
1897/* 16-byte align the SP per frame requirements.  */
1898
1899static CORE_ADDR
e53bef9f 1900amd64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
8b148df9
AC
1901{
1902	 return sp & -(CORE_ADDR) 16;
1903}
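/* A worked example of the mask above: with SP == 0x7fffffffe368,
   -(CORE_ADDR) 16 is 0xfffffffffffffff0, so the result is
   0x7fffffffe360; an already aligned 0x7fffffffe360 stays unchanged.
   The adjustment is always downwards, the safe direction for a
   downward-growing stack.  */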
473f17b0
MK
1904\f
1905
593adc23
MK
1906/* Supply register REGNUM from the buffer specified by FPREGS and LEN
1907 in the floating-point register set REGSET to register cache
1908 REGCACHE. If REGNUM is -1, do this for all registers in REGSET. */
473f17b0
MK
1909
1910static void
e53bef9f
MK
1911amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
1912 int regnum, const void *fpregs, size_t len)
473f17b0 1913{
9ea75c57 1914 const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);
473f17b0
MK
1915
1916 gdb_assert (len == tdep->sizeof_fpregset);
90f90721 1917 amd64_supply_fxsave (regcache, regnum, fpregs);
473f17b0 1918}
8b148df9 1919
593adc23
MK
1920/* Collect register REGNUM from the register cache REGCACHE and store
1921 it in the buffer specified by FPREGS and LEN as described by the
1922 floating-point register set REGSET. If REGNUM is -1, do this for
1923 all registers in REGSET. */
1924
1925static void
1926amd64_collect_fpregset (const struct regset *regset,
1927 const struct regcache *regcache,
1928 int regnum, void *fpregs, size_t len)
1929{
1930 const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);
1931
1932 gdb_assert (len == tdep->sizeof_fpregset);
1933 amd64_collect_fxsave (regcache, regnum, fpregs);
1934}
1935
c6b33596
MK
1936/* Return the appropriate register set for the core section identified
1937 by SECT_NAME and SECT_SIZE. */
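/* Note: in an AMD64 core file the floating-point and SSE state is
   normally dumped as a raw FXSAVE image in the ".reg2" section, which
   is why the size check below compares against tdep->sizeof_fpregset
   (set to I387_SIZEOF_FXSAVE in amd64_init_abi).  The general-purpose
   ".reg" section and anything else is deferred to the common i386
   code.  */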
1938
1939static const struct regset *
e53bef9f
MK
1940amd64_regset_from_core_section (struct gdbarch *gdbarch,
1941 const char *sect_name, size_t sect_size)
c6b33596
MK
1942{
1943 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1944
1945 if (strcmp (sect_name, ".reg2") == 0 && sect_size == tdep->sizeof_fpregset)
1946 {
1947 if (tdep->fpregset == NULL)
593adc23
MK
1948 tdep->fpregset = regset_alloc (gdbarch, amd64_supply_fpregset,
1949 amd64_collect_fpregset);
c6b33596
MK
1950
1951 return tdep->fpregset;
1952 }
1953
1954 return i386_regset_from_core_section (gdbarch, sect_name, sect_size);
1955}
1956\f
1957
436675d3
PA
1958/* Figure out where the longjmp will land. Slurp the jmp_buf out of
1959 %rdi. We expect its value to be a pointer to the jmp_buf structure
1960 from which we extract the address that we will land at. This
1961 address is copied into PC. This routine returns non-zero on
1962 success. */
1963
1964static int
1965amd64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
1966{
1967 gdb_byte buf[8];
1968 CORE_ADDR jb_addr;
1969 struct gdbarch *gdbarch = get_frame_arch (frame);
1970 int jb_pc_offset = gdbarch_tdep (gdbarch)->jb_pc_offset;
0dfff4cb 1971 int len = TYPE_LENGTH (builtin_type (gdbarch)->builtin_func_ptr);
436675d3
PA
1972
1973 /* If JB_PC_OFFSET is -1, we have no way to find out where the
1974 longjmp will land. */
1975 if (jb_pc_offset == -1)
1976 return 0;
1977
1978 get_frame_register (frame, AMD64_RDI_REGNUM, buf);
0dfff4cb
UW
1979	 jb_addr = extract_typed_address
1980 (buf, builtin_type (gdbarch)->builtin_data_ptr);
436675d3
PA
1981 if (target_read_memory (jb_addr + jb_pc_offset, buf, len))
1982 return 0;
1983
0dfff4cb 1984 *pc = extract_typed_address (buf, builtin_type (gdbarch)->builtin_func_ptr);
436675d3
PA
1985
1986 return 1;
1987}
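/* In pseudo-code, the lookup above is roughly

	jb_addr = value of %rdi in FRAME    (the jmp_buf pointer, since
					     %rdi is the first argument)
	pc      = *(void **) (jb_addr + jb_pc_offset)

   where jb_pc_offset is supplied by the OS-ABI code and gives the
   offset of the saved PC within that OS's jmp_buf layout.  */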
1988
2213a65d 1989void
90f90721 1990amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
53e95fcf 1991{
0c1a73d6 1992 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
53e95fcf 1993
473f17b0
MK
1994 /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
1995 floating-point registers. */
1996 tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;
1997
5716833c 1998 /* AMD64 has an FPU and 16 SSE registers. */
90f90721 1999 tdep->st0_regnum = AMD64_ST0_REGNUM;
0c1a73d6 2000 tdep->num_xmm_regs = 16;
53e95fcf 2001
0c1a73d6 2002 /* This is what all the fuss is about. */
53e95fcf
JS
2003 set_gdbarch_long_bit (gdbarch, 64);
2004 set_gdbarch_long_long_bit (gdbarch, 64);
2005 set_gdbarch_ptr_bit (gdbarch, 64);
2006
e53bef9f
MK
2007 /* In contrast to the i386, on AMD64 a `long double' actually takes
2008 up 128 bits, even though it's still based on the i387 extended
2009 floating-point format which has only 80 significant bits. */
b83b026c
MK
2010 set_gdbarch_long_double_bit (gdbarch, 128);
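  /* Concretely: `sizeof (long double)' is 16 on AMD64 (versus
     typically 12 on i386), but only the low 10 bytes of each object
     carry significant data; the remainder is padding.  */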
2011
e53bef9f
MK
2012 set_gdbarch_num_regs (gdbarch, AMD64_NUM_REGS);
2013 set_gdbarch_register_name (gdbarch, amd64_register_name);
2014 set_gdbarch_register_type (gdbarch, amd64_register_type);
b83b026c
MK
2015
2016 /* Register numbers of various important registers. */
90f90721
MK
2017 set_gdbarch_sp_regnum (gdbarch, AMD64_RSP_REGNUM); /* %rsp */
2018 set_gdbarch_pc_regnum (gdbarch, AMD64_RIP_REGNUM); /* %rip */
2019 set_gdbarch_ps_regnum (gdbarch, AMD64_EFLAGS_REGNUM); /* %eflags */
2020 set_gdbarch_fp0_regnum (gdbarch, AMD64_ST0_REGNUM); /* %st(0) */
b83b026c 2021
e53bef9f
MK
2022 /* The "default" register numbering scheme for AMD64 is referred to
2023 as the "DWARF Register Number Mapping" in the System V psABI.
2024 The preferred debugging format for all known AMD64 targets is
2025 actually DWARF2, and GCC doesn't seem to support DWARF (that is
2026 DWARF-1), but we provide the same mapping just in case. This
2027 mapping is also used for stabs, which GCC does support. */
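  /* A concrete illustration of why the remapping is needed: the
     psABI's DWARF numbering is %rax=0, %rdx=1, %rcx=2, %rbx=3, %rsi=4,
     %rdi=5, %rbp=6, %rsp=7, %r8-%r15=8-15 and %rip=16, whereas GDB's
     own numbering (see amd64_register_names) keeps the traditional
     %rax, %rbx, %rcx, %rdx order, so e.g. DWARF register 1 has to be
     translated to GDB's register number for %rdx (3) by
     amd64_dwarf_reg_to_regnum.  */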
2028 set_gdbarch_stab_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
e53bef9f 2029 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
de220d0f 2030
c4f35dd8 2031 /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
e53bef9f 2032 be in use on any of the supported AMD64 targets. */
53e95fcf 2033
c4f35dd8 2034 /* Call dummy code. */
e53bef9f
MK
2035 set_gdbarch_push_dummy_call (gdbarch, amd64_push_dummy_call);
2036 set_gdbarch_frame_align (gdbarch, amd64_frame_align);
8b148df9 2037 set_gdbarch_frame_red_zone_size (gdbarch, 128);
53e95fcf 2038
83acabca 2039 set_gdbarch_convert_register_p (gdbarch, i387_convert_register_p);
d532c08f
MK
2040 set_gdbarch_register_to_value (gdbarch, i387_register_to_value);
2041 set_gdbarch_value_to_register (gdbarch, i387_value_to_register);
2042
efb1c01c 2043 set_gdbarch_return_value (gdbarch, amd64_return_value);
53e95fcf 2044
e53bef9f 2045 set_gdbarch_skip_prologue (gdbarch, amd64_skip_prologue);
53e95fcf 2046
c4f35dd8 2047 /* Avoid wiring in the MMX registers for now. */
2213a65d 2048 set_gdbarch_num_pseudo_regs (gdbarch, 0);
5716833c 2049 tdep->mm0_regnum = -1;
2213a65d 2050
10458914 2051 set_gdbarch_dummy_id (gdbarch, amd64_dummy_id);
53e95fcf 2052
10458914
DJ
2053 frame_unwind_append_unwinder (gdbarch, &amd64_sigtramp_frame_unwind);
2054 frame_unwind_append_unwinder (gdbarch, &amd64_frame_unwind);
e53bef9f 2055 frame_base_set_default (gdbarch, &amd64_frame_base);
c6b33596
MK
2056
2057 /* If we have a register mapping, enable the generic core file support. */
2058 if (tdep->gregset_reg_offset)
2059 set_gdbarch_regset_from_core_section (gdbarch,
e53bef9f 2060 amd64_regset_from_core_section);
436675d3
PA
2061
2062 set_gdbarch_get_longjmp_target (gdbarch, amd64_get_longjmp_target);
c4f35dd8
MK
2063}
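/* A minimal sketch (all `someos_*' names are hypothetical, not the
   code of any real port) of how an OS-ABI handler layers on top of
   amd64_init_abi: call it first, then fill in the tdep fields that
   the generic code above consults.

	static void
	someos_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
	{
	  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

	  amd64_init_abi (info, gdbarch);

	  tdep->gregset_reg_offset = someos_gregset_reg_offset;
	  tdep->gregset_num_regs = ARRAY_SIZE (someos_gregset_reg_offset);
	  tdep->sizeof_gregset = SOMEOS_SIZEOF_GREGSET;

	  tdep->sigtramp_p = someos_sigtramp_p;
	  tdep->sigcontext_addr = someos_sigcontext_addr;
	  tdep->sc_reg_offset = someos_sc_reg_offset;
	  tdep->sc_num_regs = ARRAY_SIZE (someos_sc_reg_offset);

	  tdep->jb_pc_offset = SOMEOS_JB_PC_OFFSET;
	}

   gregset_reg_offset/sizeof_gregset enable the core-file support, the
   sigtramp and sigcontext fields feed the signal-frame unwinder, and
   jb_pc_offset feeds amd64_get_longjmp_target.  */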
2064\f
2065
41d041d6
MK
2066/* The 64-bit FXSAVE format differs from the 32-bit format in the
2067 sense that the instruction pointer and data pointer are simply
2068 64-bit offsets into the code segment and the data segment instead
2069 of a selector offset pair. The functions below store the upper 32
2070	 bits of these pointers (instead of just the 16 bits of the segment
2071 selector). */
2072
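/* For reference, the FXSAVE-area offsets assumed below (the byte
   offsets are the same in the 32-bit and 64-bit formats; only their
   interpretation differs):

	offset  8: FPU instruction pointer, low 32 bits
	offset 12: 64-bit format: upper 32 bits of the instruction
		   pointer; 32-bit format: %cs selector plus reserved bits
	offset 16: FPU operand (data) pointer, low 32 bits
	offset 20: 64-bit format: upper 32 bits of the operand pointer;
		   32-bit format: %ds selector plus reserved bits

   Hence the `regs + 12' and `regs + 20' accesses for
   I387_FISEG_REGNUM and I387_FOSEG_REGNUM in the two functions
   below.  */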
2073/* Fill register REGNUM in REGCACHE with the appropriate
0485f6ad
MK
2074 floating-point or SSE register value from *FXSAVE. If REGNUM is
2075 -1, do this for all registers. This function masks off any of the
2076 reserved bits in *FXSAVE. */
c4f35dd8
MK
2077
2078void
90f90721 2079amd64_supply_fxsave (struct regcache *regcache, int regnum,
20a6ec49 2080 const void *fxsave)
c4f35dd8 2081{
20a6ec49
MD
2082 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2083 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2084
41d041d6 2085 i387_supply_fxsave (regcache, regnum, fxsave);
c4f35dd8 2086
20a6ec49 2087 if (fxsave && gdbarch_ptr_bit (gdbarch) == 64)
c4f35dd8 2088 {
d8de1ef7 2089 const gdb_byte *regs = fxsave;
41d041d6 2090
20a6ec49
MD
2091 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
2092 regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
2093 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
2094 regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
c4f35dd8 2095 }
0c1a73d6
MK
2096}
2097
3c017e40
MK
2098/* Fill register REGNUM (if it is a floating-point or SSE register) in
2099 *FXSAVE with the value from REGCACHE. If REGNUM is -1, do this for
2100 all registers. This function doesn't touch any of the reserved
2101 bits in *FXSAVE. */
2102
2103void
2104amd64_collect_fxsave (const struct regcache *regcache, int regnum,
2105 void *fxsave)
2106{
20a6ec49
MD
2107 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2108 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
d8de1ef7 2109 gdb_byte *regs = fxsave;
3c017e40
MK
2110
2111 i387_collect_fxsave (regcache, regnum, fxsave);
2112
20a6ec49 2113 if (gdbarch_ptr_bit (gdbarch) == 64)
f0ef85a5 2114 {
20a6ec49
MD
2115 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
2116 regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
2117 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
2118 regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
f0ef85a5 2119 }
3c017e40 2120}