1 /* Target-dependent code for AMD64.
2
3 Copyright (C) 2001-2014 Free Software Foundation, Inc.
4
5 Contributed by Jiri Smid, SuSE Labs.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "opcode/i386.h"
24 #include "dis-asm.h"
25 #include "arch-utils.h"
26 #include "block.h"
27 #include "dummy-frame.h"
28 #include "frame.h"
29 #include "frame-base.h"
30 #include "frame-unwind.h"
31 #include "inferior.h"
32 #include "infrun.h"
33 #include "gdbcmd.h"
34 #include "gdbcore.h"
35 #include "objfiles.h"
36 #include "regcache.h"
37 #include "regset.h"
38 #include "symfile.h"
39 #include "disasm.h"
40 #include "exceptions.h"
41 #include "amd64-tdep.h"
42 #include "i387-tdep.h"
43
44 #include "features/i386/amd64.c"
45 #include "features/i386/amd64-avx.c"
46 #include "features/i386/amd64-mpx.c"
47 #include "features/i386/amd64-avx512.c"
48
49 #include "features/i386/x32.c"
50 #include "features/i386/x32-avx.c"
51 #include "features/i386/x32-avx512.c"
52
53 #include "ax.h"
54 #include "ax-gdb.h"
55
56 /* Note that the AMD64 architecture was previously known as x86-64.
57 The latter is (forever) engraved into the canonical system name as
58 returned by config.guess, and used as the name for the AMD64 port
59 of GNU/Linux. The BSDs have renamed their ports to amd64; they
60 don't like to shout. For GDB we prefer the amd64_-prefix over the
61 x86_64_-prefix since it's so much easier to type. */
62
63 /* Register information. */
64
65 static const char *amd64_register_names[] =
66 {
67 "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",
68
69 /* %r8 is indeed register number 8. */
70 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
71 "rip", "eflags", "cs", "ss", "ds", "es", "fs", "gs",
72
73 /* %st0 is register number 24. */
74 "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7",
75 "fctrl", "fstat", "ftag", "fiseg", "fioff", "foseg", "fooff", "fop",
76
77 /* %xmm0 is register number 40. */
78 "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
79 "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
80 "mxcsr",
81 };
82
83 static const char *amd64_ymm_names[] =
84 {
85 "ymm0", "ymm1", "ymm2", "ymm3",
86 "ymm4", "ymm5", "ymm6", "ymm7",
87 "ymm8", "ymm9", "ymm10", "ymm11",
88 "ymm12", "ymm13", "ymm14", "ymm15"
89 };
90
91 static const char *amd64_ymm_avx512_names[] =
92 {
93 "ymm16", "ymm17", "ymm18", "ymm19",
94 "ymm20", "ymm21", "ymm22", "ymm23",
95 "ymm24", "ymm25", "ymm26", "ymm27",
96 "ymm28", "ymm29", "ymm30", "ymm31"
97 };
98
99 static const char *amd64_ymmh_names[] =
100 {
101 "ymm0h", "ymm1h", "ymm2h", "ymm3h",
102 "ymm4h", "ymm5h", "ymm6h", "ymm7h",
103 "ymm8h", "ymm9h", "ymm10h", "ymm11h",
104 "ymm12h", "ymm13h", "ymm14h", "ymm15h"
105 };
106
107 static const char *amd64_ymmh_avx512_names[] =
108 {
109 "ymm16h", "ymm17h", "ymm18h", "ymm19h",
110 "ymm20h", "ymm21h", "ymm22h", "ymm23h",
111 "ymm24h", "ymm25h", "ymm26h", "ymm27h",
112 "ymm28h", "ymm29h", "ymm30h", "ymm31h"
113 };
114
115 static const char *amd64_mpx_names[] =
116 {
117 "bnd0raw", "bnd1raw", "bnd2raw", "bnd3raw", "bndcfgu", "bndstatus"
118 };
119
120 static const char *amd64_k_names[] =
121 {
122 "k0", "k1", "k2", "k3",
123 "k4", "k5", "k6", "k7"
124 };
125
126 static const char *amd64_zmmh_names[] =
127 {
128 "zmm0h", "zmm1h", "zmm2h", "zmm3h",
129 "zmm4h", "zmm5h", "zmm6h", "zmm7h",
130 "zmm8h", "zmm9h", "zmm10h", "zmm11h",
131 "zmm12h", "zmm13h", "zmm14h", "zmm15h",
132 "zmm16h", "zmm17h", "zmm18h", "zmm19h",
133 "zmm20h", "zmm21h", "zmm22h", "zmm23h",
134 "zmm24h", "zmm25h", "zmm26h", "zmm27h",
135 "zmm28h", "zmm29h", "zmm30h", "zmm31h"
136 };
137
138 static const char *amd64_zmm_names[] =
139 {
140 "zmm0", "zmm1", "zmm2", "zmm3",
141 "zmm4", "zmm5", "zmm6", "zmm7",
142 "zmm8", "zmm9", "zmm10", "zmm11",
143 "zmm12", "zmm13", "zmm14", "zmm15",
144 "zmm16", "zmm17", "zmm18", "zmm19",
145 "zmm20", "zmm21", "zmm22", "zmm23",
146 "zmm24", "zmm25", "zmm26", "zmm27",
147 "zmm28", "zmm29", "zmm30", "zmm31"
148 };
149
150 static const char *amd64_xmm_avx512_names[] = {
151 "xmm16", "xmm17", "xmm18", "xmm19",
152 "xmm20", "xmm21", "xmm22", "xmm23",
153 "xmm24", "xmm25", "xmm26", "xmm27",
154 "xmm28", "xmm29", "xmm30", "xmm31"
155 };
156
157 /* DWARF Register Number Mapping as defined in the System V psABI,
158 section 3.6. */
159
160 static int amd64_dwarf_regmap[] =
161 {
162 /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI. */
163 AMD64_RAX_REGNUM, AMD64_RDX_REGNUM,
164 AMD64_RCX_REGNUM, AMD64_RBX_REGNUM,
165 AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
166
167 /* Frame Pointer Register RBP. */
168 AMD64_RBP_REGNUM,
169
170 /* Stack Pointer Register RSP. */
171 AMD64_RSP_REGNUM,
172
173 /* Extended Integer Registers 8 - 15. */
174 AMD64_R8_REGNUM, /* %r8 */
175 AMD64_R9_REGNUM, /* %r9 */
176 AMD64_R10_REGNUM, /* %r10 */
177 AMD64_R11_REGNUM, /* %r11 */
178 AMD64_R12_REGNUM, /* %r12 */
179 AMD64_R13_REGNUM, /* %r13 */
180 AMD64_R14_REGNUM, /* %r14 */
181 AMD64_R15_REGNUM, /* %r15 */
182
183 /* Return Address RA. Mapped to RIP. */
184 AMD64_RIP_REGNUM,
185
186 /* SSE Registers 0 - 7. */
187 AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
188 AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
189 AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
190 AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
191
192 /* Extended SSE Registers 8 - 15. */
193 AMD64_XMM0_REGNUM + 8, AMD64_XMM0_REGNUM + 9,
194 AMD64_XMM0_REGNUM + 10, AMD64_XMM0_REGNUM + 11,
195 AMD64_XMM0_REGNUM + 12, AMD64_XMM0_REGNUM + 13,
196 AMD64_XMM0_REGNUM + 14, AMD64_XMM0_REGNUM + 15,
197
198 /* Floating Point Registers 0-7. */
199 AMD64_ST0_REGNUM + 0, AMD64_ST0_REGNUM + 1,
200 AMD64_ST0_REGNUM + 2, AMD64_ST0_REGNUM + 3,
201 AMD64_ST0_REGNUM + 4, AMD64_ST0_REGNUM + 5,
202 AMD64_ST0_REGNUM + 6, AMD64_ST0_REGNUM + 7,
203
204 /* Control and Status Flags Register. */
205 AMD64_EFLAGS_REGNUM,
206
207 /* Selector Registers. */
208 AMD64_ES_REGNUM,
209 AMD64_CS_REGNUM,
210 AMD64_SS_REGNUM,
211 AMD64_DS_REGNUM,
212 AMD64_FS_REGNUM,
213 AMD64_GS_REGNUM,
214 -1,
215 -1,
216
217 /* Segment Base Address Registers. */
218 -1,
219 -1,
220 -1,
221 -1,
222
223 /* Special Selector Registers. */
224 -1,
225 -1,
226
227 /* Floating Point Control Registers. */
228 AMD64_MXCSR_REGNUM,
229 AMD64_FCTRL_REGNUM,
230 AMD64_FSTAT_REGNUM
231 };
232
233 static const int amd64_dwarf_regmap_len =
234 (sizeof (amd64_dwarf_regmap) / sizeof (amd64_dwarf_regmap[0]));
235
236 /* Convert DWARF register number REG to the appropriate register
237 number used by GDB. */
238
239 static int
240 amd64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
241 {
242 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
243 int ymm0_regnum = tdep->ymm0_regnum;
244 int regnum = -1;
245
246 if (reg >= 0 && reg < amd64_dwarf_regmap_len)
247 regnum = amd64_dwarf_regmap[reg];
248
249 if (regnum == -1)
250 warning (_("Unmapped DWARF Register #%d encountered."), reg);
251 else if (ymm0_regnum >= 0
252 && i386_xmm_regnum_p (gdbarch, regnum))
253 regnum += ymm0_regnum - I387_XMM0_REGNUM (tdep);
254
255 return regnum;
256 }
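/* For illustration (this worked example is not part of the original
   source): with the psABI numbering above, DWARF register 5 is %rdi,
   so amd64_dwarf_reg_to_regnum returns AMD64_RDI_REGNUM; DWARF
   register 17 is %xmm0, which is remapped to the corresponding %ymm
   raw register on AVX targets where ymm0_regnum is valid.  */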
257
258 /* Map architectural register numbers to gdb register numbers. */
259
260 static const int amd64_arch_regmap[16] =
261 {
262 AMD64_RAX_REGNUM, /* %rax */
263 AMD64_RCX_REGNUM, /* %rcx */
264 AMD64_RDX_REGNUM, /* %rdx */
265 AMD64_RBX_REGNUM, /* %rbx */
266 AMD64_RSP_REGNUM, /* %rsp */
267 AMD64_RBP_REGNUM, /* %rbp */
268 AMD64_RSI_REGNUM, /* %rsi */
269 AMD64_RDI_REGNUM, /* %rdi */
270 AMD64_R8_REGNUM, /* %r8 */
271 AMD64_R9_REGNUM, /* %r9 */
272 AMD64_R10_REGNUM, /* %r10 */
273 AMD64_R11_REGNUM, /* %r11 */
274 AMD64_R12_REGNUM, /* %r12 */
275 AMD64_R13_REGNUM, /* %r13 */
276 AMD64_R14_REGNUM, /* %r14 */
277 AMD64_R15_REGNUM /* %r15 */
278 };
279
280 static const int amd64_arch_regmap_len =
281 (sizeof (amd64_arch_regmap) / sizeof (amd64_arch_regmap[0]));
282
283 /* Convert architectural register number REG to the appropriate register
284 number used by GDB. */
285
286 static int
287 amd64_arch_reg_to_regnum (int reg)
288 {
289 gdb_assert (reg >= 0 && reg < amd64_arch_regmap_len);
290
291 return amd64_arch_regmap[reg];
292 }
293
294 /* Register names for byte pseudo-registers. */
295
296 static const char *amd64_byte_names[] =
297 {
298 "al", "bl", "cl", "dl", "sil", "dil", "bpl", "spl",
299 "r8l", "r9l", "r10l", "r11l", "r12l", "r13l", "r14l", "r15l",
300 "ah", "bh", "ch", "dh"
301 };
302
303 /* Number of lower byte registers. */
304 #define AMD64_NUM_LOWER_BYTE_REGS 16
305
306 /* Register names for word pseudo-registers. */
307
308 static const char *amd64_word_names[] =
309 {
310 "ax", "bx", "cx", "dx", "si", "di", "bp", "",
311 "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w"
312 };
313
314 /* Register names for dword pseudo-registers. */
315
316 static const char *amd64_dword_names[] =
317 {
318 "eax", "ebx", "ecx", "edx", "esi", "edi", "ebp", "esp",
319 "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d",
320 "eip"
321 };
322
323 /* Return the name of register REGNUM. */
324
325 static const char *
326 amd64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
327 {
328 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
329 if (i386_byte_regnum_p (gdbarch, regnum))
330 return amd64_byte_names[regnum - tdep->al_regnum];
331 else if (i386_zmm_regnum_p (gdbarch, regnum))
332 return amd64_zmm_names[regnum - tdep->zmm0_regnum];
333 else if (i386_ymm_regnum_p (gdbarch, regnum))
334 return amd64_ymm_names[regnum - tdep->ymm0_regnum];
335 else if (i386_ymm_avx512_regnum_p (gdbarch, regnum))
336 return amd64_ymm_avx512_names[regnum - tdep->ymm16_regnum];
337 else if (i386_word_regnum_p (gdbarch, regnum))
338 return amd64_word_names[regnum - tdep->ax_regnum];
339 else if (i386_dword_regnum_p (gdbarch, regnum))
340 return amd64_dword_names[regnum - tdep->eax_regnum];
341 else
342 return i386_pseudo_register_name (gdbarch, regnum);
343 }
344
345 static struct value *
346 amd64_pseudo_register_read_value (struct gdbarch *gdbarch,
347 struct regcache *regcache,
348 int regnum)
349 {
350 gdb_byte raw_buf[MAX_REGISTER_SIZE];
351 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
352 enum register_status status;
353 struct value *result_value;
354 gdb_byte *buf;
355
356 result_value = allocate_value (register_type (gdbarch, regnum));
357 VALUE_LVAL (result_value) = lval_register;
358 VALUE_REGNUM (result_value) = regnum;
359 buf = value_contents_raw (result_value);
360
361 if (i386_byte_regnum_p (gdbarch, regnum))
362 {
363 int gpnum = regnum - tdep->al_regnum;
364
365 /* Extract (always little endian). */
366 if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
367 {
368 /* Special handling for AH, BH, CH, DH. */
369 status = regcache_raw_read (regcache,
370 gpnum - AMD64_NUM_LOWER_BYTE_REGS,
371 raw_buf);
372 if (status == REG_VALID)
373 memcpy (buf, raw_buf + 1, 1);
374 else
375 mark_value_bytes_unavailable (result_value, 0,
376 TYPE_LENGTH (value_type (result_value)));
377 }
378 else
379 {
380 status = regcache_raw_read (regcache, gpnum, raw_buf);
381 if (status == REG_VALID)
382 memcpy (buf, raw_buf, 1);
383 else
384 mark_value_bytes_unavailable (result_value, 0,
385 TYPE_LENGTH (value_type (result_value)));
386 }
387 }
388 else if (i386_dword_regnum_p (gdbarch, regnum))
389 {
390 int gpnum = regnum - tdep->eax_regnum;
391 /* Extract (always little endian). */
392 status = regcache_raw_read (regcache, gpnum, raw_buf);
393 if (status == REG_VALID)
394 memcpy (buf, raw_buf, 4);
395 else
396 mark_value_bytes_unavailable (result_value, 0,
397 TYPE_LENGTH (value_type (result_value)));
398 }
399 else
400 i386_pseudo_register_read_into_value (gdbarch, regcache, regnum,
401 result_value);
402
403 return result_value;
404 }
405
406 static void
407 amd64_pseudo_register_write (struct gdbarch *gdbarch,
408 struct regcache *regcache,
409 int regnum, const gdb_byte *buf)
410 {
411 gdb_byte raw_buf[MAX_REGISTER_SIZE];
412 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
413
414 if (i386_byte_regnum_p (gdbarch, regnum))
415 {
416 int gpnum = regnum - tdep->al_regnum;
417
418 if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
419 {
420 /* Read ... AH, BH, CH, DH. */
421 regcache_raw_read (regcache,
422 gpnum - AMD64_NUM_LOWER_BYTE_REGS, raw_buf);
423 /* ... Modify ... (always little endian). */
424 memcpy (raw_buf + 1, buf, 1);
425 /* ... Write. */
426 regcache_raw_write (regcache,
427 gpnum - AMD64_NUM_LOWER_BYTE_REGS, raw_buf);
428 }
429 else
430 {
431 /* Read ... */
432 regcache_raw_read (regcache, gpnum, raw_buf);
433 /* ... Modify ... (always little endian). */
434 memcpy (raw_buf, buf, 1);
435 /* ... Write. */
436 regcache_raw_write (regcache, gpnum, raw_buf);
437 }
438 }
439 else if (i386_dword_regnum_p (gdbarch, regnum))
440 {
441 int gpnum = regnum - tdep->eax_regnum;
442
443 /* Read ... */
444 regcache_raw_read (regcache, gpnum, raw_buf);
445 /* ... Modify ... (always little endian). */
446 memcpy (raw_buf, buf, 4);
447 /* ... Write. */
448 regcache_raw_write (regcache, gpnum, raw_buf);
449 }
450 else
451 i386_pseudo_register_write (gdbarch, regcache, regnum, buf);
452 }
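/* Worked example (an illustration, not part of the original source):
   writing 0x12 to the pseudo register %ah reads the full %rax,
   patches byte 1 (bits 8-15, always little endian), and writes %rax
   back, so %rax = 0x3456 becomes 0x1256.  */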
453
454 \f
455
456 /* Register classes as defined in the psABI. */
457
458 enum amd64_reg_class
459 {
460 AMD64_INTEGER,
461 AMD64_SSE,
462 AMD64_SSEUP,
463 AMD64_X87,
464 AMD64_X87UP,
465 AMD64_COMPLEX_X87,
466 AMD64_NO_CLASS,
467 AMD64_MEMORY
468 };
469
470 /* Return the union class of CLASS1 and CLASS2. See the psABI for
471 details. */
472
473 static enum amd64_reg_class
474 amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
475 {
476 /* Rule (a): If both classes are equal, this is the resulting class. */
477 if (class1 == class2)
478 return class1;
479
480 /* Rule (b): If one of the classes is NO_CLASS, the resulting class
481 is the other class. */
482 if (class1 == AMD64_NO_CLASS)
483 return class2;
484 if (class2 == AMD64_NO_CLASS)
485 return class1;
486
487 /* Rule (c): If one of the classes is MEMORY, the result is MEMORY. */
488 if (class1 == AMD64_MEMORY || class2 == AMD64_MEMORY)
489 return AMD64_MEMORY;
490
491 /* Rule (d): If one of the classes is INTEGER, the result is INTEGER. */
492 if (class1 == AMD64_INTEGER || class2 == AMD64_INTEGER)
493 return AMD64_INTEGER;
494
495 /* Rule (e): If one of the classes is X87, X87UP or COMPLEX_X87,
496 MEMORY is used as class. */
497 if (class1 == AMD64_X87 || class1 == AMD64_X87UP
498 || class1 == AMD64_COMPLEX_X87 || class2 == AMD64_X87
499 || class2 == AMD64_X87UP || class2 == AMD64_COMPLEX_X87)
500 return AMD64_MEMORY;
501
502 /* Rule (f): Otherwise class SSE is used. */
503 return AMD64_SSE;
504 }
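/* Example (illustrative only): merging AMD64_INTEGER with AMD64_SSE
   yields AMD64_INTEGER by rule (d); merging AMD64_SSE with AMD64_X87
   yields AMD64_MEMORY by rule (e); merging AMD64_NO_CLASS with
   anything yields the other class by rule (b).  */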
505
506 static void amd64_classify (struct type *type, enum amd64_reg_class class[2]);
507
508 /* Return non-zero if TYPE is a non-POD structure or union type. */
509
510 static int
511 amd64_non_pod_p (struct type *type)
512 {
513 /* ??? A class with a base class certainly isn't POD, but does this
514 catch all non-POD structure types? */
515 if (TYPE_CODE (type) == TYPE_CODE_STRUCT && TYPE_N_BASECLASSES (type) > 0)
516 return 1;
517
518 return 0;
519 }
520
521 /* Classify TYPE according to the rules for aggregate (structures and
522 arrays) and union types, and store the result in CLASS. */
523
524 static void
525 amd64_classify_aggregate (struct type *type, enum amd64_reg_class class[2])
526 {
527 /* 1. If the size of an object is larger than two eightbytes, or in
528 C++, is a non-POD structure or union type, or contains
529 unaligned fields, it has class memory. */
530 if (TYPE_LENGTH (type) > 16 || amd64_non_pod_p (type))
531 {
532 class[0] = class[1] = AMD64_MEMORY;
533 return;
534 }
535
536 /* 2. Both eightbytes get initialized to class NO_CLASS. */
537 class[0] = class[1] = AMD64_NO_CLASS;
538
539 /* 3. Each field of an object is classified recursively so that
540 always two fields are considered. The resulting class is
541 calculated according to the classes of the fields in the
542 eightbyte: */
543
544 if (TYPE_CODE (type) == TYPE_CODE_ARRAY)
545 {
546 struct type *subtype = check_typedef (TYPE_TARGET_TYPE (type));
547
548 /* All fields in an array have the same type. */
549 amd64_classify (subtype, class);
550 if (TYPE_LENGTH (type) > 8 && class[1] == AMD64_NO_CLASS)
551 class[1] = class[0];
552 }
553 else
554 {
555 int i;
556
557 /* Structure or union. */
558 gdb_assert (TYPE_CODE (type) == TYPE_CODE_STRUCT
559 || TYPE_CODE (type) == TYPE_CODE_UNION);
560
561 for (i = 0; i < TYPE_NFIELDS (type); i++)
562 {
563 struct type *subtype = check_typedef (TYPE_FIELD_TYPE (type, i));
564 int pos = TYPE_FIELD_BITPOS (type, i) / 64;
565 enum amd64_reg_class subclass[2];
566 int bitsize = TYPE_FIELD_BITSIZE (type, i);
567 int endpos;
568
569 if (bitsize == 0)
570 bitsize = TYPE_LENGTH (subtype) * 8;
571 endpos = (TYPE_FIELD_BITPOS (type, i) + bitsize - 1) / 64;
572
573 /* Ignore static fields. */
574 if (field_is_static (&TYPE_FIELD (type, i)))
575 continue;
576
577 gdb_assert (pos == 0 || pos == 1);
578
579 amd64_classify (subtype, subclass);
580 class[pos] = amd64_merge_classes (class[pos], subclass[0]);
581 if (bitsize <= 64 && pos == 0 && endpos == 1)
582 /* This is a bit of an odd case: We have a field that would
583 normally fit in one of the two eightbytes, except that
584 it is placed in a way that this field straddles them.
585 This has been seen with a structure containing an array.
586
587 The ABI is a bit unclear in this case, but we assume that
588 this field's class (stored in subclass[0]) must also be merged
589 into class[1]. In other words, our field has a piece stored
590 in the second eight-byte, and thus its class applies to
591 the second eight-byte as well.
592
593 In the case where the field length exceeds 8 bytes,
594 it should not be necessary to merge the field class
595 into class[1]. As LEN > 8, subclass[1] is necessarily
596 different from AMD64_NO_CLASS. If subclass[1] is equal
597 to subclass[0], then the normal class[1]/subclass[1]
598 merging will take care of everything. For subclass[1]
599 to be different from subclass[0], I can only see the case
600 where we have a SSE/SSEUP or X87/X87UP pair, which both
601 use up all 16 bytes of the aggregate, and are already
602 handled just fine (because each portion sits on its own
603 8-byte). */
604 class[1] = amd64_merge_classes (class[1], subclass[0]);
605 if (pos == 0)
606 class[1] = amd64_merge_classes (class[1], subclass[1]);
607 }
608 }
609
610 /* 4. Then a post merger cleanup is done: */
611
612 /* Rule (a): If one of the classes is MEMORY, the whole argument is
613 passed in memory. */
614 if (class[0] == AMD64_MEMORY || class[1] == AMD64_MEMORY)
615 class[0] = class[1] = AMD64_MEMORY;
616
617 /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
618 SSE. */
619 if (class[0] == AMD64_SSEUP)
620 class[0] = AMD64_SSE;
621 if (class[1] == AMD64_SSEUP && class[0] != AMD64_SSE)
622 class[1] = AMD64_SSE;
623 }
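/* Worked example (not part of the original source): for

     struct { int a; int b; double d; };

   the two ints land in the first eightbyte and merge to
   AMD64_INTEGER, the double lands in the second eightbyte as
   AMD64_SSE, so the result is class[0] = AMD64_INTEGER and
   class[1] = AMD64_SSE: such a struct travels in one integer
   register and one SSE register.  */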
624
625 /* Classify TYPE, and store the result in CLASS. */
626
627 static void
628 amd64_classify (struct type *type, enum amd64_reg_class class[2])
629 {
630 enum type_code code = TYPE_CODE (type);
631 int len = TYPE_LENGTH (type);
632
633 class[0] = class[1] = AMD64_NO_CLASS;
634
635 /* Arguments of types (signed and unsigned) _Bool, char, short, int,
636 long, long long, and pointers are in the INTEGER class. Similarly,
637 range types, used by languages such as Ada, are also in the INTEGER
638 class. */
639 if ((code == TYPE_CODE_INT || code == TYPE_CODE_ENUM
640 || code == TYPE_CODE_BOOL || code == TYPE_CODE_RANGE
641 || code == TYPE_CODE_CHAR
642 || code == TYPE_CODE_PTR || code == TYPE_CODE_REF)
643 && (len == 1 || len == 2 || len == 4 || len == 8))
644 class[0] = AMD64_INTEGER;
645
646 /* Arguments of types float, double, _Decimal32, _Decimal64 and __m64
647 are in class SSE. */
648 else if ((code == TYPE_CODE_FLT || code == TYPE_CODE_DECFLOAT)
649 && (len == 4 || len == 8))
650 /* FIXME: __m64 . */
651 class[0] = AMD64_SSE;
652
653 /* Arguments of types __float128, _Decimal128 and __m128 are split into
654 two halves. The least significant ones belong to class SSE, the most
655 significant one to class SSEUP. */
656 else if (code == TYPE_CODE_DECFLOAT && len == 16)
657 /* FIXME: __float128, __m128. */
658 class[0] = AMD64_SSE, class[1] = AMD64_SSEUP;
659
660 /* The 64-bit mantissa of arguments of type long double belongs to
661 class X87, the 16-bit exponent plus 6 bytes of padding belongs to
662 class X87UP. */
663 else if (code == TYPE_CODE_FLT && len == 16)
664 /* Class X87 and X87UP. */
665 class[0] = AMD64_X87, class[1] = AMD64_X87UP;
666
667 /* Arguments of complex T where T is one of the types float or
668 double get treated as if they are implemented as:
669
670 struct complexT {
671 T real;
672 T imag;
673 }; */
674 else if (code == TYPE_CODE_COMPLEX && len == 8)
675 class[0] = AMD64_SSE;
676 else if (code == TYPE_CODE_COMPLEX && len == 16)
677 class[0] = class[1] = AMD64_SSE;
678
679 /* A variable of type complex long double is classified as type
680 COMPLEX_X87. */
681 else if (code == TYPE_CODE_COMPLEX && len == 32)
682 class[0] = AMD64_COMPLEX_X87;
683
684 /* Aggregates. */
685 else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
686 || code == TYPE_CODE_UNION)
687 amd64_classify_aggregate (type, class);
688 }
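/* A few illustrative outcomes of the classification above (examples
   added for clarity, not in the original): a long or any 8-byte
   pointer gets {INTEGER, NO_CLASS}; a double gets {SSE, NO_CLASS};
   a 16-byte long double gets {X87, X87UP}; a 16-byte complex double
   gets {SSE, SSE}; a 32-byte complex long double gets
   {COMPLEX_X87, NO_CLASS}.  */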
689
690 static enum return_value_convention
691 amd64_return_value (struct gdbarch *gdbarch, struct value *function,
692 struct type *type, struct regcache *regcache,
693 gdb_byte *readbuf, const gdb_byte *writebuf)
694 {
695 enum amd64_reg_class class[2];
696 int len = TYPE_LENGTH (type);
697 static int integer_regnum[] = { AMD64_RAX_REGNUM, AMD64_RDX_REGNUM };
698 static int sse_regnum[] = { AMD64_XMM0_REGNUM, AMD64_XMM1_REGNUM };
699 int integer_reg = 0;
700 int sse_reg = 0;
701 int i;
702
703 gdb_assert (!(readbuf && writebuf));
704
705 /* 1. Classify the return type with the classification algorithm. */
706 amd64_classify (type, class);
707
708 /* 2. If the type has class MEMORY, then the caller provides space
709 for the return value and passes the address of this storage in
710 %rdi as if it were the first argument to the function. In effect,
711 this address becomes a hidden first argument.
712
713 On return %rax will contain the address that has been passed in
714 by the caller in %rdi. */
715 if (class[0] == AMD64_MEMORY)
716 {
717 /* As indicated by the comment above, the ABI guarantees that we
718 can always find the return value just after the function has
719 returned. */
720
721 if (readbuf)
722 {
723 ULONGEST addr;
724
725 regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
726 read_memory (addr, readbuf, TYPE_LENGTH (type));
727 }
728
729 return RETURN_VALUE_ABI_RETURNS_ADDRESS;
730 }
731
732 /* 8. If the class is COMPLEX_X87, the real part of the value is
733 returned in %st0 and the imaginary part in %st1. */
734 if (class[0] == AMD64_COMPLEX_X87)
735 {
736 if (readbuf)
737 {
738 regcache_raw_read (regcache, AMD64_ST0_REGNUM, readbuf);
739 regcache_raw_read (regcache, AMD64_ST1_REGNUM, readbuf + 16);
740 }
741
742 if (writebuf)
743 {
744 i387_return_value (gdbarch, regcache);
745 regcache_raw_write (regcache, AMD64_ST0_REGNUM, writebuf);
746 regcache_raw_write (regcache, AMD64_ST1_REGNUM, writebuf + 16);
747
748 /* Fix up the tag word such that both %st(0) and %st(1) are
749 marked as valid. */
750 regcache_raw_write_unsigned (regcache, AMD64_FTAG_REGNUM, 0xfff);
751 }
752
753 return RETURN_VALUE_REGISTER_CONVENTION;
754 }
755
756 gdb_assert (class[1] != AMD64_MEMORY);
757 gdb_assert (len <= 16);
758
759 for (i = 0; len > 0; i++, len -= 8)
760 {
761 int regnum = -1;
762 int offset = 0;
763
764 switch (class[i])
765 {
766 case AMD64_INTEGER:
767 /* 3. If the class is INTEGER, the next available register
768 of the sequence %rax, %rdx is used. */
769 regnum = integer_regnum[integer_reg++];
770 break;
771
772 case AMD64_SSE:
773 /* 4. If the class is SSE, the next available SSE register
774 of the sequence %xmm0, %xmm1 is used. */
775 regnum = sse_regnum[sse_reg++];
776 break;
777
778 case AMD64_SSEUP:
779 /* 5. If the class is SSEUP, the eightbyte is passed in the
780 upper half of the last used SSE register. */
781 gdb_assert (sse_reg > 0);
782 regnum = sse_regnum[sse_reg - 1];
783 offset = 8;
784 break;
785
786 case AMD64_X87:
787 /* 6. If the class is X87, the value is returned on the X87
788 stack in %st0 as 80-bit x87 number. */
789 regnum = AMD64_ST0_REGNUM;
790 if (writebuf)
791 i387_return_value (gdbarch, regcache);
792 break;
793
794 case AMD64_X87UP:
795 /* 7. If the class is X87UP, the value is returned together
796 with the previous X87 value in %st0. */
797 gdb_assert (i > 0 && class[0] == AMD64_X87);
798 regnum = AMD64_ST0_REGNUM;
799 offset = 8;
800 len = 2;
801 break;
802
803 case AMD64_NO_CLASS:
804 continue;
805
806 default:
807 gdb_assert (!"Unexpected register class.");
808 }
809
810 gdb_assert (regnum != -1);
811
812 if (readbuf)
813 regcache_raw_read_part (regcache, regnum, offset, min (len, 8),
814 readbuf + i * 8);
815 if (writebuf)
816 regcache_raw_write_part (regcache, regnum, offset, min (len, 8),
817 writebuf + i * 8);
818 }
819
820 return RETURN_VALUE_REGISTER_CONVENTION;
821 }
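/* Worked example (illustrative, not from the original source): a
   function returning

     struct { long l; double d; };

   classifies as {INTEGER, SSE}, so the first eightbyte is read from
   or written to %rax and the second to %xmm0, per steps 3 and 4
   above.  A 32-byte struct would instead classify as MEMORY and be
   returned through the hidden pointer, with %rax holding its address
   on return.  */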
822 \f
823
824 static CORE_ADDR
825 amd64_push_arguments (struct regcache *regcache, int nargs,
826 struct value **args, CORE_ADDR sp, int struct_return)
827 {
828 static int integer_regnum[] =
829 {
830 AMD64_RDI_REGNUM, /* %rdi */
831 AMD64_RSI_REGNUM, /* %rsi */
832 AMD64_RDX_REGNUM, /* %rdx */
833 AMD64_RCX_REGNUM, /* %rcx */
834 AMD64_R8_REGNUM, /* %r8 */
835 AMD64_R9_REGNUM /* %r9 */
836 };
837 static int sse_regnum[] =
838 {
839 /* %xmm0 ... %xmm7 */
840 AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
841 AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
842 AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
843 AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
844 };
845 struct value **stack_args = alloca (nargs * sizeof (struct value *));
846 int num_stack_args = 0;
847 int num_elements = 0;
848 int element = 0;
849 int integer_reg = 0;
850 int sse_reg = 0;
851 int i;
852
853 /* Reserve a register for the "hidden" argument. */
854 if (struct_return)
855 integer_reg++;
856
857 for (i = 0; i < nargs; i++)
858 {
859 struct type *type = value_type (args[i]);
860 int len = TYPE_LENGTH (type);
861 enum amd64_reg_class class[2];
862 int needed_integer_regs = 0;
863 int needed_sse_regs = 0;
864 int j;
865
866 /* Classify argument. */
867 amd64_classify (type, class);
868
869 /* Calculate the number of integer and SSE registers needed for
870 this argument. */
871 for (j = 0; j < 2; j++)
872 {
873 if (class[j] == AMD64_INTEGER)
874 needed_integer_regs++;
875 else if (class[j] == AMD64_SSE)
876 needed_sse_regs++;
877 }
878
879 /* Check whether enough registers are available, and if the
880 argument should be passed in registers at all. */
881 if (integer_reg + needed_integer_regs > ARRAY_SIZE (integer_regnum)
882 || sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum)
883 || (needed_integer_regs == 0 && needed_sse_regs == 0))
884 {
885 /* The argument will be passed on the stack. */
886 num_elements += ((len + 7) / 8);
887 stack_args[num_stack_args++] = args[i];
888 }
889 else
890 {
891 /* The argument will be passed in registers. */
892 const gdb_byte *valbuf = value_contents (args[i]);
893 gdb_byte buf[8];
894
895 gdb_assert (len <= 16);
896
897 for (j = 0; len > 0; j++, len -= 8)
898 {
899 int regnum = -1;
900 int offset = 0;
901
902 switch (class[j])
903 {
904 case AMD64_INTEGER:
905 regnum = integer_regnum[integer_reg++];
906 break;
907
908 case AMD64_SSE:
909 regnum = sse_regnum[sse_reg++];
910 break;
911
912 case AMD64_SSEUP:
913 gdb_assert (sse_reg > 0);
914 regnum = sse_regnum[sse_reg - 1];
915 offset = 8;
916 break;
917
918 default:
919 gdb_assert (!"Unexpected register class.");
920 }
921
922 gdb_assert (regnum != -1);
923 memset (buf, 0, sizeof buf);
924 memcpy (buf, valbuf + j * 8, min (len, 8));
925 regcache_raw_write_part (regcache, regnum, offset, 8, buf);
926 }
927 }
928 }
929
930 /* Allocate space for the arguments on the stack. */
931 sp -= num_elements * 8;
932
933 /* The psABI says that "The end of the input argument area shall be
934 aligned on a 16 byte boundary." */
935 sp &= ~0xf;
936
937 /* Write out the arguments to the stack. */
938 for (i = 0; i < num_stack_args; i++)
939 {
940 struct type *type = value_type (stack_args[i]);
941 const gdb_byte *valbuf = value_contents (stack_args[i]);
942 int len = TYPE_LENGTH (type);
943
944 write_memory (sp + element * 8, valbuf, len);
945 element += ((len + 7) / 8);
946 }
947
948 /* The psABI says that "For calls that may call functions that use
949 varargs or stdargs (prototype-less calls or calls to functions
950 containing ellipsis (...) in the declaration) %al is used as
951 hidden argument to specify the number of SSE registers used." */
952 regcache_raw_write_unsigned (regcache, AMD64_RAX_REGNUM, sse_reg);
953 return sp;
954 }
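/* Illustrative call setup (example added here, not part of the
   original source): for a call f (1, 2.0, big), with no hidden
   struct-return argument and big a 24-byte struct, the int is passed
   in %rdi, the double in %xmm0, and the struct, being larger than
   two eightbytes (class MEMORY), occupies three eightbytes of stack;
   %al is set to 1 because one SSE register was used.  */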
955
956 static CORE_ADDR
957 amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
958 struct regcache *regcache, CORE_ADDR bp_addr,
959 int nargs, struct value **args, CORE_ADDR sp,
960 int struct_return, CORE_ADDR struct_addr)
961 {
962 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
963 gdb_byte buf[8];
964
965 /* Pass arguments. */
966 sp = amd64_push_arguments (regcache, nargs, args, sp, struct_return);
967
968 /* Pass "hidden" argument. */
969 if (struct_return)
970 {
971 store_unsigned_integer (buf, 8, byte_order, struct_addr);
972 regcache_cooked_write (regcache, AMD64_RDI_REGNUM, buf);
973 }
974
975 /* Store return address. */
976 sp -= 8;
977 store_unsigned_integer (buf, 8, byte_order, bp_addr);
978 write_memory (sp, buf, 8);
979
980 /* Finally, update the stack pointer... */
981 store_unsigned_integer (buf, 8, byte_order, sp);
982 regcache_cooked_write (regcache, AMD64_RSP_REGNUM, buf);
983
984 /* ...and fake a frame pointer. */
985 regcache_cooked_write (regcache, AMD64_RBP_REGNUM, buf);
986
987 return sp + 16;
988 }
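/* After the sequence above the inferior stack is laid out as follows
   (a descriptive sketch, not from the original source):

     %rsp + 0: return address (BP_ADDR, the breakpoint address)
     %rsp + 8: first stack-passed eightbyte, if any

   with %rbp faked to equal %rsp so the dummy frame looks like a
   normal frame to the unwinder.  */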
989 \f
990 /* Displaced instruction handling. */
991
992 /* A partially decoded instruction.
993 This contains enough details for displaced stepping purposes. */
994
995 struct amd64_insn
996 {
997 /* The number of opcode bytes. */
998 int opcode_len;
999 /* The offset of the rex prefix or -1 if not present. */
1000 int rex_offset;
1001 /* The offset to the first opcode byte. */
1002 int opcode_offset;
1003 /* The offset to the modrm byte or -1 if not present. */
1004 int modrm_offset;
1005
1006 /* The raw instruction. */
1007 gdb_byte *raw_insn;
1008 };
1009
1010 struct displaced_step_closure
1011 {
1012 /* For rip-relative insns, saved copy of the reg we use instead of %rip. */
1013 int tmp_used;
1014 int tmp_regno;
1015 ULONGEST tmp_save;
1016
1017 /* Details of the instruction. */
1018 struct amd64_insn insn_details;
1019
1020 /* Amount of space allocated to insn_buf. */
1021 int max_len;
1022
1023 /* The possibly modified insn.
1024 This is a variable-length field. */
1025 gdb_byte insn_buf[1];
1026 };
1027
1028 /* WARNING: Keep onebyte_has_modrm, twobyte_has_modrm in sync with
1029 ../opcodes/i386-dis.c (until libopcodes exports them, or an alternative,
1030 at which point delete these in favor of libopcodes' versions). */
1031
1032 static const unsigned char onebyte_has_modrm[256] = {
1033 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
1034 /* ------------------------------- */
1035 /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
1036 /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
1037 /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
1038 /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
1039 /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
1040 /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
1041 /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
1042 /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
1043 /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
1044 /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
1045 /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
1046 /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
1047 /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
1048 /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
1049 /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
1050 /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1 /* f0 */
1051 /* ------------------------------- */
1052 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
1053 };
1054
1055 static const unsigned char twobyte_has_modrm[256] = {
1056 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
1057 /* ------------------------------- */
1058 /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
1059 /* 10 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 1f */
1060 /* 20 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 2f */
1061 /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
1062 /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
1063 /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
1064 /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
1065 /* 70 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 7f */
1066 /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
1067 /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
1068 /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
1069 /* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */
1070 /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
1071 /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
1072 /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
1073 /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0 /* ff */
1074 /* ------------------------------- */
1075 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
1076 };
1077
1078 static int amd64_syscall_p (const struct amd64_insn *insn, int *lengthp);
1079
1080 static int
1081 rex_prefix_p (gdb_byte pfx)
1082 {
1083 return REX_PREFIX_P (pfx);
1084 }
1085
1086 /* Skip the legacy instruction prefixes in INSN.
1087 We assume INSN is properly sentineled so we don't have to worry
1088 about falling off the end of the buffer. */
1089
1090 static gdb_byte *
1091 amd64_skip_prefixes (gdb_byte *insn)
1092 {
1093 while (1)
1094 {
1095 switch (*insn)
1096 {
1097 case DATA_PREFIX_OPCODE:
1098 case ADDR_PREFIX_OPCODE:
1099 case CS_PREFIX_OPCODE:
1100 case DS_PREFIX_OPCODE:
1101 case ES_PREFIX_OPCODE:
1102 case FS_PREFIX_OPCODE:
1103 case GS_PREFIX_OPCODE:
1104 case SS_PREFIX_OPCODE:
1105 case LOCK_PREFIX_OPCODE:
1106 case REPE_PREFIX_OPCODE:
1107 case REPNE_PREFIX_OPCODE:
1108 ++insn;
1109 continue;
1110 default:
1111 break;
1112 }
1113 break;
1114 }
1115
1116 return insn;
1117 }
1118
1119 /* Return an integer register (other than RSP) that is unused as an input
1120 operand in INSN.
1121 In order to not require adding a rex prefix if the insn doesn't already
1122 have one, the result is restricted to RAX ... RDI, sans RSP.
1123 The register numbering of the result follows architecture ordering,
1124 e.g. RDI = 7. */
1125
1126 static int
1127 amd64_get_unused_input_int_reg (const struct amd64_insn *details)
1128 {
1129 /* 1 bit for each reg */
1130 int used_regs_mask = 0;
1131
1132 /* There can be at most 3 int regs used as inputs in an insn, and we have
1133 7 to choose from (RAX ... RDI, sans RSP).
1134 This allows us to take a conservative approach and keep things simple.
1135 E.g. By avoiding RAX, we don't have to specifically watch for opcodes
1136 that implicitly specify RAX. */
1137
1138 /* Avoid RAX. */
1139 used_regs_mask |= 1 << EAX_REG_NUM;
1140 /* Similarly avoid RDX, implicit operand in divides. */
1141 used_regs_mask |= 1 << EDX_REG_NUM;
1142 /* Avoid RSP. */
1143 used_regs_mask |= 1 << ESP_REG_NUM;
1144
1145 /* If the opcode is one byte long and there's no ModRM byte,
1146 assume the opcode specifies a register. */
1147 if (details->opcode_len == 1 && details->modrm_offset == -1)
1148 used_regs_mask |= 1 << (details->raw_insn[details->opcode_offset] & 7);
1149
1150 /* Mark used regs in the modrm/sib bytes. */
1151 if (details->modrm_offset != -1)
1152 {
1153 int modrm = details->raw_insn[details->modrm_offset];
1154 int mod = MODRM_MOD_FIELD (modrm);
1155 int reg = MODRM_REG_FIELD (modrm);
1156 int rm = MODRM_RM_FIELD (modrm);
1157 int have_sib = mod != 3 && rm == 4;
1158
1159 /* Assume the reg field of the modrm byte specifies a register. */
1160 used_regs_mask |= 1 << reg;
1161
1162 if (have_sib)
1163 {
1164 int base = SIB_BASE_FIELD (details->raw_insn[details->modrm_offset + 1]);
1165 int idx = SIB_INDEX_FIELD (details->raw_insn[details->modrm_offset + 1]);
1166 used_regs_mask |= 1 << base;
1167 used_regs_mask |= 1 << idx;
1168 }
1169 else
1170 {
1171 used_regs_mask |= 1 << rm;
1172 }
1173 }
1174
1175 gdb_assert (used_regs_mask < 256);
1176 gdb_assert (used_regs_mask != 255);
1177
1178 /* Finally, find a free reg. */
1179 {
1180 int i;
1181
1182 for (i = 0; i < 8; ++i)
1183 {
1184 if (! (used_regs_mask & (1 << i)))
1185 return i;
1186 }
1187
1188 /* We shouldn't get here. */
1189 internal_error (__FILE__, __LINE__, _("unable to find free reg"));
1190 }
1191 }
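/* Worked example (illustrative only): for "add %rsi, (%rbx)" the
   ModRM byte marks %rsi (reg field) and %rbx (rm field) as used;
   together with the always-avoided %rax, %rdx and %rsp that leaves
   %rcx as the lowest free register, so 1 is returned.  */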
1192
1193 /* Extract the details of INSN that we need. */
1194
1195 static void
1196 amd64_get_insn_details (gdb_byte *insn, struct amd64_insn *details)
1197 {
1198 gdb_byte *start = insn;
1199 int need_modrm;
1200
1201 details->raw_insn = insn;
1202
1203 details->opcode_len = -1;
1204 details->rex_offset = -1;
1205 details->opcode_offset = -1;
1206 details->modrm_offset = -1;
1207
1208 /* Skip legacy instruction prefixes. */
1209 insn = amd64_skip_prefixes (insn);
1210
1211 /* Skip REX instruction prefix. */
1212 if (rex_prefix_p (*insn))
1213 {
1214 details->rex_offset = insn - start;
1215 ++insn;
1216 }
1217
1218 details->opcode_offset = insn - start;
1219
1220 if (*insn == TWO_BYTE_OPCODE_ESCAPE)
1221 {
1222 /* Two or three-byte opcode. */
1223 ++insn;
1224 need_modrm = twobyte_has_modrm[*insn];
1225
1226 /* Check for three-byte opcode. */
1227 switch (*insn)
1228 {
1229 case 0x24:
1230 case 0x25:
1231 case 0x38:
1232 case 0x3a:
1233 case 0x7a:
1234 case 0x7b:
1235 ++insn;
1236 details->opcode_len = 3;
1237 break;
1238 default:
1239 details->opcode_len = 2;
1240 break;
1241 }
1242 }
1243 else
1244 {
1245 /* One-byte opcode. */
1246 need_modrm = onebyte_has_modrm[*insn];
1247 details->opcode_len = 1;
1248 }
1249
1250 if (need_modrm)
1251 {
1252 ++insn;
1253 details->modrm_offset = insn - start;
1254 }
1255 }
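/* Example decode (added for illustration): for the 7-byte insn
   48 8b 05 d2 04 00 00 (mov 0x4d2(%rip),%rax) the details are
   rex_offset = 0, opcode_offset = 1, opcode_len = 1 and
   modrm_offset = 2, since 0x48 is a REX prefix and the one-byte
   opcode 0x8b takes a ModRM byte.  */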
1256
1257 /* Update %rip-relative addressing in INSN.
1258
1259 %rip-relative addressing only uses a 32-bit displacement.
1260 32 bits is not enough to be guaranteed to cover the distance between where
1261 the real instruction is and where its copy is.
1262 Convert the insn to use base+disp addressing.
1263 We set base = pc + insn_length so we can leave disp unchanged. */
1264
1265 static void
1266 fixup_riprel (struct gdbarch *gdbarch, struct displaced_step_closure *dsc,
1267 CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
1268 {
1269 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1270 const struct amd64_insn *insn_details = &dsc->insn_details;
1271 int modrm_offset = insn_details->modrm_offset;
1272 gdb_byte *insn = insn_details->raw_insn + modrm_offset;
1273 CORE_ADDR rip_base;
1274 int32_t disp;
1275 int insn_length;
1276 int arch_tmp_regno, tmp_regno;
1277 ULONGEST orig_value;
1278
1279 /* %rip+disp32 addressing mode, displacement follows ModRM byte. */
1280 ++insn;
1281
1282 /* Compute the rip-relative address. */
1283 disp = extract_signed_integer (insn, sizeof (int32_t), byte_order);
1284 insn_length = gdb_buffered_insn_length (gdbarch, dsc->insn_buf,
1285 dsc->max_len, from);
1286 rip_base = from + insn_length;
1287
1288 /* We need a register to hold the address.
1289 Pick one not used in the insn.
1290 NOTE: arch_tmp_regno uses architecture ordering, e.g. RDI = 7. */
1291 arch_tmp_regno = amd64_get_unused_input_int_reg (insn_details);
1292 tmp_regno = amd64_arch_reg_to_regnum (arch_tmp_regno);
1293
1294 /* REX.B should be unset as we were using rip-relative addressing,
1295 but ensure it's unset anyway; tmp_regno is not r8-r15. */
1296 if (insn_details->rex_offset != -1)
1297 dsc->insn_buf[insn_details->rex_offset] &= ~REX_B;
1298
1299 regcache_cooked_read_unsigned (regs, tmp_regno, &orig_value);
1300 dsc->tmp_regno = tmp_regno;
1301 dsc->tmp_save = orig_value;
1302 dsc->tmp_used = 1;
1303
1304 /* Convert the ModRM field to be base+disp. */
1305 dsc->insn_buf[modrm_offset] &= ~0xc7;
1306 dsc->insn_buf[modrm_offset] |= 0x80 + arch_tmp_regno;
1307
1308 regcache_cooked_write_unsigned (regs, tmp_regno, rip_base);
1309
1310 if (debug_displaced)
1311 fprintf_unfiltered (gdb_stdlog, "displaced: %%rip-relative addressing used.\n"
1312 "displaced: using temp reg %d, old value %s, new value %s\n",
1313 dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save),
1314 paddress (gdbarch, rip_base));
1315 }
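/* Worked example (an illustration, not from the original source):
   stepping over 48 8b 05 d2 04 00 00 (mov 0x4d2(%rip),%rax), the
   scratch register chosen is %rcx, the ModRM byte 0x05 is rewritten
   to 0x81 (mod = 10, rm = 001), and %rcx is loaded with FROM + 7,
   turning the copy into mov 0x4d2(%rcx),%rax with the displacement
   left unchanged.  */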
1316
1317 static void
1318 fixup_displaced_copy (struct gdbarch *gdbarch,
1319 struct displaced_step_closure *dsc,
1320 CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
1321 {
1322 const struct amd64_insn *details = &dsc->insn_details;
1323
1324 if (details->modrm_offset != -1)
1325 {
1326 gdb_byte modrm = details->raw_insn[details->modrm_offset];
1327
1328 if ((modrm & 0xc7) == 0x05)
1329 {
1330 /* The insn uses rip-relative addressing.
1331 Deal with it. */
1332 fixup_riprel (gdbarch, dsc, from, to, regs);
1333 }
1334 }
1335 }
1336
1337 struct displaced_step_closure *
1338 amd64_displaced_step_copy_insn (struct gdbarch *gdbarch,
1339 CORE_ADDR from, CORE_ADDR to,
1340 struct regcache *regs)
1341 {
1342 int len = gdbarch_max_insn_length (gdbarch);
1343 /* Extra space for sentinels so fixup_{riprel,displaced_copy} don't have to
1344 continually watch for running off the end of the buffer. */
1345 int fixup_sentinel_space = len;
1346 struct displaced_step_closure *dsc =
1347 xmalloc (sizeof (*dsc) + len + fixup_sentinel_space);
1348 gdb_byte *buf = &dsc->insn_buf[0];
1349 struct amd64_insn *details = &dsc->insn_details;
1350
1351 dsc->tmp_used = 0;
1352 dsc->max_len = len + fixup_sentinel_space;
1353
1354 read_memory (from, buf, len);
1355
1356 /* Set up the sentinel space so we don't have to worry about running
1357 off the end of the buffer. An excessive number of leading prefixes
1358 could otherwise cause this. */
1359 memset (buf + len, 0, fixup_sentinel_space);
1360
1361 amd64_get_insn_details (buf, details);
1362
1363 /* GDB may get control back after the insn after the syscall.
1364 Presumably this is a kernel bug.
1365 If this is a syscall, make sure there's a nop afterwards. */
1366 {
1367 int syscall_length;
1368
1369 if (amd64_syscall_p (details, &syscall_length))
1370 buf[details->opcode_offset + syscall_length] = NOP_OPCODE;
1371 }
1372
1373 /* Modify the insn to cope with the address where it will be executed from.
1374 In particular, handle any rip-relative addressing. */
1375 fixup_displaced_copy (gdbarch, dsc, from, to, regs);
1376
1377 write_memory (to, buf, len);
1378
1379 if (debug_displaced)
1380 {
1381 fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
1382 paddress (gdbarch, from), paddress (gdbarch, to));
1383 displaced_step_dump_bytes (gdb_stdlog, buf, len);
1384 }
1385
1386 return dsc;
1387 }
1388
1389 static int
1390 amd64_absolute_jmp_p (const struct amd64_insn *details)
1391 {
1392 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1393
1394 if (insn[0] == 0xff)
1395 {
1396 /* jump near, absolute indirect (/4) */
1397 if ((insn[1] & 0x38) == 0x20)
1398 return 1;
1399
1400 /* jump far, absolute indirect (/5) */
1401 if ((insn[1] & 0x38) == 0x28)
1402 return 1;
1403 }
1404
1405 return 0;
1406 }
1407
1408 /* Return non-zero if the instruction DETAILS is a jump, zero otherwise. */
1409
1410 static int
1411 amd64_jmp_p (const struct amd64_insn *details)
1412 {
1413 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1414
1415 /* jump short, relative. */
1416 if (insn[0] == 0xeb)
1417 return 1;
1418
1419 /* jump near, relative. */
1420 if (insn[0] == 0xe9)
1421 return 1;
1422
1423 return amd64_absolute_jmp_p (details);
1424 }
1425
1426 static int
1427 amd64_absolute_call_p (const struct amd64_insn *details)
1428 {
1429 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1430
1431 if (insn[0] == 0xff)
1432 {
1433 /* Call near, absolute indirect (/2) */
1434 if ((insn[1] & 0x38) == 0x10)
1435 return 1;
1436
1437 /* Call far, absolute indirect (/3) */
1438 if ((insn[1] & 0x38) == 0x18)
1439 return 1;
1440 }
1441
1442 return 0;
1443 }
1444
1445 static int
1446 amd64_ret_p (const struct amd64_insn *details)
1447 {
1448 /* NOTE: gcc can emit "repz ; ret". */
1449 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1450
1451 switch (insn[0])
1452 {
1453 case 0xc2: /* ret near, pop N bytes */
1454 case 0xc3: /* ret near */
1455 case 0xca: /* ret far, pop N bytes */
1456 case 0xcb: /* ret far */
1457 case 0xcf: /* iret */
1458 return 1;
1459
1460 default:
1461 return 0;
1462 }
1463 }
1464
1465 static int
1466 amd64_call_p (const struct amd64_insn *details)
1467 {
1468 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1469
1470 if (amd64_absolute_call_p (details))
1471 return 1;
1472
1473 /* call near, relative */
1474 if (insn[0] == 0xe8)
1475 return 1;
1476
1477 return 0;
1478 }
1479
1480 /* Return non-zero if INSN is a system call, and set *LENGTHP to its
1481 length in bytes. Otherwise, return zero. */
1482
1483 static int
1484 amd64_syscall_p (const struct amd64_insn *details, int *lengthp)
1485 {
1486 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1487
1488 if (insn[0] == 0x0f && insn[1] == 0x05)
1489 {
1490 *lengthp = 2;
1491 return 1;
1492 }
1493
1494 return 0;
1495 }
1496
1497 /* Classify the instruction at ADDR using PRED.
1498 Throw an error if the memory can't be read. */
1499
1500 static int
1501 amd64_classify_insn_at (struct gdbarch *gdbarch, CORE_ADDR addr,
1502 int (*pred) (const struct amd64_insn *))
1503 {
1504 struct amd64_insn details;
1505 gdb_byte *buf;
1506 int len, classification;
1507
1508 len = gdbarch_max_insn_length (gdbarch);
1509 buf = alloca (len);
1510
1511 read_code (addr, buf, len);
1512 amd64_get_insn_details (buf, &details);
1513
1514 classification = pred (&details);
1515
1516 return classification;
1517 }
1518
1519 /* The gdbarch insn_is_call method. */
1520
1521 static int
1522 amd64_insn_is_call (struct gdbarch *gdbarch, CORE_ADDR addr)
1523 {
1524 return amd64_classify_insn_at (gdbarch, addr, amd64_call_p);
1525 }
1526
1527 /* The gdbarch insn_is_ret method. */
1528
1529 static int
1530 amd64_insn_is_ret (struct gdbarch *gdbarch, CORE_ADDR addr)
1531 {
1532 return amd64_classify_insn_at (gdbarch, addr, amd64_ret_p);
1533 }
1534
1535 /* The gdbarch insn_is_jump method. */
1536
1537 static int
1538 amd64_insn_is_jump (struct gdbarch *gdbarch, CORE_ADDR addr)
1539 {
1540 return amd64_classify_insn_at (gdbarch, addr, amd64_jmp_p);
1541 }
1542
1543 /* Fix up the state of registers and memory after having single-stepped
1544 a displaced instruction. */
1545
1546 void
1547 amd64_displaced_step_fixup (struct gdbarch *gdbarch,
1548 struct displaced_step_closure *dsc,
1549 CORE_ADDR from, CORE_ADDR to,
1550 struct regcache *regs)
1551 {
1552 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1553 /* The offset we applied to the instruction's address. */
1554 ULONGEST insn_offset = to - from;
1555 gdb_byte *insn = dsc->insn_buf;
1556 const struct amd64_insn *insn_details = &dsc->insn_details;
1557
1558 if (debug_displaced)
1559 fprintf_unfiltered (gdb_stdlog,
1560 "displaced: fixup (%s, %s), "
1561 "insn = 0x%02x 0x%02x ...\n",
1562 paddress (gdbarch, from), paddress (gdbarch, to),
1563 insn[0], insn[1]);
1564
1565 /* If we used a tmp reg, restore it. */
1566
1567 if (dsc->tmp_used)
1568 {
1569 if (debug_displaced)
1570 fprintf_unfiltered (gdb_stdlog, "displaced: restoring reg %d to %s\n",
1571 dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save));
1572 regcache_cooked_write_unsigned (regs, dsc->tmp_regno, dsc->tmp_save);
1573 }
1574
1575 /* The list of issues to contend with here is taken from
1576 resume_execution in arch/x86/kernel/kprobes.c, Linux 2.6.28.
1577 Yay for Free Software! */
1578
1579 /* Relocate the %rip back to the program's instruction stream,
1580 if necessary. */
1581
1582 /* Except in the case of absolute or indirect jump or call
1583 instructions, or a return instruction, the new rip is relative to
1584 the displaced instruction; make it relative to the original insn.
1585 Well, signal handler returns don't need relocation either, but we use the
1586 value of %rip to recognize those; see below. */
1587 if (! amd64_absolute_jmp_p (insn_details)
1588 && ! amd64_absolute_call_p (insn_details)
1589 && ! amd64_ret_p (insn_details))
1590 {
1591 ULONGEST orig_rip;
1592 int insn_len;
1593
1594 regcache_cooked_read_unsigned (regs, AMD64_RIP_REGNUM, &orig_rip);
1595
1596 /* A signal trampoline system call changes the %rip, resuming
1597 execution of the main program after the signal handler has
1598 returned. That makes them like 'return' instructions; we
1599 shouldn't relocate %rip.
1600
1601 But most system calls don't, and we do need to relocate %rip.
1602
1603 Our heuristic for distinguishing these cases: if stepping
1604 over the system call instruction left control directly after
1605 the instruction, then we relocate --- control almost certainly
1606 doesn't belong in the displaced copy. Otherwise, we assume
1607 the instruction has put control where it belongs, and leave
1608 it unrelocated. Goodness help us if there are PC-relative
1609 system calls. */
1610 if (amd64_syscall_p (insn_details, &insn_len)
1611 && orig_rip != to + insn_len
1612 /* GDB can get control back after the insn after the syscall.
1613 Presumably this is a kernel bug.
1614 Fixup ensures it's a nop; we add one to the length for it. */
1615 && orig_rip != to + insn_len + 1)
1616 {
1617 if (debug_displaced)
1618 fprintf_unfiltered (gdb_stdlog,
1619 "displaced: syscall changed %%rip; "
1620 "not relocating\n");
1621 }
1622 else
1623 {
1624 ULONGEST rip = orig_rip - insn_offset;
1625
1626 /* If we just stepped over a breakpoint insn, we don't back up
1627 the pc on purpose; this is to match behaviour without
1628 stepping. */
1629
1630 regcache_cooked_write_unsigned (regs, AMD64_RIP_REGNUM, rip);
1631
1632 if (debug_displaced)
1633 fprintf_unfiltered (gdb_stdlog,
1634 "displaced: "
1635 "relocated %%rip from %s to %s\n",
1636 paddress (gdbarch, orig_rip),
1637 paddress (gdbarch, rip));
1638 }
1639 }
1640
1641 /* If the instruction was PUSHFL, then the TF bit will be set in the
1642 pushed value, and should be cleared. We'll leave this for later,
1643 since GDB already messes up the TF flag when stepping over a
1644 pushfl. */
1645
1646 /* If the instruction was a call, the return address now atop the
1647 stack is the address following the copied instruction. We need
1648 to make it the address following the original instruction. */
1649 if (amd64_call_p (insn_details))
1650 {
1651 ULONGEST rsp;
1652 ULONGEST retaddr;
1653 const ULONGEST retaddr_len = 8;
1654
1655 regcache_cooked_read_unsigned (regs, AMD64_RSP_REGNUM, &rsp);
1656 retaddr = read_memory_unsigned_integer (rsp, retaddr_len, byte_order);
1657 retaddr = (retaddr - insn_offset) & 0xffffffffffffffffULL;
1658 write_memory_unsigned_integer (rsp, retaddr_len, byte_order, retaddr);
1659
1660 if (debug_displaced)
1661 fprintf_unfiltered (gdb_stdlog,
1662 "displaced: relocated return addr at %s "
1663 "to %s\n",
1664 paddress (gdbarch, rsp),
1665 paddress (gdbarch, retaddr));
1666 }
1667 }
1668
1669 /* If the instruction INSN uses RIP-relative addressing, return the
1670 offset into the raw INSN where the displacement to be adjusted is
1671 found. Returns 0 if the instruction doesn't use RIP-relative
1672 addressing. */
1673
1674 static int
1675 rip_relative_offset (struct amd64_insn *insn)
1676 {
1677 if (insn->modrm_offset != -1)
1678 {
1679 gdb_byte modrm = insn->raw_insn[insn->modrm_offset];
1680
1681 if ((modrm & 0xc7) == 0x05)
1682 {
1683 /* The displacement is found right after the ModRM byte. */
1684 return insn->modrm_offset + 1;
1685 }
1686 }
1687
1688 return 0;
1689 }
1690
1691 static void
1692 append_insns (CORE_ADDR *to, ULONGEST len, const gdb_byte *buf)
1693 {
1694 target_write_memory (*to, buf, len);
1695 *to += len;
1696 }
1697
1698 static void
1699 amd64_relocate_instruction (struct gdbarch *gdbarch,
1700 CORE_ADDR *to, CORE_ADDR oldloc)
1701 {
1702 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1703 int len = gdbarch_max_insn_length (gdbarch);
1704 /* Extra space for sentinels. */
1705 int fixup_sentinel_space = len;
1706 gdb_byte *buf = xmalloc (len + fixup_sentinel_space);
1707 struct amd64_insn insn_details;
1708 int offset = 0;
1709 LONGEST rel32, newrel;
1710 gdb_byte *insn;
1711 int insn_length;
1712
1713 read_memory (oldloc, buf, len);
1714
1715 /* Set up the sentinel space so we don't have to worry about running
1716 off the end of the buffer. An excessive number of leading prefixes
1717 could otherwise cause this. */
1718 memset (buf + len, 0, fixup_sentinel_space);
1719
1720 insn = buf;
1721 amd64_get_insn_details (insn, &insn_details);
1722
1723 insn_length = gdb_buffered_insn_length (gdbarch, insn, len, oldloc);
1724
1725 /* Skip legacy instruction prefixes. */
1726 insn = amd64_skip_prefixes (insn);
1727
1728 /* Adjust calls with 32-bit relative addresses as push/jump, with
1729 the address pushed being the location where the original call in
1730 the user program would return to. */
1731 if (insn[0] == 0xe8)
1732 {
1733 gdb_byte push_buf[16];
1734 unsigned int ret_addr;
1735
1736 /* Where "ret" in the original code will return to. */
1737 ret_addr = oldloc + insn_length;
1738 push_buf[0] = 0x68; /* pushq $... */
1739 store_unsigned_integer (&push_buf[1], 4, byte_order, ret_addr);
1740 /* Push the push. */
1741 append_insns (to, 5, push_buf);
1742
1743 /* Convert the relative call to a relative jump. */
1744 insn[0] = 0xe9;
1745
1746 /* Adjust the destination offset. */
1747 rel32 = extract_signed_integer (insn + 1, 4, byte_order);
1748 newrel = (oldloc - *to) + rel32;
1749 store_signed_integer (insn + 1, 4, byte_order, newrel);
1750
1751 if (debug_displaced)
1752 fprintf_unfiltered (gdb_stdlog,
1753 "Adjusted insn rel32=%s at %s to"
1754 " rel32=%s at %s\n",
1755 hex_string (rel32), paddress (gdbarch, oldloc),
1756 hex_string (newrel), paddress (gdbarch, *to));
1757
1758 /* Write the adjusted jump into its displaced location. */
1759 append_insns (to, 5, insn);
1760 return;
1761 }
1762
1763 offset = rip_relative_offset (&insn_details);
1764 if (!offset)
1765 {
1766 /* Adjust jumps with 32-bit relative addresses. Calls are
1767 already handled above. */
1768 if (insn[0] == 0xe9)
1769 offset = 1;
1770 /* Adjust conditional jumps. */
1771 else if (insn[0] == 0x0f && (insn[1] & 0xf0) == 0x80)
1772 offset = 2;
1773 }
1774
1775 if (offset)
1776 {
1777 rel32 = extract_signed_integer (insn + offset, 4, byte_order);
1778 newrel = (oldloc - *to) + rel32;
1779 store_signed_integer (insn + offset, 4, byte_order, newrel);
1780 if (debug_displaced)
1781 fprintf_unfiltered (gdb_stdlog,
1782 "Adjusted insn rel32=%s at %s to"
1783 " rel32=%s at %s\n",
1784 hex_string (rel32), paddress (gdbarch, oldloc),
1785 hex_string (newrel), paddress (gdbarch, *to));
1786 }
1787
1788 /* Write the adjusted instruction into its displaced location. */
1789 append_insns (to, insn_length, buf);
1790 }
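/* Relocation example (illustrative only): a 5-byte "call rel32" at
   OLDLOC becomes, at the new location, a "pushq" of the original
   return address (OLDLOC + 5) followed by a "jmp rel32" whose
   displacement has been rebased by (OLDLOC - *TO), so the jump still
   lands on the original call target.  */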
1791
1792 \f
1793 /* The maximum number of saved registers. This should include %rip. */
1794 #define AMD64_NUM_SAVED_REGS AMD64_NUM_GREGS
1795
1796 struct amd64_frame_cache
1797 {
1798 /* Base address. */
1799 CORE_ADDR base;
1800 int base_p;
1801 CORE_ADDR sp_offset;
1802 CORE_ADDR pc;
1803
1804 /* Saved registers. */
1805 CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
1806 CORE_ADDR saved_sp;
1807 int saved_sp_reg;
1808
1809 /* Do we have a frame? */
1810 int frameless_p;
1811 };
1812
1813 /* Initialize a frame cache. */
1814
1815 static void
1816 amd64_init_frame_cache (struct amd64_frame_cache *cache)
1817 {
1818 int i;
1819
1820 /* Base address. */
1821 cache->base = 0;
1822 cache->base_p = 0;
1823 cache->sp_offset = -8;
1824 cache->pc = 0;
1825
1826 /* Saved registers. We initialize these to -1 since zero is a valid
1827 offset (that's where %rbp is supposed to be stored).
1828 The values start out as being offsets, and are later converted to
1829 addresses (at which point -1 is interpreted as an address, still meaning
1830 "invalid"). */
1831 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
1832 cache->saved_regs[i] = -1;
1833 cache->saved_sp = 0;
1834 cache->saved_sp_reg = -1;
1835
1836 /* Frameless until proven otherwise. */
1837 cache->frameless_p = 1;
1838 }
1839
1840 /* Allocate and initialize a frame cache. */
1841
1842 static struct amd64_frame_cache *
1843 amd64_alloc_frame_cache (void)
1844 {
1845 struct amd64_frame_cache *cache;
1846
1847 cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);
1848 amd64_init_frame_cache (cache);
1849 return cache;
1850 }
1851
1852 /* GCC 4.4 and later can put code in the prologue to realign the
1853 stack pointer. Check whether PC points to such code, and update
1854 CACHE accordingly. Return the address of the first instruction
1855 after the code sequence or CURRENT_PC, whichever is smaller. If
1856 we don't recognize the code, return PC. */
1857
1858 static CORE_ADDR
1859 amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
1860 struct amd64_frame_cache *cache)
1861 {
1862 /* There are 2 code sequences to re-align stack before the frame
1863 gets set up:
1864
1865 1. Use a caller-saved register:
1866
1867 leaq 8(%rsp), %reg
1868 andq $-XXX, %rsp
1869 pushq -8(%reg)
1870
1871 2. Use a callee-saved register:
1872
1873 pushq %reg
1874 leaq 16(%rsp), %reg
1875 andq $-XXX, %rsp
1876 pushq -8(%reg)
1877
1878 "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:
1879
1880 0x48 0x83 0xe4 0xf0 andq $-16, %rsp
1881 0x48 0x81 0xe4 0x00 0xff 0xff 0xff andq $-256, %rsp
1882 */
1883
1884 gdb_byte buf[18];
1885 int reg, r;
1886 int offset, offset_and;
1887
1888 if (target_read_code (pc, buf, sizeof buf))
1889 return pc;
1890
1891 /* Check the caller-saved register case. The first instruction has
1892 to be "leaq 8(%rsp), %reg". */
1893 if ((buf[0] & 0xfb) == 0x48
1894 && buf[1] == 0x8d
1895 && buf[3] == 0x24
1896 && buf[4] == 0x8)
1897 {
1898 /* MOD must be binary 10 and R/M must be binary 100. */
1899 if ((buf[2] & 0xc7) != 0x44)
1900 return pc;
1901
1902 /* REG has register number. */
1903 reg = (buf[2] >> 3) & 7;
1904
1905 /* Check the REX.R bit. */
1906 if (buf[0] == 0x4c)
1907 reg += 8;
1908
1909 offset = 5;
1910 }
1911 else
1912 {
1913 /* Check the callee-saved register case. The first instruction
1914 has to be "pushq %reg". */
1915 reg = 0;
1916 if ((buf[0] & 0xf8) == 0x50)
1917 offset = 0;
1918 else if ((buf[0] & 0xf6) == 0x40
1919 && (buf[1] & 0xf8) == 0x50)
1920 {
1921 /* Check the REX.B bit. */
1922 if ((buf[0] & 1) != 0)
1923 reg = 8;
1924
1925 offset = 1;
1926 }
1927 else
1928 return pc;
1929
1930 /* Get register. */
1931 reg += buf[offset] & 0x7;
1932
1933 offset++;
1934
1935 /* The next instruction has to be "leaq 16(%rsp), %reg". */
1936 if ((buf[offset] & 0xfb) != 0x48
1937 || buf[offset + 1] != 0x8d
1938 || buf[offset + 3] != 0x24
1939 || buf[offset + 4] != 0x10)
1940 return pc;
1941
1942 /* MOD must be binary 10 and R/M must be binary 100. */
1943 if ((buf[offset + 2] & 0xc7) != 0x44)
1944 return pc;
1945
1946 /* REG has register number. */
1947 r = (buf[offset + 2] >> 3) & 7;
1948
1949 /* Check the REX.R bit. */
1950 if (buf[offset] == 0x4c)
1951 r += 8;
1952
1953 /* Registers in pushq and leaq have to be the same. */
1954 if (reg != r)
1955 return pc;
1956
1957 offset += 5;
1958 }
1959
1960 /* Register can't be %rsp or %rbp. */
1961 if (reg == 4 || reg == 5)
1962 return pc;
1963
1964 /* The next instruction has to be "andq $-XXX, %rsp". */
1965 if (buf[offset] != 0x48
1966 || buf[offset + 2] != 0xe4
1967 || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
1968 return pc;
1969
1970 offset_and = offset;
1971 offset += buf[offset + 1] == 0x81 ? 7 : 4;
1972
1973 /* The next instruction has to be "pushq -8(%reg)". */
1974 r = 0;
1975 if (buf[offset] == 0xff)
1976 offset++;
1977 else if ((buf[offset] & 0xf6) == 0x40
1978 && buf[offset + 1] == 0xff)
1979 {
1980 /* Check the REX.B bit. */
1981 if ((buf[offset] & 0x1) != 0)
1982 r = 8;
1983 offset += 2;
1984 }
1985 else
1986 return pc;
1987
1988 /* 8-bit -8 is 0xf8. REG must be binary 110 and MOD must be binary
1989 01. */
1990 if (buf[offset + 1] != 0xf8
1991 || (buf[offset] & 0xf8) != 0x70)
1992 return pc;
1993
1994 /* R/M has register. */
1995 r += buf[offset] & 7;
1996
1997 /* Registers in leaq and pushq have to be the same. */
1998 if (reg != r)
1999 return pc;
2000
2001 if (current_pc > pc + offset_and)
2002 cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);
2003
2004 return min (pc + offset + 2, current_pc);
2005 }
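
/* All the ModRM checks above follow one decoding rule: a ModRM byte
   packs MOD in bits 7-6, REG in bits 5-3 and R/M in bits 2-0, and the
   REX.R/REX.B prefix bits widen REG/R/M to four bits.  A stand-alone
   sketch of the REG decoding used here, illustrative only:

       static int
       modrm_reg (unsigned char rex, unsigned char modrm)
       {
         int reg = (modrm >> 3) & 7;
         if (rex & 0x04)
           reg += 8;
         return reg;
       }

   For example, 0x4c 0x8d 0x64 0x24 0x08 is "leaq 8(%rsp), %r12":
   REX.R is set and the REG field is binary 100, so
   modrm_reg (0x4c, 0x64) yields 12.  */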
2006
2007 /* Similar to amd64_analyze_stack_align for x32. */
2008
2009 static CORE_ADDR
2010 amd64_x32_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
2011 struct amd64_frame_cache *cache)
2012 {
2013 /* There are 2 code sequences to re-align stack before the frame
2014 gets set up:
2015
2016 1. Use a caller-saved register:
2017
2018 leaq 8(%rsp), %reg
2019 andq $-XXX, %rsp
2020 pushq -8(%reg)
2021
2022 or
2023
2024 [addr32] leal 8(%rsp), %reg
2025 andl $-XXX, %esp
2026 [addr32] pushq -8(%reg)
2027
2028 2. Use a callee-saved register:
2029
2030 pushq %reg
2031 leaq 16(%rsp), %reg
2032 andq $-XXX, %rsp
2033 pushq -8(%reg)
2034
2035 or
2036
2037 pushq %reg
2038 [addr32] leal 16(%rsp), %reg
2039 andl $-XXX, %esp
2040 [addr32] pushq -8(%reg)
2041
2042 "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:
2043
2044 0x48 0x83 0xe4 0xf0 andq $-16, %rsp
2045 0x48 0x81 0xe4 0x00 0xff 0xff 0xff andq $-256, %rsp
2046
2047 "andl $-XXX, %esp" can be either 3 bytes or 6 bytes:
2048
2049 0x83 0xe4 0xf0 andl $-16, %esp
2050 0x81 0xe4 0x00 0xff 0xff 0xff andl $-256, %esp
2051 */
2052
2053 gdb_byte buf[19];
2054 int reg, r;
2055 int offset, offset_and;
2056
2057 if (target_read_memory (pc, buf, sizeof buf))
2058 return pc;
2059
2060 /* Skip optional addr32 prefix. */
2061 offset = buf[0] == 0x67 ? 1 : 0;
2062
2063 /* Check the caller-saved register case. The first instruction has
2064 to be "leaq 8(%rsp), %reg" or "leal 8(%rsp), %reg". */
2065 if (((buf[offset] & 0xfb) == 0x48 || (buf[offset] & 0xfb) == 0x40)
2066 && buf[offset + 1] == 0x8d
2067 && buf[offset + 3] == 0x24
2068 && buf[offset + 4] == 0x8)
2069 {
2070 /* MOD must be binary 10 and R/M must be binary 100. */
2071 if ((buf[offset + 2] & 0xc7) != 0x44)
2072 return pc;
2073
2074 /* REG has register number. */
2075 reg = (buf[offset + 2] >> 3) & 7;
2076
2077 /* Check the REX.R bit. */
2078 if ((buf[offset] & 0x4) != 0)
2079 reg += 8;
2080
2081 offset += 5;
2082 }
2083 else
2084 {
2085 /* Check the callee-saved register case. The first instruction
2086 has to be "pushq %reg". */
2087 reg = 0;
2088 if ((buf[offset] & 0xf6) == 0x40
2089 && (buf[offset + 1] & 0xf8) == 0x50)
2090 {
2091 /* Check the REX.B bit. */
2092 if ((buf[offset] & 1) != 0)
2093 reg = 8;
2094
2095 offset += 1;
2096 }
2097 else if ((buf[offset] & 0xf8) != 0x50)
2098 return pc;
2099
2100 /* Get register. */
2101 reg += buf[offset] & 0x7;
2102
2103 offset++;
2104
2105 /* Skip optional addr32 prefix. */
2106 if (buf[offset] == 0x67)
2107 offset++;
2108
2109 /* The next instruction has to be "leaq 16(%rsp), %reg" or
2110 "leal 16(%rsp), %reg". */
2111 if (((buf[offset] & 0xfb) != 0x48 && (buf[offset] & 0xfb) != 0x40)
2112 || buf[offset + 1] != 0x8d
2113 || buf[offset + 3] != 0x24
2114 || buf[offset + 4] != 0x10)
2115 return pc;
2116
2117 /* MOD must be binary 10 and R/M must be binary 100. */
2118 if ((buf[offset + 2] & 0xc7) != 0x44)
2119 return pc;
2120
2121 /* REG has register number. */
2122 r = (buf[offset + 2] >> 3) & 7;
2123
2124 /* Check the REX.R bit. */
2125 if ((buf[offset] & 0x4) != 0)
2126 r += 8;
2127
2128 /* Registers in pushq and leaq have to be the same. */
2129 if (reg != r)
2130 return pc;
2131
2132 offset += 5;
2133 }
2134
2135 /* Register can't be %rsp or %rbp. */
2136 if (reg == 4 || reg == 5)
2137 return pc;
2138
2139 /* The next instruction may be "andq $-XXX, %rsp" or
2140 "andl $-XXX, %esp". */
2141 if (buf[offset] != 0x48)
2142 offset--;
2143
2144 if (buf[offset + 2] != 0xe4
2145 || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
2146 return pc;
2147
2148 offset_and = offset;
2149 offset += buf[offset + 1] == 0x81 ? 7 : 4;
2150
2151 /* Skip optional addr32 prefix. */
2152 if (buf[offset] == 0x67)
2153 offset++;
2154
2155 /* The next instruction has to be "pushq -8(%reg)". */
2156 r = 0;
2157 if (buf[offset] == 0xff)
2158 offset++;
2159 else if ((buf[offset] & 0xf6) == 0x40
2160 && buf[offset + 1] == 0xff)
2161 {
2162 /* Check the REX.B bit. */
2163 if ((buf[offset] & 0x1) != 0)
2164 r = 8;
2165 offset += 2;
2166 }
2167 else
2168 return pc;
2169
2170 /* 8-bit -8 is 0xf8. REG must be binary 110 and MOD must be binary
2171 01. */
2172 if (buf[offset + 1] != 0xf8
2173 || (buf[offset] & 0xf8) != 0x70)
2174 return pc;
2175
2176 /* R/M has register. */
2177 r += buf[offset] & 7;
2178
2179 /* Registers in leaq and pushq have to be the same. */
2180 if (reg != r)
2181 return pc;
2182
2183 if (current_pc > pc + offset_and)
2184 cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);
2185
2186 return min (pc + offset + 2, current_pc);
2187 }
2188
2189 /* Do a limited analysis of the prologue at PC and update CACHE
2190 accordingly. Bail out early if CURRENT_PC is reached. Return the
2191 address where the analysis stopped.
2192
2193 We will handle only functions beginning with:
2194
2195 pushq %rbp 0x55
2196 movq %rsp, %rbp 0x48 0x89 0xe5 (or 0x48 0x8b 0xec)
2197
2198 or (for the X32 ABI):
2199
2200 pushq %rbp 0x55
2201 movl %esp, %ebp 0x89 0xe5 (or 0x8b 0xec)
2202
2203 Any function that doesn't start with one of these sequences will be
2204 assumed to have no prologue and thus no valid frame pointer in
2205 %rbp. */
2206
2207 static CORE_ADDR
2208 amd64_analyze_prologue (struct gdbarch *gdbarch,
2209 CORE_ADDR pc, CORE_ADDR current_pc,
2210 struct amd64_frame_cache *cache)
2211 {
2212 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2213 /* There are two variations of movq %rsp, %rbp. */
2214 static const gdb_byte mov_rsp_rbp_1[3] = { 0x48, 0x89, 0xe5 };
2215 static const gdb_byte mov_rsp_rbp_2[3] = { 0x48, 0x8b, 0xec };
2216 /* Ditto for movl %esp, %ebp. */
2217 static const gdb_byte mov_esp_ebp_1[2] = { 0x89, 0xe5 };
2218 static const gdb_byte mov_esp_ebp_2[2] = { 0x8b, 0xec };
2219
2220 gdb_byte buf[3];
2221 gdb_byte op;
2222
2223 if (current_pc <= pc)
2224 return current_pc;
2225
2226 if (gdbarch_ptr_bit (gdbarch) == 32)
2227 pc = amd64_x32_analyze_stack_align (pc, current_pc, cache);
2228 else
2229 pc = amd64_analyze_stack_align (pc, current_pc, cache);
2230
2231 op = read_code_unsigned_integer (pc, 1, byte_order);
2232
2233 if (op == 0x55) /* pushq %rbp */
2234 {
2235 /* Take into account that we've executed the `pushq %rbp' that
2236 starts this instruction sequence. */
2237 cache->saved_regs[AMD64_RBP_REGNUM] = 0;
2238 cache->sp_offset += 8;
2239
2240 /* If that's all, return now. */
2241 if (current_pc <= pc + 1)
2242 return current_pc;
2243
2244 read_code (pc + 1, buf, 3);
2245
2246 /* Check for `movq %rsp, %rbp'. */
2247 if (memcmp (buf, mov_rsp_rbp_1, 3) == 0
2248 || memcmp (buf, mov_rsp_rbp_2, 3) == 0)
2249 {
2250 /* OK, we actually have a frame. */
2251 cache->frameless_p = 0;
2252 return pc + 4;
2253 }
2254
2255 /* For X32, also check for `movl %esp, %ebp'. */
2256 if (gdbarch_ptr_bit (gdbarch) == 32)
2257 {
2258 if (memcmp (buf, mov_esp_ebp_1, 2) == 0
2259 || memcmp (buf, mov_esp_ebp_2, 2) == 0)
2260 {
2261 /* OK, we actually have a frame. */
2262 cache->frameless_p = 0;
2263 return pc + 3;
2264 }
2265 }
2266
2267 return pc + 1;
2268 }
2269
2270 return pc;
2271 }
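
/* The recognized prologue is only four bytes, so the check reduces to
   a pair of memcmp's over raw code bytes.  A stand-alone matcher
   equivalent to the 64-bit case above, illustrative only:

       #include <string.h>

       static int
       has_standard_prologue (const unsigned char *code)
       {
         static const unsigned char v1[] = { 0x55, 0x48, 0x89, 0xe5 };
         static const unsigned char v2[] = { 0x55, 0x48, 0x8b, 0xec };
         return memcmp (code, v1, 4) == 0 || memcmp (code, v2, 4) == 0;
       }

   Both patterns mean "pushq %rbp; movq %rsp, %rbp"; they are the same
   move encoded through the two possible mov opcodes (0x89 stores the
   source into the destination operand, 0x8b loads it).  */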
2272
2273 /* Work around false termination of prologue - GCC PR debug/48827.
2274
2275 START_PC is the first instruction of a function; PC is the address to which
2276 prologue analysis has already advanced. The function returns PC if it has nothing to do.
2277
2278 84 c0 test %al,%al
2279 74 23 je after
2280 <-- here the line table advances by zero lines - the false prologue end marker.
2281 0f 29 85 70 ff ff ff movaps %xmm0,-0x90(%rbp)
2282 0f 29 4d 80 movaps %xmm1,-0x80(%rbp)
2283 0f 29 55 90 movaps %xmm2,-0x70(%rbp)
2284 0f 29 5d a0 movaps %xmm3,-0x60(%rbp)
2285 0f 29 65 b0 movaps %xmm4,-0x50(%rbp)
2286 0f 29 6d c0 movaps %xmm5,-0x40(%rbp)
2287 0f 29 75 d0 movaps %xmm6,-0x30(%rbp)
2288 0f 29 7d e0 movaps %xmm7,-0x20(%rbp)
2289 after: */
2290
2291 static CORE_ADDR
2292 amd64_skip_xmm_prologue (CORE_ADDR pc, CORE_ADDR start_pc)
2293 {
2294 struct symtab_and_line start_pc_sal, next_sal;
2295 gdb_byte buf[4 + 8 * 7];
2296 int offset, xmmreg;
2297
2298 if (pc == start_pc)
2299 return pc;
2300
2301 start_pc_sal = find_pc_sect_line (start_pc, NULL, 0);
2302 if (start_pc_sal.symtab == NULL
2303 || producer_is_gcc_ge_4 (start_pc_sal.symtab->producer) < 6
2304 || start_pc_sal.pc != start_pc || pc >= start_pc_sal.end)
2305 return pc;
2306
2307 next_sal = find_pc_sect_line (start_pc_sal.end, NULL, 0);
2308 if (next_sal.line != start_pc_sal.line)
2309 return pc;
2310
2311 /* START_PC may be in overlaid memory; that case is ignored here. */
2312 if (target_read_code (next_sal.pc - 4, buf, sizeof (buf)) != 0)
2313 return pc;
2314
2315 /* test %al,%al */
2316 if (buf[0] != 0x84 || buf[1] != 0xc0)
2317 return pc;
2318 /* je AFTER */
2319 if (buf[2] != 0x74)
2320 return pc;
2321
2322 offset = 4;
2323 for (xmmreg = 0; xmmreg < 8; xmmreg++)
2324 {
2325 /* 0x0f 0x29 0b??000101 movaps %xmmreg?,-0x??(%rbp) */
2326 if (buf[offset] != 0x0f || buf[offset + 1] != 0x29
2327 || (buf[offset + 2] & 0x3f) != (xmmreg << 3 | 0x5))
2328 return pc;
2329
2330 /* 0b01?????? */
2331 if ((buf[offset + 2] & 0xc0) == 0x40)
2332 {
2333 /* 8-bit displacement. */
2334 offset += 4;
2335 }
2336 /* 0b10?????? */
2337 else if ((buf[offset + 2] & 0xc0) == 0x80)
2338 {
2339 /* 32-bit displacement. */
2340 offset += 7;
2341 }
2342 else
2343 return pc;
2344 }
2345
2346 /* je AFTER */
2347 if (offset - 4 != buf[3])
2348 return pc;
2349
2350 return next_sal.end;
2351 }
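
/* Each movaps store above is matched purely by its ModRM byte: the
   low three bits must be binary 101 (%rbp-relative) and the REG field
   must count up through %xmm0..%xmm7.  Checking one encoding by hand,
   using the bytes from the comment above: 0x0f 0x29 0x4d 0x80 is
   "movaps %xmm1, -0x80(%rbp)"; ModRM 0x4d has mod 01, reg 001 (%xmm1)
   and r/m 101 (%rbp), so (0x4d & 0x3f) == (1 << 3 | 0x5) holds and
   the loop accepts it with an 8-bit displacement.  */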
2352
2353 /* Return PC of first real instruction. */
2354
2355 static CORE_ADDR
2356 amd64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR start_pc)
2357 {
2358 struct amd64_frame_cache cache;
2359 CORE_ADDR pc;
2360 CORE_ADDR func_addr;
2361
2362 if (find_pc_partial_function (start_pc, NULL, &func_addr, NULL))
2363 {
2364 CORE_ADDR post_prologue_pc
2365 = skip_prologue_using_sal (gdbarch, func_addr);
2366 struct symtab *s = find_pc_symtab (func_addr);
2367
2368 /* Clang always emits a line note before the prologue and another
2369 one after. We trust clang to emit usable line notes. */
2370 if (post_prologue_pc
2371 && (s != NULL
2372 && s->producer != NULL
2373 && strncmp (s->producer, "clang ", sizeof ("clang ") - 1) == 0))
2374 return max (start_pc, post_prologue_pc);
2375 }
2376
2377 amd64_init_frame_cache (&cache);
2378 pc = amd64_analyze_prologue (gdbarch, start_pc, 0xffffffffffffffffLL,
2379 &cache);
2380 if (cache.frameless_p)
2381 return start_pc;
2382
2383 return amd64_skip_xmm_prologue (pc, start_pc);
2384 }
2385 \f
2386
2387 /* Normal frames. */
2388
2389 static void
2390 amd64_frame_cache_1 (struct frame_info *this_frame,
2391 struct amd64_frame_cache *cache)
2392 {
2393 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2394 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2395 gdb_byte buf[8];
2396 int i;
2397
2398 cache->pc = get_frame_func (this_frame);
2399 if (cache->pc != 0)
2400 amd64_analyze_prologue (gdbarch, cache->pc, get_frame_pc (this_frame),
2401 cache);
2402
2403 if (cache->frameless_p)
2404 {
2405 /* We didn't find a valid frame. If we're at the start of a
2406 function, or somewhere halfway through its prologue, the function's
2407 frame probably hasn't been fully set up yet. Try to
2408 reconstruct the base address for the stack frame by looking
2409 at the stack pointer. For truly "frameless" functions this
2410 might work too. */
2411
2412 if (cache->saved_sp_reg != -1)
2413 {
2414 /* Stack pointer has been saved. */
2415 get_frame_register (this_frame, cache->saved_sp_reg, buf);
2416 cache->saved_sp = extract_unsigned_integer (buf, 8, byte_order);
2417
2418 /* We're partway through aligning the stack. */
2419 cache->base = ((cache->saved_sp - 8) & 0xfffffffffffffff0LL) - 8;
2420 cache->saved_regs[AMD64_RIP_REGNUM] = cache->saved_sp - 8;
2421
2422 /* This will be added back below. */
2423 cache->saved_regs[AMD64_RIP_REGNUM] -= cache->base;
2424 }
2425 else
2426 {
2427 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2428 cache->base = extract_unsigned_integer (buf, 8, byte_order)
2429 + cache->sp_offset;
2430 }
2431 }
2432 else
2433 {
2434 get_frame_register (this_frame, AMD64_RBP_REGNUM, buf);
2435 cache->base = extract_unsigned_integer (buf, 8, byte_order);
2436 }
2437
2438 /* Now that we have the base address for the stack frame we can
2439 calculate the value of %rsp in the calling frame. */
2440 cache->saved_sp = cache->base + 16;
2441
2442 /* For normal frames, %rip is stored at 8(%rbp). If we don't have a
2443 frame we find it at the same offset from the reconstructed base
2444 address. If we stopped partway through aligning the stack, %rip is
2445 handled differently (see above). */
2446 if (!cache->frameless_p || cache->saved_sp_reg == -1)
2447 cache->saved_regs[AMD64_RIP_REGNUM] = 8;
2448
2449 /* Adjust all the saved registers such that they contain addresses
2450 instead of offsets. */
2451 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
2452 if (cache->saved_regs[i] != -1)
2453 cache->saved_regs[i] += cache->base;
2454
2455 cache->base_p = 1;
2456 }
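
/* For reference, the layout the offsets above assume once the
   canonical prologue has run (cache->base is the value of %rbp):

       cache->base + 16   %rsp in the calling frame (saved_sp)
       cache->base +  8   return address (saved %rip)
       cache->base +  0   saved %rbp                              */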
2457
2458 static struct amd64_frame_cache *
2459 amd64_frame_cache (struct frame_info *this_frame, void **this_cache)
2460 {
2461 volatile struct gdb_exception ex;
2462 struct amd64_frame_cache *cache;
2463
2464 if (*this_cache)
2465 return *this_cache;
2466
2467 cache = amd64_alloc_frame_cache ();
2468 *this_cache = cache;
2469
2470 TRY_CATCH (ex, RETURN_MASK_ERROR)
2471 {
2472 amd64_frame_cache_1 (this_frame, cache);
2473 }
2474 if (ex.reason < 0 && ex.error != NOT_AVAILABLE_ERROR)
2475 throw_exception (ex);
2476
2477 return cache;
2478 }
2479
2480 static enum unwind_stop_reason
2481 amd64_frame_unwind_stop_reason (struct frame_info *this_frame,
2482 void **this_cache)
2483 {
2484 struct amd64_frame_cache *cache =
2485 amd64_frame_cache (this_frame, this_cache);
2486
2487 if (!cache->base_p)
2488 return UNWIND_UNAVAILABLE;
2489
2490 /* This marks the outermost frame. */
2491 if (cache->base == 0)
2492 return UNWIND_OUTERMOST;
2493
2494 return UNWIND_NO_REASON;
2495 }
2496
2497 static void
2498 amd64_frame_this_id (struct frame_info *this_frame, void **this_cache,
2499 struct frame_id *this_id)
2500 {
2501 struct amd64_frame_cache *cache =
2502 amd64_frame_cache (this_frame, this_cache);
2503
2504 if (!cache->base_p)
2505 (*this_id) = frame_id_build_unavailable_stack (cache->pc);
2506 else if (cache->base == 0)
2507 {
2508 /* This marks the outermost frame. */
2509 return;
2510 }
2511 else
2512 (*this_id) = frame_id_build (cache->base + 16, cache->pc);
2513 }
2514
2515 static struct value *
2516 amd64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
2517 int regnum)
2518 {
2519 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2520 struct amd64_frame_cache *cache =
2521 amd64_frame_cache (this_frame, this_cache);
2522
2523 gdb_assert (regnum >= 0);
2524
2525 if (regnum == gdbarch_sp_regnum (gdbarch) && cache->saved_sp)
2526 return frame_unwind_got_constant (this_frame, regnum, cache->saved_sp);
2527
2528 if (regnum < AMD64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
2529 return frame_unwind_got_memory (this_frame, regnum,
2530 cache->saved_regs[regnum]);
2531
2532 return frame_unwind_got_register (this_frame, regnum, regnum);
2533 }
2534
2535 static const struct frame_unwind amd64_frame_unwind =
2536 {
2537 NORMAL_FRAME,
2538 amd64_frame_unwind_stop_reason,
2539 amd64_frame_this_id,
2540 amd64_frame_prev_register,
2541 NULL,
2542 default_frame_sniffer
2543 };
2544 \f
2545 /* Generate a bytecode expression to get the value of the saved PC. */
2546
2547 static void
2548 amd64_gen_return_address (struct gdbarch *gdbarch,
2549 struct agent_expr *ax, struct axs_value *value,
2550 CORE_ADDR scope)
2551 {
2552 /* The following sequence assumes the traditional use of the base
2553 register. */
2554 ax_reg (ax, AMD64_RBP_REGNUM);
2555 ax_const_l (ax, 8);
2556 ax_simple (ax, aop_add);
2557 value->type = register_type (gdbarch, AMD64_RIP_REGNUM);
2558 value->kind = axs_lvalue_memory;
2559 }
2560 \f
2561
2562 /* Signal trampolines. */
2563
2564 /* FIXME: kettenis/20030419: Perhaps, we can unify the 32-bit and
2565 64-bit variants. This would require using identical frame caches
2566 on both platforms. */
2567
2568 static struct amd64_frame_cache *
2569 amd64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
2570 {
2571 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2572 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2573 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2574 volatile struct gdb_exception ex;
2575 struct amd64_frame_cache *cache;
2576 CORE_ADDR addr;
2577 gdb_byte buf[8];
2578 int i;
2579
2580 if (*this_cache)
2581 return *this_cache;
2582
2583 cache = amd64_alloc_frame_cache ();
2584
2585 TRY_CATCH (ex, RETURN_MASK_ERROR)
2586 {
2587 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2588 cache->base = extract_unsigned_integer (buf, 8, byte_order) - 8;
2589
2590 addr = tdep->sigcontext_addr (this_frame);
2591 gdb_assert (tdep->sc_reg_offset);
2592 gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
2593 for (i = 0; i < tdep->sc_num_regs; i++)
2594 if (tdep->sc_reg_offset[i] != -1)
2595 cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];
2596
2597 cache->base_p = 1;
2598 }
2599 if (ex.reason < 0 && ex.error != NOT_AVAILABLE_ERROR)
2600 throw_exception (ex);
2601
2602 *this_cache = cache;
2603 return cache;
2604 }
2605
2606 static enum unwind_stop_reason
2607 amd64_sigtramp_frame_unwind_stop_reason (struct frame_info *this_frame,
2608 void **this_cache)
2609 {
2610 struct amd64_frame_cache *cache =
2611 amd64_sigtramp_frame_cache (this_frame, this_cache);
2612
2613 if (!cache->base_p)
2614 return UNWIND_UNAVAILABLE;
2615
2616 return UNWIND_NO_REASON;
2617 }
2618
2619 static void
2620 amd64_sigtramp_frame_this_id (struct frame_info *this_frame,
2621 void **this_cache, struct frame_id *this_id)
2622 {
2623 struct amd64_frame_cache *cache =
2624 amd64_sigtramp_frame_cache (this_frame, this_cache);
2625
2626 if (!cache->base_p)
2627 (*this_id) = frame_id_build_unavailable_stack (get_frame_pc (this_frame));
2628 else if (cache->base == 0)
2629 {
2630 /* This marks the outermost frame. */
2631 return;
2632 }
2633 else
2634 (*this_id) = frame_id_build (cache->base + 16, get_frame_pc (this_frame));
2635 }
2636
2637 static struct value *
2638 amd64_sigtramp_frame_prev_register (struct frame_info *this_frame,
2639 void **this_cache, int regnum)
2640 {
2641 /* Make sure we've initialized the cache. */
2642 amd64_sigtramp_frame_cache (this_frame, this_cache);
2643
2644 return amd64_frame_prev_register (this_frame, this_cache, regnum);
2645 }
2646
2647 static int
2648 amd64_sigtramp_frame_sniffer (const struct frame_unwind *self,
2649 struct frame_info *this_frame,
2650 void **this_cache)
2651 {
2652 struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
2653
2654 /* We shouldn't even bother if we don't have a sigcontext_addr
2655 handler. */
2656 if (tdep->sigcontext_addr == NULL)
2657 return 0;
2658
2659 if (tdep->sigtramp_p != NULL)
2660 {
2661 if (tdep->sigtramp_p (this_frame))
2662 return 1;
2663 }
2664
2665 if (tdep->sigtramp_start != 0)
2666 {
2667 CORE_ADDR pc = get_frame_pc (this_frame);
2668
2669 gdb_assert (tdep->sigtramp_end != 0);
2670 if (pc >= tdep->sigtramp_start && pc < tdep->sigtramp_end)
2671 return 1;
2672 }
2673
2674 return 0;
2675 }
2676
2677 static const struct frame_unwind amd64_sigtramp_frame_unwind =
2678 {
2679 SIGTRAMP_FRAME,
2680 amd64_sigtramp_frame_unwind_stop_reason,
2681 amd64_sigtramp_frame_this_id,
2682 amd64_sigtramp_frame_prev_register,
2683 NULL,
2684 amd64_sigtramp_frame_sniffer
2685 };
2686 \f
2687
2688 static CORE_ADDR
2689 amd64_frame_base_address (struct frame_info *this_frame, void **this_cache)
2690 {
2691 struct amd64_frame_cache *cache =
2692 amd64_frame_cache (this_frame, this_cache);
2693
2694 return cache->base;
2695 }
2696
2697 static const struct frame_base amd64_frame_base =
2698 {
2699 &amd64_frame_unwind,
2700 amd64_frame_base_address,
2701 amd64_frame_base_address,
2702 amd64_frame_base_address
2703 };
2704
2705 /* Normal frames, but in a function epilogue. */
2706
2707 /* The epilogue is defined here as the 'ret' instruction, which will
2708 follow any instruction such as 'leave' or 'pop %rbp' that destroys
2709 the function's stack frame. */
2710
2711 static int
2712 amd64_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
2713 {
2714 gdb_byte insn;
2715 struct symtab *symtab;
2716
2717 symtab = find_pc_symtab (pc);
2718 if (symtab && symtab->epilogue_unwind_valid)
2719 return 0;
2720
2721 if (target_read_memory (pc, &insn, 1))
2722 return 0; /* Can't read memory at pc. */
2723
2724 if (insn != 0xc3) /* 'ret' instruction. */
2725 return 0;
2726
2727 return 1;
2728 }
2729
2730 static int
2731 amd64_epilogue_frame_sniffer (const struct frame_unwind *self,
2732 struct frame_info *this_frame,
2733 void **this_prologue_cache)
2734 {
2735 if (frame_relative_level (this_frame) == 0)
2736 return amd64_in_function_epilogue_p (get_frame_arch (this_frame),
2737 get_frame_pc (this_frame));
2738 else
2739 return 0;
2740 }
2741
2742 static struct amd64_frame_cache *
2743 amd64_epilogue_frame_cache (struct frame_info *this_frame, void **this_cache)
2744 {
2745 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2746 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2747 volatile struct gdb_exception ex;
2748 struct amd64_frame_cache *cache;
2749 gdb_byte buf[8];
2750
2751 if (*this_cache)
2752 return *this_cache;
2753
2754 cache = amd64_alloc_frame_cache ();
2755 *this_cache = cache;
2756
2757 TRY_CATCH (ex, RETURN_MASK_ERROR)
2758 {
2759 /* Cache base will be %rsp plus cache->sp_offset (-8). */
2760 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2761 cache->base = extract_unsigned_integer (buf, 8,
2762 byte_order) + cache->sp_offset;
2763
2764 /* Cache pc will be the frame func. */
2765 cache->pc = get_frame_pc (this_frame);
2766
2767 /* The saved %rsp will be at cache->base plus 16. */
2768 cache->saved_sp = cache->base + 16;
2769
2770 /* The saved %rip will be at cache->base plus 8. */
2771 cache->saved_regs[AMD64_RIP_REGNUM] = cache->base + 8;
2772
2773 cache->base_p = 1;
2774 }
2775 if (ex.reason < 0 && ex.error != NOT_AVAILABLE_ERROR)
2776 throw_exception (ex);
2777
2778 return cache;
2779 }
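
/* At the "ret" the frame has already been torn down, so everything
   here derives from %rsp alone: base is %rsp - 8, the return address
   sits at the top of the stack (base + 8 == %rsp), and the caller's
   %rsp after the "ret" pops it is base + 16 == %rsp + 8.  */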
2780
2781 static enum unwind_stop_reason
2782 amd64_epilogue_frame_unwind_stop_reason (struct frame_info *this_frame,
2783 void **this_cache)
2784 {
2785 struct amd64_frame_cache *cache
2786 = amd64_epilogue_frame_cache (this_frame, this_cache);
2787
2788 if (!cache->base_p)
2789 return UNWIND_UNAVAILABLE;
2790
2791 return UNWIND_NO_REASON;
2792 }
2793
2794 static void
2795 amd64_epilogue_frame_this_id (struct frame_info *this_frame,
2796 void **this_cache,
2797 struct frame_id *this_id)
2798 {
2799 struct amd64_frame_cache *cache = amd64_epilogue_frame_cache (this_frame,
2800 this_cache);
2801
2802 if (!cache->base_p)
2803 (*this_id) = frame_id_build_unavailable_stack (cache->pc);
2804 else
2805 (*this_id) = frame_id_build (cache->base + 8, cache->pc);
2806 }
2807
2808 static const struct frame_unwind amd64_epilogue_frame_unwind =
2809 {
2810 NORMAL_FRAME,
2811 amd64_epilogue_frame_unwind_stop_reason,
2812 amd64_epilogue_frame_this_id,
2813 amd64_frame_prev_register,
2814 NULL,
2815 amd64_epilogue_frame_sniffer
2816 };
2817
2818 static struct frame_id
2819 amd64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
2820 {
2821 CORE_ADDR fp;
2822
2823 fp = get_frame_register_unsigned (this_frame, AMD64_RBP_REGNUM);
2824
2825 return frame_id_build (fp + 16, get_frame_pc (this_frame));
2826 }
2827
2828 /* 16-byte align the SP per frame requirements. */
2829
2830 static CORE_ADDR
2831 amd64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
2832 {
2833 return sp & -(CORE_ADDR)16;
2834 }
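
/* Clearing the low four bits always rounds down, which is the safe
   direction for a downward-growing stack.  Worked example:

       sp        = 0x7fffffffe8d8
       mask      = -(CORE_ADDR) 16 = 0xfffffffffffffff0
       sp & mask = 0x7fffffffe8d0                                  */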
2835 \f
2836
2837 /* Supply register REGNUM from the buffer specified by FPREGS and LEN
2838 in the floating-point register set REGSET to register cache
2839 REGCACHE. If REGNUM is -1, do this for all registers in REGSET. */
2840
2841 static void
2842 amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
2843 int regnum, const void *fpregs, size_t len)
2844 {
2845 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2846 const struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2847
2848 gdb_assert (len == tdep->sizeof_fpregset);
2849 amd64_supply_fxsave (regcache, regnum, fpregs);
2850 }
2851
2852 /* Collect register REGNUM from the register cache REGCACHE and store
2853 it in the buffer specified by FPREGS and LEN as described by the
2854 floating-point register set REGSET. If REGNUM is -1, do this for
2855 all registers in REGSET. */
2856
2857 static void
2858 amd64_collect_fpregset (const struct regset *regset,
2859 const struct regcache *regcache,
2860 int regnum, void *fpregs, size_t len)
2861 {
2862 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2863 const struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2864
2865 gdb_assert (len == tdep->sizeof_fpregset);
2866 amd64_collect_fxsave (regcache, regnum, fpregs);
2867 }
2868
2869 /* Similar to amd64_supply_fpregset, but use XSAVE extended state. */
2870
2871 static void
2872 amd64_supply_xstateregset (const struct regset *regset,
2873 struct regcache *regcache, int regnum,
2874 const void *xstateregs, size_t len)
2875 {
2876 amd64_supply_xsave (regcache, regnum, xstateregs);
2877 }
2878
2879 /* Similar to amd64_collect_fpregset, but use XSAVE extended state. */
2880
2881 static void
2882 amd64_collect_xstateregset (const struct regset *regset,
2883 const struct regcache *regcache,
2884 int regnum, void *xstateregs, size_t len)
2885 {
2886 amd64_collect_xsave (regcache, regnum, xstateregs, 1);
2887 }
2888
2889 static const struct regset amd64_fpregset =
2890 {
2891 NULL, amd64_supply_fpregset, amd64_collect_fpregset
2892 };
2893
2894 static const struct regset amd64_xstateregset =
2895 {
2896 NULL, amd64_supply_xstateregset, amd64_collect_xstateregset
2897 };
2898
2899 /* Return the appropriate register set for the core section identified
2900 by SECT_NAME and SECT_SIZE. */
2901
2902 static const struct regset *
2903 amd64_regset_from_core_section (struct gdbarch *gdbarch,
2904 const char *sect_name, size_t sect_size)
2905 {
2906 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2907
2908 if (strcmp (sect_name, ".reg2") == 0 && sect_size == tdep->sizeof_fpregset)
2909 return &amd64_fpregset;
2910
2911 if (strcmp (sect_name, ".reg-xstate") == 0)
2912 return &amd64_xstateregset;
2913
2914 return i386_regset_from_core_section (gdbarch, sect_name, sect_size);
2915 }
2916 \f
2917
2918 /* Figure out where the longjmp will land. Slurp the jmp_buf out of
2919 %rdi. We expect its value to be a pointer to the jmp_buf structure
2920 from which we extract the address that we will land at. This
2921 address is copied into PC. This routine returns non-zero on
2922 success. */
2923
2924 static int
2925 amd64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2926 {
2927 gdb_byte buf[8];
2928 CORE_ADDR jb_addr;
2929 struct gdbarch *gdbarch = get_frame_arch (frame);
2930 int jb_pc_offset = gdbarch_tdep (gdbarch)->jb_pc_offset;
2931 int len = TYPE_LENGTH (builtin_type (gdbarch)->builtin_func_ptr);
2932
2933 /* If JB_PC_OFFSET is -1, we have no way to find out where the
2934 longjmp will land. */
2935 if (jb_pc_offset == -1)
2936 return 0;
2937
2938 get_frame_register (frame, AMD64_RDI_REGNUM, buf);
2939 jb_addr = extract_typed_address
2940 (buf, builtin_type (gdbarch)->builtin_data_ptr);
2941 if (target_read_memory (jb_addr + jb_pc_offset, buf, len))
2942 return 0;
2943
2944 *pc = extract_typed_address (buf, builtin_type (gdbarch)->builtin_func_ptr);
2945
2946 return 1;
2947 }
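
/* The jmp_buf layout is private to the C library, which is why the
   offset of the saved PC arrives through tdep->jb_pc_offset instead
   of being hard-wired here.  A stand-alone sketch of the final load,
   illustrative only (the offset is a placeholder, and e.g. glibc
   additionally mangles the stored pointer):

       #include <stdint.h>
       #include <string.h>

       static uint64_t
       longjmp_target (const unsigned char *jb, int jb_pc_offset)
       {
         uint64_t pc;
         memcpy (&pc, jb + jb_pc_offset, sizeof pc);
         return pc;
       }  */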
2948
2949 static const int amd64_record_regmap[] =
2950 {
2951 AMD64_RAX_REGNUM, AMD64_RCX_REGNUM, AMD64_RDX_REGNUM, AMD64_RBX_REGNUM,
2952 AMD64_RSP_REGNUM, AMD64_RBP_REGNUM, AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
2953 AMD64_R8_REGNUM, AMD64_R9_REGNUM, AMD64_R10_REGNUM, AMD64_R11_REGNUM,
2954 AMD64_R12_REGNUM, AMD64_R13_REGNUM, AMD64_R14_REGNUM, AMD64_R15_REGNUM,
2955 AMD64_RIP_REGNUM, AMD64_EFLAGS_REGNUM, AMD64_CS_REGNUM, AMD64_SS_REGNUM,
2956 AMD64_DS_REGNUM, AMD64_ES_REGNUM, AMD64_FS_REGNUM, AMD64_GS_REGNUM
2957 };
2958
2959 void
2960 amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
2961 {
2962 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2963 const struct target_desc *tdesc = info.target_desc;
2964 static const char *const stap_integer_prefixes[] = { "$", NULL };
2965 static const char *const stap_register_prefixes[] = { "%", NULL };
2966 static const char *const stap_register_indirection_prefixes[] = { "(",
2967 NULL };
2968 static const char *const stap_register_indirection_suffixes[] = { ")",
2969 NULL };
2970
2971 /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
2972 floating-point registers. */
2973 tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;
2974
2975 if (! tdesc_has_registers (tdesc))
2976 tdesc = tdesc_amd64;
2977 tdep->tdesc = tdesc;
2978
2979 tdep->num_core_regs = AMD64_NUM_GREGS + I387_NUM_REGS;
2980 tdep->register_names = amd64_register_names;
2981
2982 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx512") != NULL)
2983 {
2984 tdep->zmmh_register_names = amd64_zmmh_names;
2985 tdep->k_register_names = amd64_k_names;
2986 tdep->xmm_avx512_register_names = amd64_xmm_avx512_names;
2987 tdep->ymm16h_register_names = amd64_ymmh_avx512_names;
2988
2989 tdep->num_zmm_regs = 32;
2990 tdep->num_xmm_avx512_regs = 16;
2991 tdep->num_ymm_avx512_regs = 16;
2992
2993 tdep->zmm0h_regnum = AMD64_ZMM0H_REGNUM;
2994 tdep->k0_regnum = AMD64_K0_REGNUM;
2995 tdep->xmm16_regnum = AMD64_XMM16_REGNUM;
2996 tdep->ymm16h_regnum = AMD64_YMM16H_REGNUM;
2997 }
2998
2999 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx") != NULL)
3000 {
3001 tdep->ymmh_register_names = amd64_ymmh_names;
3002 tdep->num_ymm_regs = 16;
3003 tdep->ymm0h_regnum = AMD64_YMM0H_REGNUM;
3004 }
3005
3006 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.mpx") != NULL)
3007 {
3008 tdep->mpx_register_names = amd64_mpx_names;
3009 tdep->bndcfgu_regnum = AMD64_BNDCFGU_REGNUM;
3010 tdep->bnd0r_regnum = AMD64_BND0R_REGNUM;
3011 }
3012
3013 tdep->num_byte_regs = 20;
3014 tdep->num_word_regs = 16;
3015 tdep->num_dword_regs = 16;
3016 /* Avoid wiring in the MMX registers for now. */
3017 tdep->num_mmx_regs = 0;
3018
3019 set_gdbarch_pseudo_register_read_value (gdbarch,
3020 amd64_pseudo_register_read_value);
3021 set_gdbarch_pseudo_register_write (gdbarch,
3022 amd64_pseudo_register_write);
3023
3024 set_tdesc_pseudo_register_name (gdbarch, amd64_pseudo_register_name);
3025
3026 /* AMD64 has an FPU and 16 SSE registers. */
3027 tdep->st0_regnum = AMD64_ST0_REGNUM;
3028 tdep->num_xmm_regs = 16;
3029
3030 /* This is what all the fuss is about. */
3031 set_gdbarch_long_bit (gdbarch, 64);
3032 set_gdbarch_long_long_bit (gdbarch, 64);
3033 set_gdbarch_ptr_bit (gdbarch, 64);
3034
3035 /* In contrast to the i386, on AMD64 a `long double' actually takes
3036 up 128 bits, even though it's still based on the i387 extended
3037 floating-point format which has only 80 significant bits. */
3038 set_gdbarch_long_double_bit (gdbarch, 128);
3039
3040 set_gdbarch_num_regs (gdbarch, AMD64_NUM_REGS);
3041
3042 /* Register numbers of various important registers. */
3043 set_gdbarch_sp_regnum (gdbarch, AMD64_RSP_REGNUM); /* %rsp */
3044 set_gdbarch_pc_regnum (gdbarch, AMD64_RIP_REGNUM); /* %rip */
3045 set_gdbarch_ps_regnum (gdbarch, AMD64_EFLAGS_REGNUM); /* %eflags */
3046 set_gdbarch_fp0_regnum (gdbarch, AMD64_ST0_REGNUM); /* %st(0) */
3047
3048 /* The "default" register numbering scheme for AMD64 is referred to
3049 as the "DWARF Register Number Mapping" in the System V psABI.
3050 The preferred debugging format for all known AMD64 targets is
3051 actually DWARF2, and GCC doesn't seem to support DWARF (that is
3052 DWARF-1), but we provide the same mapping just in case. This
3053 mapping is also used for stabs, which GCC does support. */
3054 set_gdbarch_stab_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
3055 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
3056
3057 /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
3058 be in use on any of the supported AMD64 targets. */
3059
3060 /* Call dummy code. */
3061 set_gdbarch_push_dummy_call (gdbarch, amd64_push_dummy_call);
3062 set_gdbarch_frame_align (gdbarch, amd64_frame_align);
3063 set_gdbarch_frame_red_zone_size (gdbarch, 128);
3064
3065 set_gdbarch_convert_register_p (gdbarch, i387_convert_register_p);
3066 set_gdbarch_register_to_value (gdbarch, i387_register_to_value);
3067 set_gdbarch_value_to_register (gdbarch, i387_value_to_register);
3068
3069 set_gdbarch_return_value (gdbarch, amd64_return_value);
3070
3071 set_gdbarch_skip_prologue (gdbarch, amd64_skip_prologue);
3072
3073 tdep->record_regmap = amd64_record_regmap;
3074
3075 set_gdbarch_dummy_id (gdbarch, amd64_dummy_id);
3076
3077 /* Hook the function epilogue frame unwinder. This unwinder is
3078 prepended to the list, so that it supersedes the other
3079 unwinders in function epilogues. */
3080 frame_unwind_prepend_unwinder (gdbarch, &amd64_epilogue_frame_unwind);
3081
3082 /* Hook the prologue-based frame unwinders. */
3083 frame_unwind_append_unwinder (gdbarch, &amd64_sigtramp_frame_unwind);
3084 frame_unwind_append_unwinder (gdbarch, &amd64_frame_unwind);
3085 frame_base_set_default (gdbarch, &amd64_frame_base);
3086
3087 /* If we have a register mapping, enable the generic core file support. */
3088 if (tdep->gregset_reg_offset)
3089 set_gdbarch_regset_from_core_section (gdbarch,
3090 amd64_regset_from_core_section);
3091
3092 set_gdbarch_get_longjmp_target (gdbarch, amd64_get_longjmp_target);
3093
3094 set_gdbarch_relocate_instruction (gdbarch, amd64_relocate_instruction);
3095
3096 set_gdbarch_gen_return_address (gdbarch, amd64_gen_return_address);
3097
3098 /* SystemTap variables and functions. */
3099 set_gdbarch_stap_integer_prefixes (gdbarch, stap_integer_prefixes);
3100 set_gdbarch_stap_register_prefixes (gdbarch, stap_register_prefixes);
3101 set_gdbarch_stap_register_indirection_prefixes (gdbarch,
3102 stap_register_indirection_prefixes);
3103 set_gdbarch_stap_register_indirection_suffixes (gdbarch,
3104 stap_register_indirection_suffixes);
3105 set_gdbarch_stap_is_single_operand (gdbarch,
3106 i386_stap_is_single_operand);
3107 set_gdbarch_stap_parse_special_token (gdbarch,
3108 i386_stap_parse_special_token);
3109 set_gdbarch_insn_is_call (gdbarch, amd64_insn_is_call);
3110 set_gdbarch_insn_is_ret (gdbarch, amd64_insn_is_ret);
3111 set_gdbarch_insn_is_jump (gdbarch, amd64_insn_is_jump);
3112 }
3113 \f
3114
3115 static struct type *
3116 amd64_x32_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
3117 {
3118 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3119
3120 switch (regnum - tdep->eax_regnum)
3121 {
3122 case AMD64_RBP_REGNUM: /* %ebp */
3123 case AMD64_RSP_REGNUM: /* %esp */
3124 return builtin_type (gdbarch)->builtin_data_ptr;
3125 case AMD64_RIP_REGNUM: /* %eip */
3126 return builtin_type (gdbarch)->builtin_func_ptr;
3127 }
3128
3129 return i386_pseudo_register_type (gdbarch, regnum);
3130 }
3131
3132 void
3133 amd64_x32_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
3134 {
3135 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3136 const struct target_desc *tdesc = info.target_desc;
3137
3138 amd64_init_abi (info, gdbarch);
3139
3140 if (! tdesc_has_registers (tdesc))
3141 tdesc = tdesc_x32;
3142 tdep->tdesc = tdesc;
3143
3144 tdep->num_dword_regs = 17;
3145 set_tdesc_pseudo_register_type (gdbarch, amd64_x32_pseudo_register_type);
3146
3147 set_gdbarch_long_bit (gdbarch, 32);
3148 set_gdbarch_ptr_bit (gdbarch, 32);
3149 }
3150
3151 /* Provide a prototype to silence -Wmissing-prototypes. */
3152 void _initialize_amd64_tdep (void);
3153
3154 void
3155 _initialize_amd64_tdep (void)
3156 {
3157 initialize_tdesc_amd64 ();
3158 initialize_tdesc_amd64_avx ();
3159 initialize_tdesc_amd64_mpx ();
3160 initialize_tdesc_amd64_avx512 ();
3161
3162 initialize_tdesc_x32 ();
3163 initialize_tdesc_x32_avx ();
3164 initialize_tdesc_x32_avx512 ();
3165 }
3166 \f
3167
3168 /* The 64-bit FXSAVE format differs from the 32-bit format in the
3169 sense that the instruction pointer and data pointer are simply
3170 64-bit offsets into the code segment and the data segment instead
3171 of a selector offset pair. The functions below store the upper 32
3172 bits of these pointers (instead of just the 16-bits of the segment
3173 selector). */
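
/* For reference, the relevant FXSAVE offsets in the 64-bit format,
   per the Intel SDM:

       bytes  8..15   FIP   64-bit FPU instruction pointer
       bytes 16..23   FDP   64-bit FPU operand (data) pointer

   Hence regs + 12 and regs + 20 below address the upper halves that
   the 16-bit FISEG/FOSEG selectors occupy in the 32-bit format.  */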
3174
3175 /* Fill register REGNUM in REGCACHE with the appropriate
3176 floating-point or SSE register value from *FXSAVE. If REGNUM is
3177 -1, do this for all registers. This function masks off any of the
3178 reserved bits in *FXSAVE. */
3179
3180 void
3181 amd64_supply_fxsave (struct regcache *regcache, int regnum,
3182 const void *fxsave)
3183 {
3184 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3185 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3186
3187 i387_supply_fxsave (regcache, regnum, fxsave);
3188
3189 if (fxsave
3190 && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3191 {
3192 const gdb_byte *regs = fxsave;
3193
3194 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3195 regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
3196 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3197 regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
3198 }
3199 }
3200
3201 /* Similar to amd64_supply_fxsave, but use XSAVE extended state. */
3202
3203 void
3204 amd64_supply_xsave (struct regcache *regcache, int regnum,
3205 const void *xsave)
3206 {
3207 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3208 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3209
3210 i387_supply_xsave (regcache, regnum, xsave);
3211
3212 if (xsave
3213 && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3214 {
3215 const gdb_byte *regs = xsave;
3216
3217 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3218 regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep),
3219 regs + 12);
3220 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3221 regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep),
3222 regs + 20);
3223 }
3224 }
3225
3226 /* Fill register REGNUM (if it is a floating-point or SSE register) in
3227 *FXSAVE with the value from REGCACHE. If REGNUM is -1, do this for
3228 all registers. This function doesn't touch any of the reserved
3229 bits in *FXSAVE. */
3230
3231 void
3232 amd64_collect_fxsave (const struct regcache *regcache, int regnum,
3233 void *fxsave)
3234 {
3235 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3236 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3237 gdb_byte *regs = fxsave;
3238
3239 i387_collect_fxsave (regcache, regnum, fxsave);
3240
3241 if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3242 {
3243 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3244 regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
3245 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3246 regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
3247 }
3248 }
3249
3250 /* Similar to amd64_collect_fxsave, but use XSAVE extended state. */
3251
3252 void
3253 amd64_collect_xsave (const struct regcache *regcache, int regnum,
3254 void *xsave, int gcore)
3255 {
3256 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3257 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3258 gdb_byte *regs = xsave;
3259
3260 i387_collect_xsave (regcache, regnum, xsave, gcore);
3261
3262 if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3263 {
3264 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3265 regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep),
3266 regs + 12);
3267 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3268 regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep),
3269 regs + 20);
3270 }
3271 }