1 /* Target-dependent code for AMD64.
2
3 Copyright (C) 2001-2021 Free Software Foundation, Inc.
4
5 Contributed by Jiri Smid, SuSE Labs.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "opcode/i386.h"
24 #include "dis-asm.h"
25 #include "arch-utils.h"
26 #include "block.h"
27 #include "dummy-frame.h"
28 #include "frame.h"
29 #include "frame-base.h"
30 #include "frame-unwind.h"
31 #include "inferior.h"
32 #include "infrun.h"
33 #include "gdbcmd.h"
34 #include "gdbcore.h"
35 #include "objfiles.h"
36 #include "regcache.h"
37 #include "regset.h"
38 #include "symfile.h"
39 #include "disasm.h"
40 #include "amd64-tdep.h"
41 #include "i387-tdep.h"
42 #include "gdbsupport/x86-xstate.h"
43 #include <algorithm>
44 #include "target-descriptions.h"
45 #include "arch/amd64.h"
46 #include "producer.h"
47 #include "ax.h"
48 #include "ax-gdb.h"
49 #include "gdbsupport/byte-vector.h"
50 #include "osabi.h"
51 #include "x86-tdep.h"
52 #include "amd64-ravenscar-thread.h"
53
54 /* Note that the AMD64 architecture was previously known as x86-64.
55 The latter is (forever) engraved into the canonical system name as
56 returned by config.guess, and used as the name for the AMD64 port
57 of GNU/Linux. The BSDs have renamed their ports to amd64; they
58 don't like to shout. For GDB we prefer the amd64_-prefix over the
59 x86_64_-prefix since it's so much easier to type. */
60
61 /* Register information. */
62
63 static const char * const amd64_register_names[] =
64 {
65 "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",
66
67 /* %r8 is indeed register number 8. */
68 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
69 "rip", "eflags", "cs", "ss", "ds", "es", "fs", "gs",
70
71 /* %st0 is register number 24. */
72 "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7",
73 "fctrl", "fstat", "ftag", "fiseg", "fioff", "foseg", "fooff", "fop",
74
75 /* %xmm0 is register number 40. */
76 "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
77 "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
78 "mxcsr",
79 };
80
81 static const char * const amd64_ymm_names[] =
82 {
83 "ymm0", "ymm1", "ymm2", "ymm3",
84 "ymm4", "ymm5", "ymm6", "ymm7",
85 "ymm8", "ymm9", "ymm10", "ymm11",
86 "ymm12", "ymm13", "ymm14", "ymm15"
87 };
88
89 static const char * const amd64_ymm_avx512_names[] =
90 {
91 "ymm16", "ymm17", "ymm18", "ymm19",
92 "ymm20", "ymm21", "ymm22", "ymm23",
93 "ymm24", "ymm25", "ymm26", "ymm27",
94 "ymm28", "ymm29", "ymm30", "ymm31"
95 };
96
97 static const char * const amd64_ymmh_names[] =
98 {
99 "ymm0h", "ymm1h", "ymm2h", "ymm3h",
100 "ymm4h", "ymm5h", "ymm6h", "ymm7h",
101 "ymm8h", "ymm9h", "ymm10h", "ymm11h",
102 "ymm12h", "ymm13h", "ymm14h", "ymm15h"
103 };
104
105 static const char * const amd64_ymmh_avx512_names[] =
106 {
107 "ymm16h", "ymm17h", "ymm18h", "ymm19h",
108 "ymm20h", "ymm21h", "ymm22h", "ymm23h",
109 "ymm24h", "ymm25h", "ymm26h", "ymm27h",
110 "ymm28h", "ymm29h", "ymm30h", "ymm31h"
111 };
112
113 static const char * const amd64_mpx_names[] =
114 {
115 "bnd0raw", "bnd1raw", "bnd2raw", "bnd3raw", "bndcfgu", "bndstatus"
116 };
117
118 static const char * const amd64_k_names[] =
119 {
120 "k0", "k1", "k2", "k3",
121 "k4", "k5", "k6", "k7"
122 };
123
124 static const char * const amd64_zmmh_names[] =
125 {
126 "zmm0h", "zmm1h", "zmm2h", "zmm3h",
127 "zmm4h", "zmm5h", "zmm6h", "zmm7h",
128 "zmm8h", "zmm9h", "zmm10h", "zmm11h",
129 "zmm12h", "zmm13h", "zmm14h", "zmm15h",
130 "zmm16h", "zmm17h", "zmm18h", "zmm19h",
131 "zmm20h", "zmm21h", "zmm22h", "zmm23h",
132 "zmm24h", "zmm25h", "zmm26h", "zmm27h",
133 "zmm28h", "zmm29h", "zmm30h", "zmm31h"
134 };
135
136 static const char * const amd64_zmm_names[] =
137 {
138 "zmm0", "zmm1", "zmm2", "zmm3",
139 "zmm4", "zmm5", "zmm6", "zmm7",
140 "zmm8", "zmm9", "zmm10", "zmm11",
141 "zmm12", "zmm13", "zmm14", "zmm15",
142 "zmm16", "zmm17", "zmm18", "zmm19",
143 "zmm20", "zmm21", "zmm22", "zmm23",
144 "zmm24", "zmm25", "zmm26", "zmm27",
145 "zmm28", "zmm29", "zmm30", "zmm31"
146 };
147
148 static const char * const amd64_xmm_avx512_names[] = {
149 "xmm16", "xmm17", "xmm18", "xmm19",
150 "xmm20", "xmm21", "xmm22", "xmm23",
151 "xmm24", "xmm25", "xmm26", "xmm27",
152 "xmm28", "xmm29", "xmm30", "xmm31"
153 };
154
155 static const char * const amd64_pkeys_names[] = {
156 "pkru"
157 };
158
159 /* DWARF Register Number Mapping as defined in the System V psABI,
160 section 3.6. */
161
162 static int amd64_dwarf_regmap[] =
163 {
164 /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI. */
165 AMD64_RAX_REGNUM, AMD64_RDX_REGNUM,
166 AMD64_RCX_REGNUM, AMD64_RBX_REGNUM,
167 AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
168
169 /* Frame Pointer Register RBP. */
170 AMD64_RBP_REGNUM,
171
172 /* Stack Pointer Register RSP. */
173 AMD64_RSP_REGNUM,
174
175 /* Extended Integer Registers 8 - 15. */
176 AMD64_R8_REGNUM, /* %r8 */
177 AMD64_R9_REGNUM, /* %r9 */
178 AMD64_R10_REGNUM, /* %r10 */
179 AMD64_R11_REGNUM, /* %r11 */
180 AMD64_R12_REGNUM, /* %r12 */
181 AMD64_R13_REGNUM, /* %r13 */
182 AMD64_R14_REGNUM, /* %r14 */
183 AMD64_R15_REGNUM, /* %r15 */
184
185 /* Return Address RA. Mapped to RIP. */
186 AMD64_RIP_REGNUM,
187
188 /* SSE Registers 0 - 7. */
189 AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
190 AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
191 AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
192 AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
193
194 /* Extended SSE Registers 8 - 15. */
195 AMD64_XMM0_REGNUM + 8, AMD64_XMM0_REGNUM + 9,
196 AMD64_XMM0_REGNUM + 10, AMD64_XMM0_REGNUM + 11,
197 AMD64_XMM0_REGNUM + 12, AMD64_XMM0_REGNUM + 13,
198 AMD64_XMM0_REGNUM + 14, AMD64_XMM0_REGNUM + 15,
199
200 /* Floating Point Registers 0-7. */
201 AMD64_ST0_REGNUM + 0, AMD64_ST0_REGNUM + 1,
202 AMD64_ST0_REGNUM + 2, AMD64_ST0_REGNUM + 3,
203 AMD64_ST0_REGNUM + 4, AMD64_ST0_REGNUM + 5,
204 AMD64_ST0_REGNUM + 6, AMD64_ST0_REGNUM + 7,
205
206 /* MMX Registers 0 - 7.
207 We have to handle those registers specifically, as their register
208 number within GDB depends on the target (or they may not even be
209 available at all). */
210 -1, -1, -1, -1, -1, -1, -1, -1,
211
212 /* Control and Status Flags Register. */
213 AMD64_EFLAGS_REGNUM,
214
215 /* Selector Registers. */
216 AMD64_ES_REGNUM,
217 AMD64_CS_REGNUM,
218 AMD64_SS_REGNUM,
219 AMD64_DS_REGNUM,
220 AMD64_FS_REGNUM,
221 AMD64_GS_REGNUM,
222 -1,
223 -1,
224
225 /* Segment Base Address Registers. */
226 -1,
227 -1,
228 -1,
229 -1,
230
231 /* Special Selector Registers. */
232 -1,
233 -1,
234
235 /* Floating Point Control Registers. */
236 AMD64_MXCSR_REGNUM,
237 AMD64_FCTRL_REGNUM,
238 AMD64_FSTAT_REGNUM
239 };
240
241 static const int amd64_dwarf_regmap_len =
242 (sizeof (amd64_dwarf_regmap) / sizeof (amd64_dwarf_regmap[0]));
243
244 /* Convert DWARF register number REG to the appropriate register
245 number used by GDB. */
246
247 static int
248 amd64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
249 {
250 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
251 int ymm0_regnum = tdep->ymm0_regnum;
252 int regnum = -1;
253
254 if (reg >= 0 && reg < amd64_dwarf_regmap_len)
255 regnum = amd64_dwarf_regmap[reg];
256
257 if (ymm0_regnum >= 0
258 && i386_xmm_regnum_p (gdbarch, regnum))
259 regnum += ymm0_regnum - I387_XMM0_REGNUM (tdep);
260
261 return regnum;
262 }
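
/* Example (editorial sketch, not part of the original source): DWARF
   register 17 maps to %xmm0 through the table above; on a target with
   AVX, the adjustment after the table lookup translates it further to
   the %ymm0 pseudo register, whose low half aliases %xmm0.  */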
263
264 /* Map architectural register numbers to gdb register numbers. */
265
266 static const int amd64_arch_regmap[16] =
267 {
268 AMD64_RAX_REGNUM, /* %rax */
269 AMD64_RCX_REGNUM, /* %rcx */
270 AMD64_RDX_REGNUM, /* %rdx */
271 AMD64_RBX_REGNUM, /* %rbx */
272 AMD64_RSP_REGNUM, /* %rsp */
273 AMD64_RBP_REGNUM, /* %rbp */
274 AMD64_RSI_REGNUM, /* %rsi */
275 AMD64_RDI_REGNUM, /* %rdi */
276 AMD64_R8_REGNUM, /* %r8 */
277 AMD64_R9_REGNUM, /* %r9 */
278 AMD64_R10_REGNUM, /* %r10 */
279 AMD64_R11_REGNUM, /* %r11 */
280 AMD64_R12_REGNUM, /* %r12 */
281 AMD64_R13_REGNUM, /* %r13 */
282 AMD64_R14_REGNUM, /* %r14 */
283 AMD64_R15_REGNUM /* %r15 */
284 };
285
286 static const int amd64_arch_regmap_len =
287 (sizeof (amd64_arch_regmap) / sizeof (amd64_arch_regmap[0]));
288
289 /* Convert architectural register number REG to the appropriate register
290 number used by GDB. */
291
292 static int
293 amd64_arch_reg_to_regnum (int reg)
294 {
295 gdb_assert (reg >= 0 && reg < amd64_arch_regmap_len);
296
297 return amd64_arch_regmap[reg];
298 }
299
300 /* Register names for byte pseudo-registers. */
301
302 static const char * const amd64_byte_names[] =
303 {
304 "al", "bl", "cl", "dl", "sil", "dil", "bpl", "spl",
305 "r8l", "r9l", "r10l", "r11l", "r12l", "r13l", "r14l", "r15l",
306 "ah", "bh", "ch", "dh"
307 };
308
309 /* Number of lower byte registers. */
310 #define AMD64_NUM_LOWER_BYTE_REGS 16
311
312 /* Register names for word pseudo-registers. */
313
314 static const char * const amd64_word_names[] =
315 {
316 "ax", "bx", "cx", "dx", "si", "di", "bp", "",
317 "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w"
318 };
319
320 /* Register names for dword pseudo-registers. */
321
322 static const char * const amd64_dword_names[] =
323 {
324 "eax", "ebx", "ecx", "edx", "esi", "edi", "ebp", "esp",
325 "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d",
326 "eip"
327 };
328
329 /* Return the name of register REGNUM. */
330
331 static const char *
332 amd64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
333 {
334 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
335 if (i386_byte_regnum_p (gdbarch, regnum))
336 return amd64_byte_names[regnum - tdep->al_regnum];
337 else if (i386_zmm_regnum_p (gdbarch, regnum))
338 return amd64_zmm_names[regnum - tdep->zmm0_regnum];
339 else if (i386_ymm_regnum_p (gdbarch, regnum))
340 return amd64_ymm_names[regnum - tdep->ymm0_regnum];
341 else if (i386_ymm_avx512_regnum_p (gdbarch, regnum))
342 return amd64_ymm_avx512_names[regnum - tdep->ymm16_regnum];
343 else if (i386_word_regnum_p (gdbarch, regnum))
344 return amd64_word_names[regnum - tdep->ax_regnum];
345 else if (i386_dword_regnum_p (gdbarch, regnum))
346 return amd64_dword_names[regnum - tdep->eax_regnum];
347 else
348 return i386_pseudo_register_name (gdbarch, regnum);
349 }
350
351 static struct value *
352 amd64_pseudo_register_read_value (struct gdbarch *gdbarch,
353 readable_regcache *regcache,
354 int regnum)
355 {
356 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
357
358 value *result_value = allocate_value (register_type (gdbarch, regnum));
359 VALUE_LVAL (result_value) = lval_register;
360 VALUE_REGNUM (result_value) = regnum;
361 gdb_byte *buf = value_contents_raw (result_value);
362
363 if (i386_byte_regnum_p (gdbarch, regnum))
364 {
365 int gpnum = regnum - tdep->al_regnum;
366
367 /* Extract (always little endian). */
368 if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
369 {
370 gpnum -= AMD64_NUM_LOWER_BYTE_REGS;
371 gdb_byte raw_buf[register_size (gdbarch, gpnum)];
372
373 /* Special handling for AH, BH, CH, DH. */
374 register_status status = regcache->raw_read (gpnum, raw_buf);
375 if (status == REG_VALID)
376 memcpy (buf, raw_buf + 1, 1);
377 else
378 mark_value_bytes_unavailable (result_value, 0,
379 TYPE_LENGTH (value_type (result_value)));
380 }
381 else
382 {
383 gdb_byte raw_buf[register_size (gdbarch, gpnum)];
384 register_status status = regcache->raw_read (gpnum, raw_buf);
385 if (status == REG_VALID)
386 memcpy (buf, raw_buf, 1);
387 else
388 mark_value_bytes_unavailable (result_value, 0,
389 TYPE_LENGTH (value_type (result_value)));
390 }
391 }
392 else if (i386_dword_regnum_p (gdbarch, regnum))
393 {
394 int gpnum = regnum - tdep->eax_regnum;
395 gdb_byte raw_buf[register_size (gdbarch, gpnum)];
396 /* Extract (always little endian). */
397 register_status status = regcache->raw_read (gpnum, raw_buf);
398 if (status == REG_VALID)
399 memcpy (buf, raw_buf, 4);
400 else
401 mark_value_bytes_unavailable (result_value, 0,
402 TYPE_LENGTH (value_type (result_value)));
403 }
404 else
405 i386_pseudo_register_read_into_value (gdbarch, regcache, regnum,
406 result_value);
407
408 return result_value;
409 }
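
/* Example (editorial): reading the pseudo register %eax extracts the
   low four bytes of the raw %rax (always little endian), and reading
   %ah extracts byte 1 of %rax; if the underlying raw read fails, the
   whole pseudo value is marked unavailable.  */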
410
411 static void
412 amd64_pseudo_register_write (struct gdbarch *gdbarch,
413 struct regcache *regcache,
414 int regnum, const gdb_byte *buf)
415 {
416 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
417
418 if (i386_byte_regnum_p (gdbarch, regnum))
419 {
420 int gpnum = regnum - tdep->al_regnum;
421
422 if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
423 {
424 gpnum -= AMD64_NUM_LOWER_BYTE_REGS;
425 gdb_byte raw_buf[register_size (gdbarch, gpnum)];
426
427 /* Read ... AH, BH, CH, DH. */
428 regcache->raw_read (gpnum, raw_buf);
429 /* ... Modify ... (always little endian). */
430 memcpy (raw_buf + 1, buf, 1);
431 /* ... Write. */
432 regcache->raw_write (gpnum, raw_buf);
433 }
434 else
435 {
436 gdb_byte raw_buf[register_size (gdbarch, gpnum)];
437
438 /* Read ... */
439 regcache->raw_read (gpnum, raw_buf);
440 /* ... Modify ... (always little endian). */
441 memcpy (raw_buf, buf, 1);
442 /* ... Write. */
443 regcache->raw_write (gpnum, raw_buf);
444 }
445 }
446 else if (i386_dword_regnum_p (gdbarch, regnum))
447 {
448 int gpnum = regnum - tdep->eax_regnum;
449 gdb_byte raw_buf[register_size (gdbarch, gpnum)];
450
451 /* Read ... */
452 regcache->raw_read (gpnum, raw_buf);
453 /* ... Modify ... (always little endian). */
454 memcpy (raw_buf, buf, 4);
455 /* ... Write. */
456 regcache->raw_write (gpnum, raw_buf);
457 }
458 else
459 i386_pseudo_register_write (gdbarch, regcache, regnum, buf);
460 }
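
/* Example (editorial): writing the pseudo register %ah (byte register
   number 16, which maps back to raw register 0, %rax) reads all eight
   bytes of %rax, patches byte 1, and writes the result back, so the
   bits outside %ah are preserved.  */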
461
462 /* Implement the 'ax_pseudo_register_collect' gdbarch method. */
463
464 static int
465 amd64_ax_pseudo_register_collect (struct gdbarch *gdbarch,
466 struct agent_expr *ax, int regnum)
467 {
468 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
469
470 if (i386_byte_regnum_p (gdbarch, regnum))
471 {
472 int gpnum = regnum - tdep->al_regnum;
473
474 if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
475 ax_reg_mask (ax, gpnum - AMD64_NUM_LOWER_BYTE_REGS);
476 else
477 ax_reg_mask (ax, gpnum);
478 return 0;
479 }
480 else if (i386_dword_regnum_p (gdbarch, regnum))
481 {
482 int gpnum = regnum - tdep->eax_regnum;
483
484 ax_reg_mask (ax, gpnum);
485 return 0;
486 }
487 else
488 return i386_ax_pseudo_register_collect (gdbarch, ax, regnum);
489 }
490
491 \f
492
493 /* Register classes as defined in the psABI. */
494
495 enum amd64_reg_class
496 {
497 AMD64_INTEGER,
498 AMD64_SSE,
499 AMD64_SSEUP,
500 AMD64_X87,
501 AMD64_X87UP,
502 AMD64_COMPLEX_X87,
503 AMD64_NO_CLASS,
504 AMD64_MEMORY
505 };
506
507 /* Return the union class of CLASS1 and CLASS2. See the psABI for
508 details. */
509
510 static enum amd64_reg_class
511 amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
512 {
513 /* Rule (a): If both classes are equal, this is the resulting class. */
514 if (class1 == class2)
515 return class1;
516
517 /* Rule (b): If one of the classes is NO_CLASS, the resulting class
518 is the other class. */
519 if (class1 == AMD64_NO_CLASS)
520 return class2;
521 if (class2 == AMD64_NO_CLASS)
522 return class1;
523
524 /* Rule (c): If one of the classes is MEMORY, the result is MEMORY. */
525 if (class1 == AMD64_MEMORY || class2 == AMD64_MEMORY)
526 return AMD64_MEMORY;
527
528 /* Rule (d): If one of the classes is INTEGER, the result is INTEGER. */
529 if (class1 == AMD64_INTEGER || class2 == AMD64_INTEGER)
530 return AMD64_INTEGER;
531
532 /* Rule (e): If one of the classes is X87, X87UP, COMPLEX_X87 class,
533 MEMORY is used as class. */
534 if (class1 == AMD64_X87 || class1 == AMD64_X87UP
535 || class1 == AMD64_COMPLEX_X87 || class2 == AMD64_X87
536 || class2 == AMD64_X87UP || class2 == AMD64_COMPLEX_X87)
537 return AMD64_MEMORY;
538
539 /* Rule (f): Otherwise class SSE is used. */
540 return AMD64_SSE;
541 }
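
/* Illustrative examples of the merge rules above (an editorial sketch,
   not part of the original source):

     merge (INTEGER,  SSE)   => INTEGER   (rule d)
     merge (NO_CLASS, SSE)   => SSE       (rule b)
     merge (X87,      SSE)   => MEMORY    (rule e)
     merge (SSE,      SSEUP) => SSE       (rule f)

   For example, a union { long l; double d; } has an INTEGER field and
   an SSE field in its single eightbyte, so the union as a whole is
   classified INTEGER.  */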
542
543 static void amd64_classify (struct type *type, enum amd64_reg_class theclass[2]);
544
545 /* Return true if TYPE is a structure or union with unaligned fields. */
546
547 static bool
548 amd64_has_unaligned_fields (struct type *type)
549 {
550 if (type->code () == TYPE_CODE_STRUCT
551 || type->code () == TYPE_CODE_UNION)
552 {
553 for (int i = 0; i < type->num_fields (); i++)
554 {
555 struct type *subtype = check_typedef (type->field (i).type ());
556 int bitpos = TYPE_FIELD_BITPOS (type, i);
557
558 /* Ignore static fields, empty fields (for example nested
559 empty structures), and bitfields (these are handled by
560 the caller). */
561 if (field_is_static (&type->field (i))
562 || (TYPE_FIELD_BITSIZE (type, i) == 0
563 && TYPE_LENGTH (subtype) == 0)
564 || TYPE_FIELD_PACKED (type, i))
565 continue;
566
567 if (bitpos % 8 != 0)
568 return true;
569
570 int align = type_align (subtype);
571 if (align == 0)
572 error (_("could not determine alignment of type"));
573
574 int bytepos = bitpos / 8;
575 if (bytepos % align != 0)
576 return true;
577
578 if (amd64_has_unaligned_fields (subtype))
579 return true;
580 }
581 }
582
583 return false;
584 }
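
/* Illustrative example (not part of the original source): given

     struct __attribute__ ((packed)) s { char c; int i; };

   field `i' sits at bit offset 8, i.e. byte offset 1, which is not a
   multiple of int's 4-byte alignment, so this function returns true
   and the struct will be classified AMD64_MEMORY.  */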
585
586 /* Classify field I of TYPE starting at BITOFFSET according to the rules for
587 structures and union types, and store the result in THECLASS. */
588
589 static void
590 amd64_classify_aggregate_field (struct type *type, int i,
591 enum amd64_reg_class theclass[2],
592 unsigned int bitoffset)
593 {
594 struct type *subtype = check_typedef (type->field (i).type ());
595 int bitpos = bitoffset + TYPE_FIELD_BITPOS (type, i);
596 int pos = bitpos / 64;
597 enum amd64_reg_class subclass[2];
598 int bitsize = TYPE_FIELD_BITSIZE (type, i);
599 int endpos;
600
601 if (bitsize == 0)
602 bitsize = TYPE_LENGTH (subtype) * 8;
603 endpos = (bitpos + bitsize - 1) / 64;
604
605 /* Ignore static fields, or empty fields, for example nested
606 empty structures. */
607 if (field_is_static (&type->field (i)) || bitsize == 0)
608 return;
609
610 if (subtype->code () == TYPE_CODE_STRUCT
611 || subtype->code () == TYPE_CODE_UNION)
612 {
613 /* Each field of an object is classified recursively. */
614 int j;
615 for (j = 0; j < subtype->num_fields (); j++)
616 amd64_classify_aggregate_field (subtype, j, theclass, bitpos);
617 return;
618 }
619
620 gdb_assert (pos == 0 || pos == 1);
621
622 amd64_classify (subtype, subclass);
623 theclass[pos] = amd64_merge_classes (theclass[pos], subclass[0]);
624 if (bitsize <= 64 && pos == 0 && endpos == 1)
625 /* This is a bit of an odd case: We have a field that would
626 normally fit in one of the two eightbytes, except that
627 it is placed in such a way that it straddles them.
628 This has been seen with a structure containing an array.
629
630 The ABI is a bit unclear in this case, but we assume that
631 this field's class (stored in subclass[0]) must also be merged
632 into class[1]. In other words, our field has a piece stored
633 in the second eight-byte, and thus its class applies to
634 the second eight-byte as well.
635
636 In the case where the field length exceeds 8 bytes,
637 it should not be necessary to merge the field class
638 into class[1]. As LEN > 8, subclass[1] is necessarily
639 different from AMD64_NO_CLASS. If subclass[1] is equal
640 to subclass[0], then the normal class[1]/subclass[1]
641 merging will take care of everything. For subclass[1]
642 to be different from subclass[0], I can only see the case
643 where we have an SSE/SSEUP or X87/X87UP pair, which both
644 use up all 16 bytes of the aggregate, and are already
645 handled just fine (because each portion sits on its own
646 8-byte). */
647 theclass[1] = amd64_merge_classes (theclass[1], subclass[0]);
648 if (pos == 0)
649 theclass[1] = amd64_merge_classes (theclass[1], subclass[1]);
650 }
651
652 /* Classify TYPE according to the rules for aggregate (structures and
653 arrays) and union types, and store the result in CLASS. */
654
655 static void
656 amd64_classify_aggregate (struct type *type, enum amd64_reg_class theclass[2])
657 {
658 /* 1. If the size of an object is larger than two eightbytes, or it has
659 unaligned fields, it has class memory. */
660 if (TYPE_LENGTH (type) > 16 || amd64_has_unaligned_fields (type))
661 {
662 theclass[0] = theclass[1] = AMD64_MEMORY;
663 return;
664 }
665
666 /* 2. Both eightbytes get initialized to class NO_CLASS. */
667 theclass[0] = theclass[1] = AMD64_NO_CLASS;
668
669 /* 3. Each field of an object is classified recursively so that
670 always two fields are considered. The resulting class is
671 calculated according to the classes of the fields in the
672 eightbyte: */
673
674 if (type->code () == TYPE_CODE_ARRAY)
675 {
676 struct type *subtype = check_typedef (TYPE_TARGET_TYPE (type));
677
678 /* All fields in an array have the same type. */
679 amd64_classify (subtype, theclass);
680 if (TYPE_LENGTH (type) > 8 && theclass[1] == AMD64_NO_CLASS)
681 theclass[1] = theclass[0];
682 }
683 else
684 {
685 int i;
686
687 /* Structure or union. */
688 gdb_assert (type->code () == TYPE_CODE_STRUCT
689 || type->code () == TYPE_CODE_UNION);
690
691 for (i = 0; i < type->num_fields (); i++)
692 amd64_classify_aggregate_field (type, i, theclass, 0);
693 }
694
695 /* 4. Then a post merger cleanup is done: */
696
697 /* Rule (a): If one of the classes is MEMORY, the whole argument is
698 passed in memory. */
699 if (theclass[0] == AMD64_MEMORY || theclass[1] == AMD64_MEMORY)
700 theclass[0] = theclass[1] = AMD64_MEMORY;
701
702 /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
703 SSE. */
704 if (theclass[0] == AMD64_SSEUP)
705 theclass[0] = AMD64_SSE;
706 if (theclass[1] == AMD64_SSEUP && theclass[0] != AMD64_SSE)
707 theclass[1] = AMD64_SSE;
708 }
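
/* Worked example (editorial, not part of the original source): for

     struct s { long l; double d; };

   which is 16 bytes with aligned fields, step 1 does not apply; step 3
   classifies `l' into eightbyte 0 as INTEGER and `d' into eightbyte 1
   as SSE, giving theclass[] = { AMD64_INTEGER, AMD64_SSE }: one
   integer register plus one SSE register.  */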
709
710 /* Classify TYPE, and store the result in CLASS. */
711
712 static void
713 amd64_classify (struct type *type, enum amd64_reg_class theclass[2])
714 {
715 enum type_code code = type->code ();
716 int len = TYPE_LENGTH (type);
717
718 theclass[0] = theclass[1] = AMD64_NO_CLASS;
719
720 /* Arguments of types (signed and unsigned) _Bool, char, short, int,
721 long, long long, and pointers are in the INTEGER class. Similarly,
722 range types, used by languages such as Ada, are also in the INTEGER
723 class. */
724 if ((code == TYPE_CODE_INT || code == TYPE_CODE_ENUM
725 || code == TYPE_CODE_BOOL || code == TYPE_CODE_RANGE
726 || code == TYPE_CODE_CHAR
727 || code == TYPE_CODE_PTR || TYPE_IS_REFERENCE (type))
728 && (len == 1 || len == 2 || len == 4 || len == 8))
729 theclass[0] = AMD64_INTEGER;
730
731 /* Arguments of types float, double, _Decimal32, _Decimal64 and __m64
732 are in class SSE. */
733 else if ((code == TYPE_CODE_FLT || code == TYPE_CODE_DECFLOAT)
734 && (len == 4 || len == 8))
735 /* FIXME: __m64. */
736 theclass[0] = AMD64_SSE;
737
738 /* Arguments of types __float128, _Decimal128 and __m128 are split into
739 two halves. The least significant ones belong to class SSE, the most
740 significant one to class SSEUP. */
741 else if (code == TYPE_CODE_DECFLOAT && len == 16)
742 /* FIXME: __float128, __m128. */
743 theclass[0] = AMD64_SSE, theclass[1] = AMD64_SSEUP;
744
745 /* The 64-bit mantissa of arguments of type long double belongs to
746 class X87, the 16-bit exponent plus 6 bytes of padding belongs to
747 class X87UP. */
748 else if (code == TYPE_CODE_FLT && len == 16)
749 /* Class X87 and X87UP. */
750 theclass[0] = AMD64_X87, theclass[1] = AMD64_X87UP;
751
752 /* Arguments of complex T where T is one of the types float or
753 double get treated as if they are implemented as:
754
755 struct complexT {
756 T real;
757 T imag;
758 };
759
760 */
761 else if (code == TYPE_CODE_COMPLEX && len == 8)
762 theclass[0] = AMD64_SSE;
763 else if (code == TYPE_CODE_COMPLEX && len == 16)
764 theclass[0] = theclass[1] = AMD64_SSE;
765
766 /* A variable of type complex long double is classified as type
767 COMPLEX_X87. */
768 else if (code == TYPE_CODE_COMPLEX && len == 32)
769 theclass[0] = AMD64_COMPLEX_X87;
770
771 /* Aggregates. */
772 else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
773 || code == TYPE_CODE_UNION)
774 amd64_classify_aggregate (type, theclass);
775 }
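
/* Summary of the cases above (editorial aid, not part of the original
   source):

     int, long, enum, pointers (len 1/2/4/8)  -> { INTEGER, NO_CLASS }
     float, double                            -> { SSE, NO_CLASS }
     long double (len 16)                     -> { X87, X87UP }
     complex float (len 8)                    -> { SSE, NO_CLASS }
     complex double (len 16)                  -> { SSE, SSE }
     complex long double (len 32)             -> { COMPLEX_X87, NO_CLASS }
     arrays, structs, unions                  -> amd64_classify_aggregate.  */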
776
777 static enum return_value_convention
778 amd64_return_value (struct gdbarch *gdbarch, struct value *function,
779 struct type *type, struct regcache *regcache,
780 gdb_byte *readbuf, const gdb_byte *writebuf)
781 {
782 enum amd64_reg_class theclass[2];
783 int len = TYPE_LENGTH (type);
784 static int integer_regnum[] = { AMD64_RAX_REGNUM, AMD64_RDX_REGNUM };
785 static int sse_regnum[] = { AMD64_XMM0_REGNUM, AMD64_XMM1_REGNUM };
786 int integer_reg = 0;
787 int sse_reg = 0;
788 int i;
789
790 gdb_assert (!(readbuf && writebuf));
791
792 /* 1. Classify the return type with the classification algorithm. */
793 amd64_classify (type, theclass);
794
795 /* 2. If the type has class MEMORY, then the caller provides space
796 for the return value and passes the address of this storage in
797 %rdi as if it were the first argument to the function. In effect,
798 this address becomes a hidden first argument.
799
800 On return %rax will contain the address that has been passed in
801 by the caller in %rdi. */
802 if (theclass[0] == AMD64_MEMORY)
803 {
804 /* As indicated by the comment above, the ABI guarantees that we
805 can always find the return value just after the function has
806 returned. */
807
808 if (readbuf)
809 {
810 ULONGEST addr;
811
812 regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
813 read_memory (addr, readbuf, TYPE_LENGTH (type));
814 }
815
816 return RETURN_VALUE_ABI_RETURNS_ADDRESS;
817 }
818
819 /* 8. If the class is COMPLEX_X87, the real part of the value is
820 returned in %st0 and the imaginary part in %st1. */
821 if (theclass[0] == AMD64_COMPLEX_X87)
822 {
823 if (readbuf)
824 {
825 regcache->raw_read (AMD64_ST0_REGNUM, readbuf);
826 regcache->raw_read (AMD64_ST1_REGNUM, readbuf + 16);
827 }
828
829 if (writebuf)
830 {
831 i387_return_value (gdbarch, regcache);
832 regcache->raw_write (AMD64_ST0_REGNUM, writebuf);
833 regcache->raw_write (AMD64_ST1_REGNUM, writebuf + 16);
834
835 /* Fix up the tag word such that both %st(0) and %st(1) are
836 marked as valid. */
837 regcache_raw_write_unsigned (regcache, AMD64_FTAG_REGNUM, 0xfff);
838 }
839
840 return RETURN_VALUE_REGISTER_CONVENTION;
841 }
842
843 gdb_assert (theclass[1] != AMD64_MEMORY);
844 gdb_assert (len <= 16);
845
846 for (i = 0; len > 0; i++, len -= 8)
847 {
848 int regnum = -1;
849 int offset = 0;
850
851 switch (theclass[i])
852 {
853 case AMD64_INTEGER:
854 /* 3. If the class is INTEGER, the next available register
855 of the sequence %rax, %rdx is used. */
856 regnum = integer_regnum[integer_reg++];
857 break;
858
859 case AMD64_SSE:
860 /* 4. If the class is SSE, the next available SSE register
861 of the sequence %xmm0, %xmm1 is used. */
862 regnum = sse_regnum[sse_reg++];
863 break;
864
865 case AMD64_SSEUP:
866 /* 5. If the class is SSEUP, the eightbyte is passed in the
867 upper half of the last used SSE register. */
868 gdb_assert (sse_reg > 0);
869 regnum = sse_regnum[sse_reg - 1];
870 offset = 8;
871 break;
872
873 case AMD64_X87:
874 /* 6. If the class is X87, the value is returned on the X87
875 stack in %st0 as 80-bit x87 number. */
876 regnum = AMD64_ST0_REGNUM;
877 if (writebuf)
878 i387_return_value (gdbarch, regcache);
879 break;
880
881 case AMD64_X87UP:
882 /* 7. If the class is X87UP, the value is returned together
883 with the previous X87 value in %st0. */
884 gdb_assert (i > 0 && theclass[0] == AMD64_X87);
885 regnum = AMD64_ST0_REGNUM;
886 offset = 8;
887 len = 2;
888 break;
889
890 case AMD64_NO_CLASS:
891 continue;
892
893 default:
894 gdb_assert (!"Unexpected register class.");
895 }
896
897 gdb_assert (regnum != -1);
898
899 if (readbuf)
900 regcache->raw_read_part (regnum, offset, std::min (len, 8),
901 readbuf + i * 8);
902 if (writebuf)
903 regcache->raw_write_part (regnum, offset, std::min (len, 8),
904 writebuf + i * 8);
905 }
906
907 return RETURN_VALUE_REGISTER_CONVENTION;
908 }
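
/* Example (editorial, not part of the original source): a function
   returning struct { long l; double d; } has classes { INTEGER, SSE },
   so the loop above transfers the first eightbyte in %rax and the
   second in %xmm0.  A 16-byte long double (X87 + X87UP) instead comes
   back entirely in %st0.  */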
909 \f
910
911 static CORE_ADDR
912 amd64_push_arguments (struct regcache *regcache, int nargs, struct value **args,
913 CORE_ADDR sp, function_call_return_method return_method)
914 {
915 static int integer_regnum[] =
916 {
917 AMD64_RDI_REGNUM, /* %rdi */
918 AMD64_RSI_REGNUM, /* %rsi */
919 AMD64_RDX_REGNUM, /* %rdx */
920 AMD64_RCX_REGNUM, /* %rcx */
921 AMD64_R8_REGNUM, /* %r8 */
922 AMD64_R9_REGNUM /* %r9 */
923 };
924 static int sse_regnum[] =
925 {
926 /* %xmm0 ... %xmm7 */
927 AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
928 AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
929 AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
930 AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
931 };
932 struct value **stack_args = XALLOCAVEC (struct value *, nargs);
933 int num_stack_args = 0;
934 int num_elements = 0;
935 int element = 0;
936 int integer_reg = 0;
937 int sse_reg = 0;
938 int i;
939
940 /* Reserve a register for the "hidden" argument. */
941 if (return_method == return_method_struct)
942 integer_reg++;
943
944 for (i = 0; i < nargs; i++)
945 {
946 struct type *type = value_type (args[i]);
947 int len = TYPE_LENGTH (type);
948 enum amd64_reg_class theclass[2];
949 int needed_integer_regs = 0;
950 int needed_sse_regs = 0;
951 int j;
952
953 /* Classify argument. */
954 amd64_classify (type, theclass);
955
956 /* Calculate the number of integer and SSE registers needed for
957 this argument. */
958 for (j = 0; j < 2; j++)
959 {
960 if (theclass[j] == AMD64_INTEGER)
961 needed_integer_regs++;
962 else if (theclass[j] == AMD64_SSE)
963 needed_sse_regs++;
964 }
965
966 /* Check whether enough registers are available, and if the
967 argument should be passed in registers at all. */
968 if (integer_reg + needed_integer_regs > ARRAY_SIZE (integer_regnum)
969 || sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum)
970 || (needed_integer_regs == 0 && needed_sse_regs == 0))
971 {
972 /* The argument will be passed on the stack. */
973 num_elements += ((len + 7) / 8);
974 stack_args[num_stack_args++] = args[i];
975 }
976 else
977 {
978 /* The argument will be passed in registers. */
979 const gdb_byte *valbuf = value_contents (args[i]);
980 gdb_byte buf[8];
981
982 gdb_assert (len <= 16);
983
984 for (j = 0; len > 0; j++, len -= 8)
985 {
986 int regnum = -1;
987 int offset = 0;
988
989 switch (theclass[j])
990 {
991 case AMD64_INTEGER:
992 regnum = integer_regnum[integer_reg++];
993 break;
994
995 case AMD64_SSE:
996 regnum = sse_regnum[sse_reg++];
997 break;
998
999 case AMD64_SSEUP:
1000 gdb_assert (sse_reg > 0);
1001 regnum = sse_regnum[sse_reg - 1];
1002 offset = 8;
1003 break;
1004
1005 case AMD64_NO_CLASS:
1006 continue;
1007
1008 default:
1009 gdb_assert (!"Unexpected register class.");
1010 }
1011
1012 gdb_assert (regnum != -1);
1013 memset (buf, 0, sizeof buf);
1014 memcpy (buf, valbuf + j * 8, std::min (len, 8));
1015 regcache->raw_write_part (regnum, offset, 8, buf);
1016 }
1017 }
1018 }
1019
1020 /* Allocate space for the arguments on the stack. */
1021 sp -= num_elements * 8;
1022
1023 /* The psABI says that "The end of the input argument area shall be
1024 aligned on a 16 byte boundary." */
1025 sp &= ~0xf;
1026
1027 /* Write out the arguments to the stack. */
1028 for (i = 0; i < num_stack_args; i++)
1029 {
1030 struct type *type = value_type (stack_args[i]);
1031 const gdb_byte *valbuf = value_contents (stack_args[i]);
1032 int len = TYPE_LENGTH (type);
1033
1034 write_memory (sp + element * 8, valbuf, len);
1035 element += ((len + 7) / 8);
1036 }
1037
1038 /* The psABI says that "For calls that may call functions that use
1039 varargs or stdargs (prototype-less calls or calls to functions
1040 containing ellipsis (...) in the declaration) %al is used as
1041 hidden argument to specify the number of SSE registers used." */
1042 regcache_raw_write_unsigned (regcache, AMD64_RAX_REGNUM, sse_reg);
1043 return sp;
1044 }
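
/* Example (editorial, not part of the original source): for a call
   f (1, 2.0, big) where `big' is a 32-byte struct, the int is passed
   in %rdi, the double in %xmm0, and `big' (class MEMORY, being larger
   than two eightbytes) is written to the stack; %al is then set to 1,
   the number of SSE registers used.  */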
1045
1046 static CORE_ADDR
1047 amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1048 struct regcache *regcache, CORE_ADDR bp_addr,
1049 int nargs, struct value **args, CORE_ADDR sp,
1050 function_call_return_method return_method,
1051 CORE_ADDR struct_addr)
1052 {
1053 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1054 gdb_byte buf[8];
1055
1056 /* BND registers can hold arbitrary values at the moment of the
1057 inferior call. This can cause boundary violations that are not
1058 due to a real bug or even desired by the user. The best we can do
1059 is reset the BND registers to their INIT state, allowing access to
1060 the whole memory, before pushing the inferior call. */
1061 i387_reset_bnd_regs (gdbarch, regcache);
1062
1063 /* Pass arguments. */
1064 sp = amd64_push_arguments (regcache, nargs, args, sp, return_method);
1065
1066 /* Pass "hidden" argument. */
1067 if (return_method == return_method_struct)
1068 {
1069 store_unsigned_integer (buf, 8, byte_order, struct_addr);
1070 regcache->cooked_write (AMD64_RDI_REGNUM, buf);
1071 }
1072
1073 /* Store return address. */
1074 sp -= 8;
1075 store_unsigned_integer (buf, 8, byte_order, bp_addr);
1076 write_memory (sp, buf, 8);
1077
1078 /* Finally, update the stack pointer... */
1079 store_unsigned_integer (buf, 8, byte_order, sp);
1080 regcache->cooked_write (AMD64_RSP_REGNUM, buf);
1081
1082 /* ...and fake a frame pointer. */
1083 regcache->cooked_write (AMD64_RBP_REGNUM, buf);
1084
1085 return sp + 16;
1086 }
1087 \f
1088 /* Displaced instruction handling. */
1089
1090 /* A partially decoded instruction.
1091 This contains enough details for displaced stepping purposes. */
1092
1093 struct amd64_insn
1094 {
1095 /* The number of opcode bytes. */
1096 int opcode_len;
1097 /* The offset of the REX/VEX instruction encoding prefix or -1 if
1098 not present. */
1099 int enc_prefix_offset;
1100 /* The offset to the first opcode byte. */
1101 int opcode_offset;
1102 /* The offset to the modrm byte or -1 if not present. */
1103 int modrm_offset;
1104
1105 /* The raw instruction. */
1106 gdb_byte *raw_insn;
1107 };
1108
1109 struct amd64_displaced_step_copy_insn_closure
1110 : public displaced_step_copy_insn_closure
1111 {
1112 amd64_displaced_step_copy_insn_closure (int insn_buf_len)
1113 : insn_buf (insn_buf_len, 0)
1114 {}
1115
1116 /* For rip-relative insns, saved copy of the reg we use instead of %rip. */
1117 int tmp_used = 0;
1118 int tmp_regno;
1119 ULONGEST tmp_save;
1120
1121 /* Details of the instruction. */
1122 struct amd64_insn insn_details;
1123
1124 /* The possibly modified insn. */
1125 gdb::byte_vector insn_buf;
1126 };
1127
1128 /* WARNING: Keep onebyte_has_modrm, twobyte_has_modrm in sync with
1129 ../opcodes/i386-dis.c (until libopcodes exports them, or an alternative,
1130 at which point delete these in favor of libopcodes' versions). */
1131
1132 static const unsigned char onebyte_has_modrm[256] = {
1133 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
1134 /* ------------------------------- */
1135 /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
1136 /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
1137 /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
1138 /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
1139 /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
1140 /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
1141 /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
1142 /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
1143 /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
1144 /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
1145 /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
1146 /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
1147 /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
1148 /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
1149 /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
1150 /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1 /* f0 */
1151 /* ------------------------------- */
1152 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
1153 };
1154
1155 static const unsigned char twobyte_has_modrm[256] = {
1156 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
1157 /* ------------------------------- */
1158 /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
1159 /* 10 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 1f */
1160 /* 20 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 2f */
1161 /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
1162 /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
1163 /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
1164 /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
1165 /* 70 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 7f */
1166 /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
1167 /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
1168 /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
1169 /* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */
1170 /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
1171 /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
1172 /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
1173 /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0 /* ff */
1174 /* ------------------------------- */
1175 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
1176 };
1177
1178 static int amd64_syscall_p (const struct amd64_insn *insn, int *lengthp);
1179
1180 static int
1181 rex_prefix_p (gdb_byte pfx)
1182 {
1183 return REX_PREFIX_P (pfx);
1184 }
1185
1186 /* True if PFX is the start of the 2-byte VEX prefix. */
1187
1188 static bool
1189 vex2_prefix_p (gdb_byte pfx)
1190 {
1191 return pfx == 0xc5;
1192 }
1193
1194 /* True if PFX is the start of the 3-byte VEX prefix. */
1195
1196 static bool
1197 vex3_prefix_p (gdb_byte pfx)
1198 {
1199 return pfx == 0xc4;
1200 }
1201
1202 /* Skip the legacy instruction prefixes in INSN.
1203 We assume INSN is properly sentineled so we don't have to worry
1204 about falling off the end of the buffer. */
1205
1206 static gdb_byte *
1207 amd64_skip_prefixes (gdb_byte *insn)
1208 {
1209 while (1)
1210 {
1211 switch (*insn)
1212 {
1213 case DATA_PREFIX_OPCODE:
1214 case ADDR_PREFIX_OPCODE:
1215 case CS_PREFIX_OPCODE:
1216 case DS_PREFIX_OPCODE:
1217 case ES_PREFIX_OPCODE:
1218 case FS_PREFIX_OPCODE:
1219 case GS_PREFIX_OPCODE:
1220 case SS_PREFIX_OPCODE:
1221 case LOCK_PREFIX_OPCODE:
1222 case REPE_PREFIX_OPCODE:
1223 case REPNE_PREFIX_OPCODE:
1224 ++insn;
1225 continue;
1226 default:
1227 break;
1228 }
1229 break;
1230 }
1231
1232 return insn;
1233 }
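
/* Example (editorial, not part of the original source): given the
   bytes "f0 48 0f c1 08" (lock xadd %rcx,(%rax)), the 0xf0 LOCK
   prefix is skipped and the returned pointer addresses the REX byte
   0x48.  */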
1234
1235 /* Return an integer register (other than RSP) that is unused as an input
1236 operand in INSN.
1237 In order to not require adding a rex prefix if the insn doesn't already
1238 have one, the result is restricted to RAX ... RDI, sans RSP.
1239 The register numbering of the result follows architecture ordering,
1240 e.g. RDI = 7. */
1241
1242 static int
1243 amd64_get_unused_input_int_reg (const struct amd64_insn *details)
1244 {
1245 /* 1 bit for each reg */
1246 int used_regs_mask = 0;
1247
1248 /* There can be at most 3 int regs used as inputs in an insn, and we have
1249 7 to choose from (RAX ... RDI, sans RSP).
1250 This allows us to take a conservative approach and keep things simple.
1251 E.g. By avoiding RAX, we don't have to specifically watch for opcodes
1252 that implicitly specify RAX. */
1253
1254 /* Avoid RAX. */
1255 used_regs_mask |= 1 << EAX_REG_NUM;
1256 /* Similarly avoid RDX, an implicit operand in divides. */
1257 used_regs_mask |= 1 << EDX_REG_NUM;
1258 /* Avoid RSP. */
1259 used_regs_mask |= 1 << ESP_REG_NUM;
1260
1261 /* If the opcode is one byte long and there's no ModRM byte,
1262 assume the opcode specifies a register. */
1263 if (details->opcode_len == 1 && details->modrm_offset == -1)
1264 used_regs_mask |= 1 << (details->raw_insn[details->opcode_offset] & 7);
1265
1266 /* Mark used regs in the modrm/sib bytes. */
1267 if (details->modrm_offset != -1)
1268 {
1269 int modrm = details->raw_insn[details->modrm_offset];
1270 int mod = MODRM_MOD_FIELD (modrm);
1271 int reg = MODRM_REG_FIELD (modrm);
1272 int rm = MODRM_RM_FIELD (modrm);
1273 int have_sib = mod != 3 && rm == 4;
1274
1275 /* Assume the reg field of the modrm byte specifies a register. */
1276 used_regs_mask |= 1 << reg;
1277
1278 if (have_sib)
1279 {
1280 int base = SIB_BASE_FIELD (details->raw_insn[details->modrm_offset + 1]);
1281 int idx = SIB_INDEX_FIELD (details->raw_insn[details->modrm_offset + 1]);
1282 used_regs_mask |= 1 << base;
1283 used_regs_mask |= 1 << idx;
1284 }
1285 else
1286 {
1287 used_regs_mask |= 1 << rm;
1288 }
1289 }
1290
1291 gdb_assert (used_regs_mask < 256);
1292 gdb_assert (used_regs_mask != 255);
1293
1294 /* Finally, find a free reg. */
1295 {
1296 int i;
1297
1298 for (i = 0; i < 8; ++i)
1299 {
1300 if (! (used_regs_mask & (1 << i)))
1301 return i;
1302 }
1303
1304 /* We shouldn't get here. */
1305 internal_error (__FILE__, __LINE__, _("unable to find free reg"));
1306 }
1307 }
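
/* Example (editorial, not part of the original source): for
   "add %rsi,(%rbx)" the ModRM byte marks %rsi (reg field) and %rbx
   (rm field) as used; together with the always-avoided %rax, %rdx and
   %rsp this leaves %rcx, %rbp and %rdi free, and the loop returns the
   lowest one, 1 (%rcx).  */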
1308
1309 /* Extract the details of INSN that we need. */
1310
1311 static void
1312 amd64_get_insn_details (gdb_byte *insn, struct amd64_insn *details)
1313 {
1314 gdb_byte *start = insn;
1315 int need_modrm;
1316
1317 details->raw_insn = insn;
1318
1319 details->opcode_len = -1;
1320 details->enc_prefix_offset = -1;
1321 details->opcode_offset = -1;
1322 details->modrm_offset = -1;
1323
1324 /* Skip legacy instruction prefixes. */
1325 insn = amd64_skip_prefixes (insn);
1326
1327 /* Skip REX/VEX instruction encoding prefixes. */
1328 if (rex_prefix_p (*insn))
1329 {
1330 details->enc_prefix_offset = insn - start;
1331 ++insn;
1332 }
1333 else if (vex2_prefix_p (*insn))
1334 {
1335 /* Don't record the offset in this case because this prefix has
1336 no REX.B equivalent. */
1337 insn += 2;
1338 }
1339 else if (vex3_prefix_p (*insn))
1340 {
1341 details->enc_prefix_offset = insn - start;
1342 insn += 3;
1343 }
1344
1345 details->opcode_offset = insn - start;
1346
1347 if (*insn == TWO_BYTE_OPCODE_ESCAPE)
1348 {
1349 /* Two or three-byte opcode. */
1350 ++insn;
1351 need_modrm = twobyte_has_modrm[*insn];
1352
1353 /* Check for three-byte opcode. */
1354 switch (*insn)
1355 {
1356 case 0x24:
1357 case 0x25:
1358 case 0x38:
1359 case 0x3a:
1360 case 0x7a:
1361 case 0x7b:
1362 ++insn;
1363 details->opcode_len = 3;
1364 break;
1365 default:
1366 details->opcode_len = 2;
1367 break;
1368 }
1369 }
1370 else
1371 {
1372 /* One-byte opcode. */
1373 need_modrm = onebyte_has_modrm[*insn];
1374 details->opcode_len = 1;
1375 }
1376
1377 if (need_modrm)
1378 {
1379 ++insn;
1380 details->modrm_offset = insn - start;
1381 }
1382 }
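
/* Worked example (editorial, not part of the original source): for the
   7-byte instruction

     48 8b 05 d5 de ad be	mov 0xbeadded5(%rip),%rax

   the REX prefix 0x48 is at offset 0 (enc_prefix_offset = 0), the
   single opcode byte 0x8b is at offset 1 (opcode_offset = 1,
   opcode_len = 1), and the ModRM byte 0x05 is at offset 2
   (modrm_offset = 2).  */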
1383
1384 /* Update %rip-relative addressing in INSN.
1385
1386 %rip-relative addressing only uses a 32-bit displacement.
1387 32 bits is not enough to be guaranteed to cover the distance between where
1388 the real instruction is and where its copy is.
1389 Convert the insn to use base+disp addressing.
1390 We set base = pc + insn_length so we can leave disp unchanged. */
1391
1392 static void
1393 fixup_riprel (struct gdbarch *gdbarch,
1394 amd64_displaced_step_copy_insn_closure *dsc,
1395 CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
1396 {
1397 const struct amd64_insn *insn_details = &dsc->insn_details;
1398 int modrm_offset = insn_details->modrm_offset;
1399 gdb_byte *insn = insn_details->raw_insn + modrm_offset;
1400 CORE_ADDR rip_base;
1401 int insn_length;
1402 int arch_tmp_regno, tmp_regno;
1403 ULONGEST orig_value;
1404
1405 /* %rip+disp32 addressing mode, displacement follows ModRM byte. */
1406 ++insn;
1407
1408 /* Compute the rip-relative address. */
1409 insn_length = gdb_buffered_insn_length (gdbarch, dsc->insn_buf.data (),
1410 dsc->insn_buf.size (), from);
1411 rip_base = from + insn_length;
1412
1413 /* We need a register to hold the address.
1414 Pick one not used in the insn.
1415 NOTE: arch_tmp_regno uses architecture ordering, e.g. RDI = 7. */
1416 arch_tmp_regno = amd64_get_unused_input_int_reg (insn_details);
1417 tmp_regno = amd64_arch_reg_to_regnum (arch_tmp_regno);
1418
1419 /* Position of the not-B bit in the 3-byte VEX prefix (in byte 1). */
1420 static constexpr gdb_byte VEX3_NOT_B = 0x20;
1421
1422 /* REX.B should already be unset (VEX.!B set), as the insn was using
1423 rip-relative addressing, but make sure anyway; tmp_regno is not in
1424 r8-r15, so a clear REX.B (set VEX.!B) is what its encoding needs. */
1425 if (insn_details->enc_prefix_offset != -1)
1426 {
1427 gdb_byte *pfx = &dsc->insn_buf[insn_details->enc_prefix_offset];
1428 if (rex_prefix_p (pfx[0]))
1429 pfx[0] &= ~REX_B;
1430 else if (vex3_prefix_p (pfx[0]))
1431 pfx[1] |= VEX3_NOT_B;
1432 else
1433 gdb_assert_not_reached ("unhandled prefix");
1434 }
1435
1436 regcache_cooked_read_unsigned (regs, tmp_regno, &orig_value);
1437 dsc->tmp_regno = tmp_regno;
1438 dsc->tmp_save = orig_value;
1439 dsc->tmp_used = 1;
1440
1441 /* Convert the ModRM field to be base+disp. */
1442 dsc->insn_buf[modrm_offset] &= ~0xc7;
1443 dsc->insn_buf[modrm_offset] |= 0x80 + arch_tmp_regno;
1444
1445 regcache_cooked_write_unsigned (regs, tmp_regno, rip_base);
1446
1447 displaced_debug_printf ("%%rip-relative addressing used.");
1448 displaced_debug_printf ("using temp reg %d, old value %s, new value %s",
1449 dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save),
1450 paddress (gdbarch, rip_base));
1451 }
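
/* Example (editorial, not part of the original source): suppose the
   copied instruction is "mov 0x10(%rip),%rax" (48 8b 05 10 00 00 00).
   The unused register chosen above is %rcx; its value is saved, it is
   loaded with FROM + 7 (the original next-instruction address), and
   the ModRM byte is rewritten from 0x05 (rip+disp32) to 0x81
   (0x80 | rcx), yielding "mov 0x10(%rcx),%rax" with the disp32
   unchanged.  amd64_displaced_step_fixup later restores %rcx.  */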
1452
1453 static void
1454 fixup_displaced_copy (struct gdbarch *gdbarch,
1455 amd64_displaced_step_copy_insn_closure *dsc,
1456 CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
1457 {
1458 const struct amd64_insn *details = &dsc->insn_details;
1459
1460 if (details->modrm_offset != -1)
1461 {
1462 gdb_byte modrm = details->raw_insn[details->modrm_offset];
1463
1464 if ((modrm & 0xc7) == 0x05)
1465 {
1466 /* The insn uses rip-relative addressing.
1467 Deal with it. */
1468 fixup_riprel (gdbarch, dsc, from, to, regs);
1469 }
1470 }
1471 }
1472
1473 displaced_step_copy_insn_closure_up
1474 amd64_displaced_step_copy_insn (struct gdbarch *gdbarch,
1475 CORE_ADDR from, CORE_ADDR to,
1476 struct regcache *regs)
1477 {
1478 int len = gdbarch_max_insn_length (gdbarch);
1479 /* Extra space for sentinels so fixup_{riprel,displaced_copy} don't have to
1480 continually watch for running off the end of the buffer. */
1481 int fixup_sentinel_space = len;
1482 std::unique_ptr<amd64_displaced_step_copy_insn_closure> dsc
1483 (new amd64_displaced_step_copy_insn_closure (len + fixup_sentinel_space));
1484 gdb_byte *buf = &dsc->insn_buf[0];
1485 struct amd64_insn *details = &dsc->insn_details;
1486
1487 read_memory (from, buf, len);
1488
1489 /* Set up the sentinel space so we don't have to worry about running
1490 off the end of the buffer. An excessive number of leading prefixes
1491 could otherwise cause this. */
1492 memset (buf + len, 0, fixup_sentinel_space);
1493
1494 amd64_get_insn_details (buf, details);
1495
1496 /* GDB may get control back after the insn after the syscall.
1497 Presumably this is a kernel bug.
1498 If this is a syscall, make sure there's a nop afterwards. */
1499 {
1500 int syscall_length;
1501
1502 if (amd64_syscall_p (details, &syscall_length))
1503 buf[details->opcode_offset + syscall_length] = NOP_OPCODE;
1504 }
1505
1506 /* Modify the insn to cope with the address where it will be executed from.
1507 In particular, handle any rip-relative addressing. */
1508 fixup_displaced_copy (gdbarch, dsc.get (), from, to, regs);
1509
1510 write_memory (to, buf, len);
1511
1512 displaced_debug_printf ("copy %s->%s: %s",
1513 paddress (gdbarch, from), paddress (gdbarch, to),
1514 displaced_step_dump_bytes (buf, len).c_str ());
1515
1516 /* This is a workaround for a problem with g++ 4.8. */
1517 return displaced_step_copy_insn_closure_up (dsc.release ());
1518 }
1519
1520 static int
1521 amd64_absolute_jmp_p (const struct amd64_insn *details)
1522 {
1523 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1524
1525 if (insn[0] == 0xff)
1526 {
1527 /* jump near, absolute indirect (/4) */
1528 if ((insn[1] & 0x38) == 0x20)
1529 return 1;
1530
1531 /* jump far, absolute indirect (/5) */
1532 if ((insn[1] & 0x38) == 0x28)
1533 return 1;
1534 }
1535
1536 return 0;
1537 }
1538
1539 /* Return non-zero if the instruction DETAILS is a jump, zero otherwise. */
1540
1541 static int
1542 amd64_jmp_p (const struct amd64_insn *details)
1543 {
1544 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1545
1546 /* jump short, relative. */
1547 if (insn[0] == 0xeb)
1548 return 1;
1549
1550 /* jump near, relative. */
1551 if (insn[0] == 0xe9)
1552 return 1;
1553
1554 return amd64_absolute_jmp_p (details);
1555 }
1556
1557 static int
1558 amd64_absolute_call_p (const struct amd64_insn *details)
1559 {
1560 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1561
1562 if (insn[0] == 0xff)
1563 {
1564 /* Call near, absolute indirect (/2) */
1565 if ((insn[1] & 0x38) == 0x10)
1566 return 1;
1567
1568 /* Call far, absolute indirect (/3) */
1569 if ((insn[1] & 0x38) == 0x18)
1570 return 1;
1571 }
1572
1573 return 0;
1574 }
1575
1576 static int
1577 amd64_ret_p (const struct amd64_insn *details)
1578 {
1579 /* NOTE: gcc can emit "repz ; ret". */
1580 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1581
1582 switch (insn[0])
1583 {
1584 case 0xc2: /* ret near, pop N bytes */
1585 case 0xc3: /* ret near */
1586 case 0xca: /* ret far, pop N bytes */
1587 case 0xcb: /* ret far */
1588 case 0xcf: /* iret */
1589 return 1;
1590
1591 default:
1592 return 0;
1593 }
1594 }
1595
1596 static int
1597 amd64_call_p (const struct amd64_insn *details)
1598 {
1599 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1600
1601 if (amd64_absolute_call_p (details))
1602 return 1;
1603
1604 /* call near, relative */
1605 if (insn[0] == 0xe8)
1606 return 1;
1607
1608 return 0;
1609 }
1610
1611 /* Return non-zero if INSN is a system call, and set *LENGTHP to its
1612 length in bytes. Otherwise, return zero. */
1613
1614 static int
1615 amd64_syscall_p (const struct amd64_insn *details, int *lengthp)
1616 {
1617 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1618
1619 if (insn[0] == 0x0f && insn[1] == 0x05)
1620 {
1621 *lengthp = 2;
1622 return 1;
1623 }
1624
1625 return 0;
1626 }
1627
1628 /* Classify the instruction at ADDR using PRED.
1629 Throw an error if the memory can't be read. */
1630
1631 static int
1632 amd64_classify_insn_at (struct gdbarch *gdbarch, CORE_ADDR addr,
1633 int (*pred) (const struct amd64_insn *))
1634 {
1635 struct amd64_insn details;
1636 gdb_byte *buf;
1637 int len, classification;
1638
1639 len = gdbarch_max_insn_length (gdbarch);
1640 buf = (gdb_byte *) alloca (len);
1641
1642 read_code (addr, buf, len);
1643 amd64_get_insn_details (buf, &details);
1644
1645 classification = pred (&details);
1646
1647 return classification;
1648 }
1649
1650 /* The gdbarch insn_is_call method. */
1651
1652 static int
1653 amd64_insn_is_call (struct gdbarch *gdbarch, CORE_ADDR addr)
1654 {
1655 return amd64_classify_insn_at (gdbarch, addr, amd64_call_p);
1656 }
1657
1658 /* The gdbarch insn_is_ret method. */
1659
1660 static int
1661 amd64_insn_is_ret (struct gdbarch *gdbarch, CORE_ADDR addr)
1662 {
1663 return amd64_classify_insn_at (gdbarch, addr, amd64_ret_p);
1664 }
1665
1666 /* The gdbarch insn_is_jump method. */
1667
1668 static int
1669 amd64_insn_is_jump (struct gdbarch *gdbarch, CORE_ADDR addr)
1670 {
1671 return amd64_classify_insn_at (gdbarch, addr, amd64_jmp_p);
1672 }
1673
1674 /* Fix up the state of registers and memory after having single-stepped
1675 a displaced instruction. */
1676
1677 void
1678 amd64_displaced_step_fixup (struct gdbarch *gdbarch,
1679 struct displaced_step_copy_insn_closure *dsc_,
1680 CORE_ADDR from, CORE_ADDR to,
1681 struct regcache *regs)
1682 {
1683 amd64_displaced_step_copy_insn_closure *dsc
1684 = (amd64_displaced_step_copy_insn_closure *) dsc_;
1685 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1686 /* The offset we applied to the instruction's address. */
1687 ULONGEST insn_offset = to - from;
1688 gdb_byte *insn = dsc->insn_buf.data ();
1689 const struct amd64_insn *insn_details = &dsc->insn_details;
1690
1691 displaced_debug_printf ("fixup (%s, %s), insn = 0x%02x 0x%02x ...",
1692 paddress (gdbarch, from), paddress (gdbarch, to),
1693 insn[0], insn[1]);
1694
1695 /* If we used a tmp reg, restore it. */
1696
1697 if (dsc->tmp_used)
1698 {
1699 displaced_debug_printf ("restoring reg %d to %s",
1700 dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save));
1701 regcache_cooked_write_unsigned (regs, dsc->tmp_regno, dsc->tmp_save);
1702 }
1703
1704 /* The list of issues to contend with here is taken from
1705 resume_execution in arch/x86/kernel/kprobes.c, Linux 2.6.28.
1706 Yay for Free Software! */
1707
1708 /* Relocate the %rip back to the program's instruction stream,
1709 if necessary. */
1710
1711 /* Except in the case of absolute or indirect jump or call
1712 instructions, or a return instruction, the new rip is relative to
1713 the displaced instruction; make it relative to the original insn.
1714 Well, signal handler returns don't need relocation either, but we use the
1715 value of %rip to recognize those; see below. */
1716 if (! amd64_absolute_jmp_p (insn_details)
1717 && ! amd64_absolute_call_p (insn_details)
1718 && ! amd64_ret_p (insn_details))
1719 {
1720 ULONGEST orig_rip;
1721 int insn_len;
1722
1723 regcache_cooked_read_unsigned (regs, AMD64_RIP_REGNUM, &orig_rip);
1724
1725 /* A signal trampoline system call changes the %rip, resuming
1726 execution of the main program after the signal handler has
1727 returned. That makes them like 'return' instructions; we
1728 shouldn't relocate %rip.
1729
1730 But most system calls don't, and we do need to relocate %rip.
1731
1732 Our heuristic for distinguishing these cases: if stepping
1733 over the system call instruction left control directly after
1734 the instruction, then we relocate --- control almost certainly
1735 doesn't belong in the displaced copy. Otherwise, we assume
1736 the instruction has put control where it belongs, and leave
1737 it unrelocated. Goodness help us if there are PC-relative
1738 system calls. */
1739 if (amd64_syscall_p (insn_details, &insn_len)
1740 && orig_rip != to + insn_len
1741 /* GDB can get control back after the insn after the syscall.
1742 Presumably this is a kernel bug.
1743 Fixup ensures it's a nop; we add one to the length for it. */
1744 && orig_rip != to + insn_len + 1)
1745 displaced_debug_printf ("syscall changed %%rip; not relocating");
1746 else
1747 {
1748 ULONGEST rip = orig_rip - insn_offset;
1749
1750 /* If we just stepped over a breakpoint insn, we don't backup
1751 the pc on purpose; this is to match behaviour without
1752 stepping. */
1753
1754 regcache_cooked_write_unsigned (regs, AMD64_RIP_REGNUM, rip);
1755
1756 displaced_debug_printf ("relocated %%rip from %s to %s",
1757 paddress (gdbarch, orig_rip),
1758 paddress (gdbarch, rip));
1759 }
1760 }
1761
1762 /* If the instruction was PUSHFL, then the TF bit will be set in the
1763 pushed value, and should be cleared. We'll leave this for later,
1764 since GDB already messes up the TF flag when stepping over a
1765 pushfl. */
1766
1767 /* If the instruction was a call, the return address now atop the
1768 stack is the address following the copied instruction. We need
1769 to make it the address following the original instruction. */
1770 if (amd64_call_p (insn_details))
1771 {
1772 ULONGEST rsp;
1773 ULONGEST retaddr;
1774 const ULONGEST retaddr_len = 8;
1775
1776 regcache_cooked_read_unsigned (regs, AMD64_RSP_REGNUM, &rsp);
1777 retaddr = read_memory_unsigned_integer (rsp, retaddr_len, byte_order);
1778 retaddr = (retaddr - insn_offset) & 0xffffffffffffffffULL;
1779 write_memory_unsigned_integer (rsp, retaddr_len, byte_order, retaddr);
1780
1781 displaced_debug_printf ("relocated return addr at %s to %s",
1782 paddress (gdbarch, rsp),
1783 paddress (gdbarch, retaddr));
1784 }
1785 }
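
/* Example (editorial, not part of the original source): if a 5-byte
   "call" was copied from FROM to TO (insn_offset = TO - FROM) and
   executed there, the pushed return address is TO + 5; the code above
   rewrites it to FROM + 5, just as %rip itself is shifted back by
   insn_offset for ordinary instructions.  */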
1786
1787 /* If the instruction INSN uses RIP-relative addressing, return the
1788 offset into the raw INSN where the displacement to be adjusted is
1789 found. Returns 0 if the instruction doesn't use RIP-relative
1790 addressing. */
1791
1792 static int
1793 rip_relative_offset (struct amd64_insn *insn)
1794 {
1795 if (insn->modrm_offset != -1)
1796 {
1797 gdb_byte modrm = insn->raw_insn[insn->modrm_offset];
1798
1799 if ((modrm & 0xc7) == 0x05)
1800 {
1801 /* The displacement is found right after the ModRM byte. */
1802 return insn->modrm_offset + 1;
1803 }
1804 }
1805
1806 return 0;
1807 }
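/* For example (illustrative encoding):

     8b 05 f2 0f 00 00	mov 0xff2(%rip),%eax

   has ModRM 0x05 (MOD 00, R/M 101), i.e. RIP-relative, and its
   32-bit displacement starts right after the ModRM byte, at
   offset 2 into the raw instruction.  */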
1808
1809 static void
1810 append_insns (CORE_ADDR *to, ULONGEST len, const gdb_byte *buf)
1811 {
1812 target_write_memory (*to, buf, len);
1813 *to += len;
1814 }
1815
1816 static void
1817 amd64_relocate_instruction (struct gdbarch *gdbarch,
1818 CORE_ADDR *to, CORE_ADDR oldloc)
1819 {
1820 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1821 int len = gdbarch_max_insn_length (gdbarch);
1822 /* Extra space for sentinels. */
1823 int fixup_sentinel_space = len;
1824 gdb_byte *buf = (gdb_byte *) xmalloc (len + fixup_sentinel_space);
1825 struct amd64_insn insn_details;
1826 int offset = 0;
1827 LONGEST rel32, newrel;
1828 gdb_byte *insn;
1829 int insn_length;
1830
1831 read_memory (oldloc, buf, len);
1832
1833 /* Set up the sentinel space so we don't have to worry about running
1834 off the end of the buffer. An excessive number of leading prefixes
1835 could otherwise cause this. */
1836 memset (buf + len, 0, fixup_sentinel_space);
1837
1838 insn = buf;
1839 amd64_get_insn_details (insn, &insn_details);
1840
1841 insn_length = gdb_buffered_insn_length (gdbarch, insn, len, oldloc);
1842
1843 /* Skip legacy instruction prefixes. */
1844 insn = amd64_skip_prefixes (insn);
1845
1846 /* Rewrite calls with 32-bit relative addresses as a push/jump
1847 pair, where the pushed address is the location to which the
1848 original call in the user program would return. */
1849 if (insn[0] == 0xe8)
1850 {
1851 gdb_byte push_buf[32];
1852 CORE_ADDR ret_addr;
1853 int i = 0;
1854
1855 /* Where "ret" in the original code will return to. */
1856 ret_addr = oldloc + insn_length;
1857
1858 /* If pushing an address higher than or equal to 0x80000000,
1859 avoid 'pushq', as that sign extends its 32-bit operand, which
1860 would be incorrect. */
1861 if (ret_addr <= 0x7fffffff)
1862 {
1863 push_buf[0] = 0x68; /* pushq $... */
1864 store_unsigned_integer (&push_buf[1], 4, byte_order, ret_addr);
1865 i = 5;
1866 }
1867 else
1868 {
1869 push_buf[i++] = 0x48; /* sub $0x8,%rsp */
1870 push_buf[i++] = 0x83;
1871 push_buf[i++] = 0xec;
1872 push_buf[i++] = 0x08;
1873
1874 push_buf[i++] = 0xc7; /* movl $imm,(%rsp) */
1875 push_buf[i++] = 0x04;
1876 push_buf[i++] = 0x24;
1877 store_unsigned_integer (&push_buf[i], 4, byte_order,
1878 ret_addr & 0xffffffff);
1879 i += 4;
1880
1881 push_buf[i++] = 0xc7; /* movl $imm,4(%rsp) */
1882 push_buf[i++] = 0x44;
1883 push_buf[i++] = 0x24;
1884 push_buf[i++] = 0x04;
1885 store_unsigned_integer (&push_buf[i], 4, byte_order,
1886 ret_addr >> 32);
1887 i += 4;
1888 }
1889 gdb_assert (i <= sizeof (push_buf));
1890 /* Push the push. */
1891 append_insns (to, i, push_buf);
1892
1893 /* Convert the relative call to a relative jump. */
1894 insn[0] = 0xe9;
1895
1896 /* Adjust the destination offset. */
1897 rel32 = extract_signed_integer (insn + 1, 4, byte_order);
1898 newrel = (oldloc - *to) + rel32;
1899 store_signed_integer (insn + 1, 4, byte_order, newrel);
1900
1901 displaced_debug_printf ("adjusted insn rel32=%s at %s to rel32=%s at %s",
1902 hex_string (rel32), paddress (gdbarch, oldloc),
1903 hex_string (newrel), paddress (gdbarch, *to));
1904
1905 /* Write the adjusted jump into its displaced location. */
1906 append_insns (to, 5, insn);
1907 return;
1908 }
1909
1910 offset = rip_relative_offset (&insn_details);
1911 if (!offset)
1912 {
1913 /* Adjust jumps with 32-bit relative addresses. Calls are
1914 already handled above. */
1915 if (insn[0] == 0xe9)
1916 offset = 1;
1917 /* Adjust conditional jumps. */
1918 else if (insn[0] == 0x0f && (insn[1] & 0xf0) == 0x80)
1919 offset = 2;
1920 }
1921
1922 if (offset)
1923 {
1924 rel32 = extract_signed_integer (insn + offset, 4, byte_order);
1925 newrel = (oldloc - *to) + rel32;
1926 store_signed_integer (insn + offset, 4, byte_order, newrel);
1927 displaced_debug_printf ("adjusted insn rel32=%s at %s to rel32=%s at %s",
1928 hex_string (rel32), paddress (gdbarch, oldloc),
1929 hex_string (newrel), paddress (gdbarch, *to));
1930 }
1931
1932 /* Write the adjusted instruction into its displaced location. */
1933 append_insns (to, insn_length, buf);
1934 }
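/* Illustrative example (made-up addresses): a 5-byte call
   `e8 rel32' at OLDLOC = 0x401000 relocated to *TO = 0x601000
   first emits `68 05 10 40 00' (pushq $0x401005, the original
   return address), then becomes `e9 newrel32' with newrel32
   rebiased by OLDLOC - *TO, so the jump still reaches
   OLDLOC + 5 + rel32, the original call's destination.  */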
1935
1936 \f
1937 /* The maximum number of saved registers. This should include %rip. */
1938 #define AMD64_NUM_SAVED_REGS AMD64_NUM_GREGS
1939
1940 struct amd64_frame_cache
1941 {
1942 /* Base address. */
1943 CORE_ADDR base;
1944 int base_p;
1945 CORE_ADDR sp_offset;
1946 CORE_ADDR pc;
1947
1948 /* Saved registers. */
1949 CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
1950 CORE_ADDR saved_sp;
1951 int saved_sp_reg;
1952
1953 /* Do we have a frame? */
1954 int frameless_p;
1955 };
1956
1957 /* Initialize a frame cache. */
1958
1959 static void
1960 amd64_init_frame_cache (struct amd64_frame_cache *cache)
1961 {
1962 int i;
1963
1964 /* Base address. */
1965 cache->base = 0;
1966 cache->base_p = 0;
1967 cache->sp_offset = -8;
1968 cache->pc = 0;
1969
1970 /* Saved registers. We initialize these to -1 since zero is a valid
1971 offset (that's where %rbp is supposed to be stored).
1972 The values start out as being offsets, and are later converted to
1973 addresses (at which point -1 is interpreted as an address, still meaning
1974 "invalid"). */
1975 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
1976 cache->saved_regs[i] = -1;
1977 cache->saved_sp = 0;
1978 cache->saved_sp_reg = -1;
1979
1980 /* Frameless until proven otherwise. */
1981 cache->frameless_p = 1;
1982 }
1983
1984 /* Allocate and initialize a frame cache. */
1985
1986 static struct amd64_frame_cache *
1987 amd64_alloc_frame_cache (void)
1988 {
1989 struct amd64_frame_cache *cache;
1990
1991 cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);
1992 amd64_init_frame_cache (cache);
1993 return cache;
1994 }
1995
1996 /* GCC 4.4 and later can put code in the prologue to realign the
1997 stack pointer. Check whether PC points to such code, and update
1998 CACHE accordingly. Return the first instruction after the code
1999 sequence or CURRENT_PC, whichever is smaller. If we don't
2000 recognize the code, return PC. */
2001
2002 static CORE_ADDR
2003 amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
2004 struct amd64_frame_cache *cache)
2005 {
2006 /* There are 2 code sequences to re-align stack before the frame
2007 gets set up:
2008
2009 1. Use a caller-saved register:
2010
2011 leaq 8(%rsp), %reg
2012 andq $-XXX, %rsp
2013 pushq -8(%reg)
2014
2015 2. Use a callee-saved register:
2016
2017 pushq %reg
2018 leaq 16(%rsp), %reg
2019 andq $-XXX, %rsp
2020 pushq -8(%reg)
2021
2022 "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:
2023
2024 0x48 0x83 0xe4 0xf0 andq $-16, %rsp
2025 0x48 0x81 0xe4 0x00 0xff 0xff 0xff andq $-256, %rsp
2026 */
2027
2028 gdb_byte buf[18];
2029 int reg, r;
2030 int offset, offset_and;
2031
2032 if (target_read_code (pc, buf, sizeof buf))
2033 return pc;
2034
2035 /* Check for the caller-saved register form. The first
2036 instruction has to be "leaq 8(%rsp), %reg". */
2037 if ((buf[0] & 0xfb) == 0x48
2038 && buf[1] == 0x8d
2039 && buf[3] == 0x24
2040 && buf[4] == 0x8)
2041 {
2042 /* MOD must be binary 01 and R/M must be binary 100. */
2043 if ((buf[2] & 0xc7) != 0x44)
2044 return pc;
2045
2046 /* REG has register number. */
2047 reg = (buf[2] >> 3) & 7;
2048
2049 /* Check the REX.R bit. */
2050 if (buf[0] == 0x4c)
2051 reg += 8;
2052
2053 offset = 5;
2054 }
2055 else
2056 {
2057 /* Check for the callee-saved register form. The first
2058 instruction has to be "pushq %reg". */
2059 reg = 0;
2060 if ((buf[0] & 0xf8) == 0x50)
2061 offset = 0;
2062 else if ((buf[0] & 0xf6) == 0x40
2063 && (buf[1] & 0xf8) == 0x50)
2064 {
2065 /* Check the REX.B bit. */
2066 if ((buf[0] & 1) != 0)
2067 reg = 8;
2068
2069 offset = 1;
2070 }
2071 else
2072 return pc;
2073
2074 /* Get register. */
2075 reg += buf[offset] & 0x7;
2076
2077 offset++;
2078
2079 /* The next instruction has to be "leaq 16(%rsp), %reg". */
2080 if ((buf[offset] & 0xfb) != 0x48
2081 || buf[offset + 1] != 0x8d
2082 || buf[offset + 3] != 0x24
2083 || buf[offset + 4] != 0x10)
2084 return pc;
2085
2086 /* MOD must be binary 01 and R/M must be binary 100. */
2087 if ((buf[offset + 2] & 0xc7) != 0x44)
2088 return pc;
2089
2090 /* REG has register number. */
2091 r = (buf[offset + 2] >> 3) & 7;
2092
2093 /* Check the REX.R bit. */
2094 if (buf[offset] == 0x4c)
2095 r += 8;
2096
2097 /* Registers in pushq and leaq have to be the same. */
2098 if (reg != r)
2099 return pc;
2100
2101 offset += 5;
2102 }
2103
2104 /* Register can't be %rsp or %rbp. */
2105 if (reg == 4 || reg == 5)
2106 return pc;
2107
2108 /* The next instruction has to be "andq $-XXX, %rsp". */
2109 if (buf[offset] != 0x48
2110 || buf[offset + 2] != 0xe4
2111 || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
2112 return pc;
2113
2114 offset_and = offset;
2115 offset += buf[offset + 1] == 0x81 ? 7 : 4;
2116
2117 /* The next instruction has to be "pushq -8(%reg)". */
2118 r = 0;
2119 if (buf[offset] == 0xff)
2120 offset++;
2121 else if ((buf[offset] & 0xf6) == 0x40
2122 && buf[offset + 1] == 0xff)
2123 {
2124 /* Check the REX.B bit. */
2125 if ((buf[offset] & 0x1) != 0)
2126 r = 8;
2127 offset += 2;
2128 }
2129 else
2130 return pc;
2131
2132 /* 8-bit -8 is 0xf8. REG must be binary 110 and MOD must be
2133 binary 01. */
2134 if (buf[offset + 1] != 0xf8
2135 || (buf[offset] & 0xf8) != 0x70)
2136 return pc;
2137
2138 /* R/M has register. */
2139 r += buf[offset] & 7;
2140
2141 /* Registers in leaq and pushq have to be the same. */
2142 if (reg != r)
2143 return pc;
2144
2145 if (current_pc > pc + offset_and)
2146 cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);
2147
2148 return std::min (pc + offset + 2, current_pc);
2149 }
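/* A concrete instance of sequence 1 above (illustrative):

     48 8d 4c 24 08	leaq 0x8(%rsp),%rcx
     48 83 e4 f0	andq $-16,%rsp
     ff 71 f8		pushq -0x8(%rcx)

   Here REG is %rcx (architectural number 1); once CURRENT_PC is
   past the start of the andq, CACHE->saved_sp_reg is set to the
   GDB register number for %rcx.  */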
2150
2151 /* Similar to amd64_analyze_stack_align for x32. */
2152
2153 static CORE_ADDR
2154 amd64_x32_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
2155 struct amd64_frame_cache *cache)
2156 {
2157 /* There are 2 code sequences to re-align stack before the frame
2158 gets set up:
2159
2160 1. Use a caller-saved register:
2161
2162 leaq 8(%rsp), %reg
2163 andq $-XXX, %rsp
2164 pushq -8(%reg)
2165
2166 or
2167
2168 [addr32] leal 8(%rsp), %reg
2169 andl $-XXX, %esp
2170 [addr32] pushq -8(%reg)
2171
2172 2. Use a callee-saved register:
2173
2174 pushq %reg
2175 leaq 16(%rsp), %reg
2176 andq $-XXX, %rsp
2177 pushq -8(%reg)
2178
2179 or
2180
2181 pushq %reg
2182 [addr32] leal 16(%rsp), %reg
2183 andl $-XXX, %esp
2184 [addr32] pushq -8(%reg)
2185
2186 "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:
2187
2188 0x48 0x83 0xe4 0xf0 andq $-16, %rsp
2189 0x48 0x81 0xe4 0x00 0xff 0xff 0xff andq $-256, %rsp
2190
2191 "andl $-XXX, %esp" can be either 3 bytes or 6 bytes:
2192
2193 0x83 0xe4 0xf0 andl $-16, %esp
2194 0x81 0xe4 0x00 0xff 0xff 0xff andl $-256, %esp
2195 */
2196
2197 gdb_byte buf[19];
2198 int reg, r;
2199 int offset, offset_and;
2200
2201 if (target_read_memory (pc, buf, sizeof buf))
2202 return pc;
2203
2204 /* Skip optional addr32 prefix. */
2205 offset = buf[0] == 0x67 ? 1 : 0;
2206
2207 /* Check for the caller-saved register form. The first instruction
2208 has to be "leaq 8(%rsp), %reg" or "leal 8(%rsp), %reg". */
2209 if (((buf[offset] & 0xfb) == 0x48 || (buf[offset] & 0xfb) == 0x40)
2210 && buf[offset + 1] == 0x8d
2211 && buf[offset + 3] == 0x24
2212 && buf[offset + 4] == 0x8)
2213 {
2214 /* MOD must be binary 01 and R/M must be binary 100. */
2215 if ((buf[offset + 2] & 0xc7) != 0x44)
2216 return pc;
2217
2218 /* REG has register number. */
2219 reg = (buf[offset + 2] >> 3) & 7;
2220
2221 /* Check the REX.R bit. */
2222 if ((buf[offset] & 0x4) != 0)
2223 reg += 8;
2224
2225 offset += 5;
2226 }
2227 else
2228 {
2229 /* Check for the callee-saved register form. The first
2230 instruction has to be "pushq %reg". */
2231 reg = 0;
2232 if ((buf[offset] & 0xf6) == 0x40
2233 && (buf[offset + 1] & 0xf8) == 0x50)
2234 {
2235 /* Check the REX.B bit. */
2236 if ((buf[offset] & 1) != 0)
2237 reg = 8;
2238
2239 offset += 1;
2240 }
2241 else if ((buf[offset] & 0xf8) != 0x50)
2242 return pc;
2243
2244 /* Get register. */
2245 reg += buf[offset] & 0x7;
2246
2247 offset++;
2248
2249 /* Skip optional addr32 prefix. */
2250 if (buf[offset] == 0x67)
2251 offset++;
2252
2253 /* The next instruction has to be "leaq 16(%rsp), %reg" or
2254 "leal 16(%rsp), %reg". */
2255 if (((buf[offset] & 0xfb) != 0x48 && (buf[offset] & 0xfb) != 0x40)
2256 || buf[offset + 1] != 0x8d
2257 || buf[offset + 3] != 0x24
2258 || buf[offset + 4] != 0x10)
2259 return pc;
2260
2261 /* MOD must be binary 01 and R/M must be binary 100. */
2262 if ((buf[offset + 2] & 0xc7) != 0x44)
2263 return pc;
2264
2265 /* REG has register number. */
2266 r = (buf[offset + 2] >> 3) & 7;
2267
2268 /* Check the REX.R bit. */
2269 if ((buf[offset] & 0x4) != 0)
2270 r += 8;
2271
2272 /* Registers in pushq and leaq have to be the same. */
2273 if (reg != r)
2274 return pc;
2275
2276 offset += 5;
2277 }
2278
2279 /* Register can't be %rsp or %rbp. */
2280 if (reg == 4 || reg == 5)
2281 return pc;
2282
2283 /* The next instruction may be "andq $-XXX, %rsp" or
2284 "andl $-XXX, %esp". */
2285 if (buf[offset] != 0x48)
2286 offset--;
2287
2288 if (buf[offset + 2] != 0xe4
2289 || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
2290 return pc;
2291
2292 offset_and = offset;
2293 offset += buf[offset + 1] == 0x81 ? 7 : 4;
2294
2295 /* Skip optional addr32 prefix. */
2296 if (buf[offset] == 0x67)
2297 offset++;
2298
2299 /* The next instruction has to be "pushq -8(%reg)". */
2300 r = 0;
2301 if (buf[offset] == 0xff)
2302 offset++;
2303 else if ((buf[offset] & 0xf6) == 0x40
2304 && buf[offset + 1] == 0xff)
2305 {
2306 /* Check the REX.B bit. */
2307 if ((buf[offset] & 0x1) != 0)
2308 r = 8;
2309 offset += 2;
2310 }
2311 else
2312 return pc;
2313
2314 /* 8-bit -8 is 0xf8. REG must be binary 110 and MOD must be
2315 binary 01. */
2316 if (buf[offset + 1] != 0xf8
2317 || (buf[offset] & 0xf8) != 0x70)
2318 return pc;
2319
2320 /* R/M has register. */
2321 r += buf[offset] & 7;
2322
2323 /* Registers in leaq and pushq have to be the same. */
2324 if (reg != r)
2325 return pc;
2326
2327 if (current_pc > pc + offset_and)
2328 cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);
2329
2330 return std::min (pc + offset + 2, current_pc);
2331 }
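/* An illustrative instance of the addr32-prefixed form above:

     67 48 8d 4c 24 08	[addr32] leaq 8(%rsp), %rcx
     83 e4 f0		andl $-16, %esp
     67 ff 71 f8	[addr32] pushq -8(%rcx)
   */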
2332
2333 /* Do a limited analysis of the prologue at PC and update CACHE
2334 accordingly. Bail out early if CURRENT_PC is reached. Return the
2335 address where the analysis stopped.
2336
2337 We will handle only functions beginning with:
2338
2339 pushq %rbp 0x55
2340 movq %rsp, %rbp 0x48 0x89 0xe5 (or 0x48 0x8b 0xec)
2341
2342 or (for the X32 ABI):
2343
2344 pushq %rbp 0x55
2345 movl %esp, %ebp 0x89 0xe5 (or 0x8b 0xec)
2346
2347 An `endbr64` instruction may appear before these sequences; it is
2348 skipped if present.
2349
2350 Any function that doesn't start with one of these sequences will be
2351 assumed to have no prologue and thus no valid frame pointer in
2352 %rbp. */
2353
2354 static CORE_ADDR
2355 amd64_analyze_prologue (struct gdbarch *gdbarch,
2356 CORE_ADDR pc, CORE_ADDR current_pc,
2357 struct amd64_frame_cache *cache)
2358 {
2359 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2360 /* The `endbr64` instruction. */
2361 static const gdb_byte endbr64[4] = { 0xf3, 0x0f, 0x1e, 0xfa };
2362 /* There are two variations of movq %rsp, %rbp. */
2363 static const gdb_byte mov_rsp_rbp_1[3] = { 0x48, 0x89, 0xe5 };
2364 static const gdb_byte mov_rsp_rbp_2[3] = { 0x48, 0x8b, 0xec };
2365 /* Ditto for movl %esp, %ebp. */
2366 static const gdb_byte mov_esp_ebp_1[2] = { 0x89, 0xe5 };
2367 static const gdb_byte mov_esp_ebp_2[2] = { 0x8b, 0xec };
2368
2369 gdb_byte buf[3];
2370 gdb_byte op;
2371
2372 if (current_pc <= pc)
2373 return current_pc;
2374
2375 if (gdbarch_ptr_bit (gdbarch) == 32)
2376 pc = amd64_x32_analyze_stack_align (pc, current_pc, cache);
2377 else
2378 pc = amd64_analyze_stack_align (pc, current_pc, cache);
2379
2380 op = read_code_unsigned_integer (pc, 1, byte_order);
2381
2382 /* Check for the `endbr64` instruction, skip it if found. */
2383 if (op == endbr64[0])
2384 {
2385 read_code (pc + 1, buf, 3);
2386
2387 if (memcmp (buf, &endbr64[1], 3) == 0)
2388 pc += 4;
2389
2390 op = read_code_unsigned_integer (pc, 1, byte_order);
2391 }
2392
2393 if (current_pc <= pc)
2394 return current_pc;
2395
2396 if (op == 0x55) /* pushq %rbp */
2397 {
2398 /* Take into account that we've executed the `pushq %rbp' that
2399 starts this instruction sequence. */
2400 cache->saved_regs[AMD64_RBP_REGNUM] = 0;
2401 cache->sp_offset += 8;
2402
2403 /* If that's all, return now. */
2404 if (current_pc <= pc + 1)
2405 return current_pc;
2406
2407 read_code (pc + 1, buf, 3);
2408
2409 /* Check for `movq %rsp, %rbp'. */
2410 if (memcmp (buf, mov_rsp_rbp_1, 3) == 0
2411 || memcmp (buf, mov_rsp_rbp_2, 3) == 0)
2412 {
2413 /* OK, we actually have a frame. */
2414 cache->frameless_p = 0;
2415 return pc + 4;
2416 }
2417
2418 /* For X32, also check for `movl %esp, %ebp'. */
2419 if (gdbarch_ptr_bit (gdbarch) == 32)
2420 {
2421 if (memcmp (buf, mov_esp_ebp_1, 2) == 0
2422 || memcmp (buf, mov_esp_ebp_2, 2) == 0)
2423 {
2424 /* OK, we actually have a frame. */
2425 cache->frameless_p = 0;
2426 return pc + 3;
2427 }
2428 }
2429
2430 return pc + 1;
2431 }
2432
2433 return pc;
2434 }
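/* Example (illustrative): for a function beginning

     f3 0f 1e fa	endbr64
     55			pushq %rbp
     48 89 e5		movq %rsp,%rbp

   with CURRENT_PC beyond the movq, the analysis records %rbp saved
   at offset 0, leaves CACHE->sp_offset at 0, clears
   CACHE->frameless_p, and returns PC + 8.  */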
2435
2436 /* Work around false termination of prologue - GCC PR debug/48827.
2437
2438 START_PC is the first instruction of a function, PC is the lowest
2439 address the prologue analysis has already reached. Returns PC if there is nothing to do.
2440
2441 84 c0 test %al,%al
2442 74 23 je after
2443 <-- here the line info advances 0 lines - the false prologue end marker.
2444 0f 29 85 70 ff ff ff movaps %xmm0,-0x90(%rbp)
2445 0f 29 4d 80 movaps %xmm1,-0x80(%rbp)
2446 0f 29 55 90 movaps %xmm2,-0x70(%rbp)
2447 0f 29 5d a0 movaps %xmm3,-0x60(%rbp)
2448 0f 29 65 b0 movaps %xmm4,-0x50(%rbp)
2449 0f 29 6d c0 movaps %xmm5,-0x40(%rbp)
2450 0f 29 75 d0 movaps %xmm6,-0x30(%rbp)
2451 0f 29 7d e0 movaps %xmm7,-0x20(%rbp)
2452 after: */
2453
2454 static CORE_ADDR
2455 amd64_skip_xmm_prologue (CORE_ADDR pc, CORE_ADDR start_pc)
2456 {
2457 struct symtab_and_line start_pc_sal, next_sal;
2458 gdb_byte buf[4 + 8 * 7];
2459 int offset, xmmreg;
2460
2461 if (pc == start_pc)
2462 return pc;
2463
2464 start_pc_sal = find_pc_sect_line (start_pc, NULL, 0);
2465 if (start_pc_sal.symtab == NULL
2466 || producer_is_gcc_ge_4 (COMPUNIT_PRODUCER
2467 (SYMTAB_COMPUNIT (start_pc_sal.symtab))) < 6
2468 || start_pc_sal.pc != start_pc || pc >= start_pc_sal.end)
2469 return pc;
2470
2471 next_sal = find_pc_sect_line (start_pc_sal.end, NULL, 0);
2472 if (next_sal.line != start_pc_sal.line)
2473 return pc;
2474
2475 /* START_PC may be in overlaid memory, which we ignore here. */
2476 if (target_read_code (next_sal.pc - 4, buf, sizeof (buf)) != 0)
2477 return pc;
2478
2479 /* test %al,%al */
2480 if (buf[0] != 0x84 || buf[1] != 0xc0)
2481 return pc;
2482 /* je AFTER */
2483 if (buf[2] != 0x74)
2484 return pc;
2485
2486 offset = 4;
2487 for (xmmreg = 0; xmmreg < 8; xmmreg++)
2488 {
2489 /* 0x0f 0x29 0b??rrr101 movaps %xmmN,-0x??(%rbp), where rrr = N */
2490 if (buf[offset] != 0x0f || buf[offset + 1] != 0x29
2491 || (buf[offset + 2] & 0x3f) != (xmmreg << 3 | 0x5))
2492 return pc;
2493
2494 /* 0b01?????? */
2495 if ((buf[offset + 2] & 0xc0) == 0x40)
2496 {
2497 /* 8-bit displacement. */
2498 offset += 4;
2499 }
2500 /* 0b10?????? */
2501 else if ((buf[offset + 2] & 0xc0) == 0x80)
2502 {
2503 /* 32-bit displacement. */
2504 offset += 7;
2505 }
2506 else
2507 return pc;
2508 }
2509
2510 /* je AFTER */
2511 if (offset - 4 != buf[3])
2512 return pc;
2513
2514 return next_sal.end;
2515 }
2516
2517 /* Return PC of first real instruction. */
2518
2519 static CORE_ADDR
2520 amd64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR start_pc)
2521 {
2522 struct amd64_frame_cache cache;
2523 CORE_ADDR pc;
2524 CORE_ADDR func_addr;
2525
2526 if (find_pc_partial_function (start_pc, NULL, &func_addr, NULL))
2527 {
2528 CORE_ADDR post_prologue_pc
2529 = skip_prologue_using_sal (gdbarch, func_addr);
2530 struct compunit_symtab *cust = find_pc_compunit_symtab (func_addr);
2531
2532 /* The LLVM backend (Clang/Flang) always emits a line note before
2533 the prologue and another one after. We trust Clang and newer
2534 Intel compilers to emit usable line notes. */
2535 if (post_prologue_pc
2536 && (cust != NULL
2537 && COMPUNIT_PRODUCER (cust) != NULL
2538 && (producer_is_llvm (COMPUNIT_PRODUCER (cust))
2539 || producer_is_icc_ge_19 (COMPUNIT_PRODUCER (cust)))))
2540 return std::max (start_pc, post_prologue_pc);
2541 }
2542
2543 amd64_init_frame_cache (&cache);
2544 pc = amd64_analyze_prologue (gdbarch, start_pc, 0xffffffffffffffffLL,
2545 &cache);
2546 if (cache.frameless_p)
2547 return start_pc;
2548
2549 return amd64_skip_xmm_prologue (pc, start_pc);
2550 }
2551 \f
2552
2553 /* Normal frames. */
2554
2555 static void
2556 amd64_frame_cache_1 (struct frame_info *this_frame,
2557 struct amd64_frame_cache *cache)
2558 {
2559 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2560 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2561 gdb_byte buf[8];
2562 int i;
2563
2564 cache->pc = get_frame_func (this_frame);
2565 if (cache->pc != 0)
2566 amd64_analyze_prologue (gdbarch, cache->pc, get_frame_pc (this_frame),
2567 cache);
2568
2569 if (cache->frameless_p)
2570 {
2571 /* We didn't find a valid frame. If we're at the start of a
2572 function, or somewhere halfway through its prologue, the function's
2573 frame probably hasn't been fully set up yet. Try to
2574 reconstruct the base address for the stack frame by looking
2575 at the stack pointer. For truly "frameless" functions this
2576 might work too. */
2577
2578 if (cache->saved_sp_reg != -1)
2579 {
2580 /* Stack pointer has been saved. */
2581 get_frame_register (this_frame, cache->saved_sp_reg, buf);
2582 cache->saved_sp = extract_unsigned_integer (buf, 8, byte_order);
2583
2584 /* We're halfway through aligning the stack. */
2585 cache->base = ((cache->saved_sp - 8) & 0xfffffffffffffff0LL) - 8;
2586 cache->saved_regs[AMD64_RIP_REGNUM] = cache->saved_sp - 8;
2587
2588 /* This will be added back below. */
2589 cache->saved_regs[AMD64_RIP_REGNUM] -= cache->base;
2590 }
2591 else
2592 {
2593 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2594 cache->base = extract_unsigned_integer (buf, 8, byte_order)
2595 + cache->sp_offset;
2596 }
2597 }
2598 else
2599 {
2600 get_frame_register (this_frame, AMD64_RBP_REGNUM, buf);
2601 cache->base = extract_unsigned_integer (buf, 8, byte_order);
2602 }
2603
2604 /* Now that we have the base address for the stack frame we can
2605 calculate the value of %rsp in the calling frame. */
2606 cache->saved_sp = cache->base + 16;
2607
2608 /* For normal frames, %rip is stored at 8(%rbp). If we don't have a
2609 frame we find it at the same offset from the reconstructed base
2610 address. If we're halfway through aligning the stack, %rip is handled
2611 differently (see above). */
2612 if (!cache->frameless_p || cache->saved_sp_reg == -1)
2613 cache->saved_regs[AMD64_RIP_REGNUM] = 8;
2614
2615 /* Adjust all the saved registers such that they contain addresses
2616 instead of offsets. */
2617 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
2618 if (cache->saved_regs[i] != -1)
2619 cache->saved_regs[i] += cache->base;
2620
2621 cache->base_p = 1;
2622 }
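/* For reference, the resulting layout for a normal frame
   (illustrative):

     CACHE->base + 16   %rsp in the caller (CACHE->saved_sp)
     CACHE->base + 8    return %rip
     CACHE->base        saved %rbp  */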
2623
2624 static struct amd64_frame_cache *
2625 amd64_frame_cache (struct frame_info *this_frame, void **this_cache)
2626 {
2627 struct amd64_frame_cache *cache;
2628
2629 if (*this_cache)
2630 return (struct amd64_frame_cache *) *this_cache;
2631
2632 cache = amd64_alloc_frame_cache ();
2633 *this_cache = cache;
2634
2635 try
2636 {
2637 amd64_frame_cache_1 (this_frame, cache);
2638 }
2639 catch (const gdb_exception_error &ex)
2640 {
2641 if (ex.error != NOT_AVAILABLE_ERROR)
2642 throw;
2643 }
2644
2645 return cache;
2646 }
2647
2648 static enum unwind_stop_reason
2649 amd64_frame_unwind_stop_reason (struct frame_info *this_frame,
2650 void **this_cache)
2651 {
2652 struct amd64_frame_cache *cache =
2653 amd64_frame_cache (this_frame, this_cache);
2654
2655 if (!cache->base_p)
2656 return UNWIND_UNAVAILABLE;
2657
2658 /* This marks the outermost frame. */
2659 if (cache->base == 0)
2660 return UNWIND_OUTERMOST;
2661
2662 return UNWIND_NO_REASON;
2663 }
2664
2665 static void
2666 amd64_frame_this_id (struct frame_info *this_frame, void **this_cache,
2667 struct frame_id *this_id)
2668 {
2669 struct amd64_frame_cache *cache =
2670 amd64_frame_cache (this_frame, this_cache);
2671
2672 if (!cache->base_p)
2673 (*this_id) = frame_id_build_unavailable_stack (cache->pc);
2674 else if (cache->base == 0)
2675 {
2676 /* This marks the outermost frame. */
2677 return;
2678 }
2679 else
2680 (*this_id) = frame_id_build (cache->base + 16, cache->pc);
2681 }
2682
2683 static struct value *
2684 amd64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
2685 int regnum)
2686 {
2687 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2688 struct amd64_frame_cache *cache =
2689 amd64_frame_cache (this_frame, this_cache);
2690
2691 gdb_assert (regnum >= 0);
2692
2693 if (regnum == gdbarch_sp_regnum (gdbarch) && cache->saved_sp)
2694 return frame_unwind_got_constant (this_frame, regnum, cache->saved_sp);
2695
2696 if (regnum < AMD64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
2697 return frame_unwind_got_memory (this_frame, regnum,
2698 cache->saved_regs[regnum]);
2699
2700 return frame_unwind_got_register (this_frame, regnum, regnum);
2701 }
2702
2703 static const struct frame_unwind amd64_frame_unwind =
2704 {
2705 NORMAL_FRAME,
2706 amd64_frame_unwind_stop_reason,
2707 amd64_frame_this_id,
2708 amd64_frame_prev_register,
2709 NULL,
2710 default_frame_sniffer
2711 };
2712 \f
2713 /* Generate a bytecode expression to get the value of the saved PC. */
2714
2715 static void
2716 amd64_gen_return_address (struct gdbarch *gdbarch,
2717 struct agent_expr *ax, struct axs_value *value,
2718 CORE_ADDR scope)
2719 {
2720 /* The following sequence assumes the traditional use of the base
2721 register. */
2722 ax_reg (ax, AMD64_RBP_REGNUM);
2723 ax_const_l (ax, 8);
2724 ax_simple (ax, aop_add);
2725 value->type = register_type (gdbarch, AMD64_RIP_REGNUM);
2726 value->kind = axs_lvalue_memory;
2727 }
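/* The sequence above yields an lvalue in memory at %rbp + 8 (the
   saved %rip slot of a conventional frame), typed like %rip.  */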
2728 \f
2729
2730 /* Signal trampolines. */
2731
2732 /* FIXME: kettenis/20030419: Perhaps, we can unify the 32-bit and
2733 64-bit variants. This would require using identical frame caches
2734 on both platforms. */
2735
2736 static struct amd64_frame_cache *
2737 amd64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
2738 {
2739 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2740 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2741 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2742 struct amd64_frame_cache *cache;
2743 CORE_ADDR addr;
2744 gdb_byte buf[8];
2745 int i;
2746
2747 if (*this_cache)
2748 return (struct amd64_frame_cache *) *this_cache;
2749
2750 cache = amd64_alloc_frame_cache ();
2751
2752 try
2753 {
2754 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2755 cache->base = extract_unsigned_integer (buf, 8, byte_order) - 8;
2756
2757 addr = tdep->sigcontext_addr (this_frame);
2758 gdb_assert (tdep->sc_reg_offset);
2759 gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
2760 for (i = 0; i < tdep->sc_num_regs; i++)
2761 if (tdep->sc_reg_offset[i] != -1)
2762 cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];
2763
2764 cache->base_p = 1;
2765 }
2766 catch (const gdb_exception_error &ex)
2767 {
2768 if (ex.error != NOT_AVAILABLE_ERROR)
2769 throw;
2770 }
2771
2772 *this_cache = cache;
2773 return cache;
2774 }
2775
2776 static enum unwind_stop_reason
2777 amd64_sigtramp_frame_unwind_stop_reason (struct frame_info *this_frame,
2778 void **this_cache)
2779 {
2780 struct amd64_frame_cache *cache =
2781 amd64_sigtramp_frame_cache (this_frame, this_cache);
2782
2783 if (!cache->base_p)
2784 return UNWIND_UNAVAILABLE;
2785
2786 return UNWIND_NO_REASON;
2787 }
2788
2789 static void
2790 amd64_sigtramp_frame_this_id (struct frame_info *this_frame,
2791 void **this_cache, struct frame_id *this_id)
2792 {
2793 struct amd64_frame_cache *cache =
2794 amd64_sigtramp_frame_cache (this_frame, this_cache);
2795
2796 if (!cache->base_p)
2797 (*this_id) = frame_id_build_unavailable_stack (get_frame_pc (this_frame));
2798 else if (cache->base == 0)
2799 {
2800 /* This marks the outermost frame. */
2801 return;
2802 }
2803 else
2804 (*this_id) = frame_id_build (cache->base + 16, get_frame_pc (this_frame));
2805 }
2806
2807 static struct value *
2808 amd64_sigtramp_frame_prev_register (struct frame_info *this_frame,
2809 void **this_cache, int regnum)
2810 {
2811 /* Make sure we've initialized the cache. */
2812 amd64_sigtramp_frame_cache (this_frame, this_cache);
2813
2814 return amd64_frame_prev_register (this_frame, this_cache, regnum);
2815 }
2816
2817 static int
2818 amd64_sigtramp_frame_sniffer (const struct frame_unwind *self,
2819 struct frame_info *this_frame,
2820 void **this_cache)
2821 {
2822 struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
2823
2824 /* We shouldn't even bother if we don't have a sigcontext_addr
2825 handler. */
2826 if (tdep->sigcontext_addr == NULL)
2827 return 0;
2828
2829 if (tdep->sigtramp_p != NULL)
2830 {
2831 if (tdep->sigtramp_p (this_frame))
2832 return 1;
2833 }
2834
2835 if (tdep->sigtramp_start != 0)
2836 {
2837 CORE_ADDR pc = get_frame_pc (this_frame);
2838
2839 gdb_assert (tdep->sigtramp_end != 0);
2840 if (pc >= tdep->sigtramp_start && pc < tdep->sigtramp_end)
2841 return 1;
2842 }
2843
2844 return 0;
2845 }
2846
2847 static const struct frame_unwind amd64_sigtramp_frame_unwind =
2848 {
2849 SIGTRAMP_FRAME,
2850 amd64_sigtramp_frame_unwind_stop_reason,
2851 amd64_sigtramp_frame_this_id,
2852 amd64_sigtramp_frame_prev_register,
2853 NULL,
2854 amd64_sigtramp_frame_sniffer
2855 };
2856 \f
2857
2858 static CORE_ADDR
2859 amd64_frame_base_address (struct frame_info *this_frame, void **this_cache)
2860 {
2861 struct amd64_frame_cache *cache =
2862 amd64_frame_cache (this_frame, this_cache);
2863
2864 return cache->base;
2865 }
2866
2867 static const struct frame_base amd64_frame_base =
2868 {
2869 &amd64_frame_unwind,
2870 amd64_frame_base_address,
2871 amd64_frame_base_address,
2872 amd64_frame_base_address
2873 };
2874
2875 /* Normal frames, but in a function epilogue. */
2876
2877 /* Implement the stack_frame_destroyed_p gdbarch method.
2878
2879 The epilogue is defined here as the 'ret' instruction, which will
2880 follow any instruction such as 'leave' or 'pop %ebp' that destroys
2881 the function's stack frame. */
2882
2883 static int
2884 amd64_stack_frame_destroyed_p (struct gdbarch *gdbarch, CORE_ADDR pc)
2885 {
2886 gdb_byte insn;
2887 struct compunit_symtab *cust;
2888
2889 cust = find_pc_compunit_symtab (pc);
2890 if (cust != NULL && COMPUNIT_EPILOGUE_UNWIND_VALID (cust))
2891 return 0;
2892
2893 if (target_read_memory (pc, &insn, 1))
2894 return 0; /* Can't read memory at pc. */
2895
2896 if (insn != 0xc3) /* 'ret' instruction. */
2897 return 0;
2898
2899 return 1;
2900 }
2901
2902 static int
2903 amd64_epilogue_frame_sniffer (const struct frame_unwind *self,
2904 struct frame_info *this_frame,
2905 void **this_prologue_cache)
2906 {
2907 if (frame_relative_level (this_frame) == 0)
2908 return amd64_stack_frame_destroyed_p (get_frame_arch (this_frame),
2909 get_frame_pc (this_frame));
2910 else
2911 return 0;
2912 }
2913
2914 static struct amd64_frame_cache *
2915 amd64_epilogue_frame_cache (struct frame_info *this_frame, void **this_cache)
2916 {
2917 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2918 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2919 struct amd64_frame_cache *cache;
2920 gdb_byte buf[8];
2921
2922 if (*this_cache)
2923 return (struct amd64_frame_cache *) *this_cache;
2924
2925 cache = amd64_alloc_frame_cache ();
2926 *this_cache = cache;
2927
2928 try
2929 {
2930 /* Cache base will be %rsp plus cache->sp_offset (-8). */
2931 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2932 cache->base = extract_unsigned_integer (buf, 8,
2933 byte_order) + cache->sp_offset;
2934
2935 /* Cache pc will be the frame pc. */
2936 cache->pc = get_frame_pc (this_frame);
2937
2938 /* The saved %rsp will be at cache->base plus 16. */
2939 cache->saved_sp = cache->base + 16;
2940
2941 /* The saved %rip will be at cache->base plus 8. */
2942 cache->saved_regs[AMD64_RIP_REGNUM] = cache->base + 8;
2943
2944 cache->base_p = 1;
2945 }
2946 catch (const gdb_exception_error &ex)
2947 {
2948 if (ex.error != NOT_AVAILABLE_ERROR)
2949 throw;
2950 }
2951
2952 return cache;
2953 }
2954
2955 static enum unwind_stop_reason
2956 amd64_epilogue_frame_unwind_stop_reason (struct frame_info *this_frame,
2957 void **this_cache)
2958 {
2959 struct amd64_frame_cache *cache
2960 = amd64_epilogue_frame_cache (this_frame, this_cache);
2961
2962 if (!cache->base_p)
2963 return UNWIND_UNAVAILABLE;
2964
2965 return UNWIND_NO_REASON;
2966 }
2967
2968 static void
2969 amd64_epilogue_frame_this_id (struct frame_info *this_frame,
2970 void **this_cache,
2971 struct frame_id *this_id)
2972 {
2973 struct amd64_frame_cache *cache = amd64_epilogue_frame_cache (this_frame,
2974 this_cache);
2975
2976 if (!cache->base_p)
2977 (*this_id) = frame_id_build_unavailable_stack (cache->pc);
2978 else
2979 (*this_id) = frame_id_build (cache->base + 8, cache->pc);
2980 }
2981
2982 static const struct frame_unwind amd64_epilogue_frame_unwind =
2983 {
2984 NORMAL_FRAME,
2985 amd64_epilogue_frame_unwind_stop_reason,
2986 amd64_epilogue_frame_this_id,
2987 amd64_frame_prev_register,
2988 NULL,
2989 amd64_epilogue_frame_sniffer
2990 };
2991
2992 static struct frame_id
2993 amd64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
2994 {
2995 CORE_ADDR fp;
2996
2997 fp = get_frame_register_unsigned (this_frame, AMD64_RBP_REGNUM);
2998
2999 return frame_id_build (fp + 16, get_frame_pc (this_frame));
3000 }
3001
3002 /* 16-byte align the SP per frame requirements. */
3003
3004 static CORE_ADDR
3005 amd64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
3006 {
3007 return sp & -(CORE_ADDR)16;
3008 }
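/* E.g. (illustrative) a SP of 0x7fffffffe448 becomes
   0x7fffffffe440; an already-aligned value is unchanged.  */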
3009 \f
3010
3011 /* Supply register REGNUM from the buffer specified by FPREGS and LEN
3012 in the floating-point register set REGSET to register cache
3013 REGCACHE. If REGNUM is -1, do this for all registers in REGSET. */
3014
3015 static void
3016 amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
3017 int regnum, const void *fpregs, size_t len)
3018 {
3019 struct gdbarch *gdbarch = regcache->arch ();
3020 const struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3021
3022 gdb_assert (len >= tdep->sizeof_fpregset);
3023 amd64_supply_fxsave (regcache, regnum, fpregs);
3024 }
3025
3026 /* Collect register REGNUM from the register cache REGCACHE and store
3027 it in the buffer specified by FPREGS and LEN as described by the
3028 floating-point register set REGSET. If REGNUM is -1, do this for
3029 all registers in REGSET. */
3030
3031 static void
3032 amd64_collect_fpregset (const struct regset *regset,
3033 const struct regcache *regcache,
3034 int regnum, void *fpregs, size_t len)
3035 {
3036 struct gdbarch *gdbarch = regcache->arch ();
3037 const struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3038
3039 gdb_assert (len >= tdep->sizeof_fpregset);
3040 amd64_collect_fxsave (regcache, regnum, fpregs);
3041 }
3042
3043 const struct regset amd64_fpregset =
3044 {
3045 NULL, amd64_supply_fpregset, amd64_collect_fpregset
3046 };
3047 \f
3048
3049 /* Figure out where the longjmp will land. Slurp the jmp_buf out of
3050 %rdi. We expect its value to be a pointer to the jmp_buf structure
3051 from which we extract the address that we will land at. This
3052 address is copied into PC. This routine returns non-zero on
3053 success. */
3054
3055 static int
3056 amd64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
3057 {
3058 gdb_byte buf[8];
3059 CORE_ADDR jb_addr;
3060 struct gdbarch *gdbarch = get_frame_arch (frame);
3061 int jb_pc_offset = gdbarch_tdep (gdbarch)->jb_pc_offset;
3062 int len = TYPE_LENGTH (builtin_type (gdbarch)->builtin_func_ptr);
3063
3064 /* If JB_PC_OFFSET is -1, we have no way to find out where the
3065 longjmp will land. */
3066 if (jb_pc_offset == -1)
3067 return 0;
3068
3069 get_frame_register (frame, AMD64_RDI_REGNUM, buf);
3070 jb_addr = extract_typed_address
3071 (buf, builtin_type (gdbarch)->builtin_data_ptr);
3072 if (target_read_memory (jb_addr + jb_pc_offset, buf, len))
3073 return 0;
3074
3075 *pc = extract_typed_address (buf, builtin_type (gdbarch)->builtin_func_ptr);
3076
3077 return 1;
3078 }
3079
3080 static const int amd64_record_regmap[] =
3081 {
3082 AMD64_RAX_REGNUM, AMD64_RCX_REGNUM, AMD64_RDX_REGNUM, AMD64_RBX_REGNUM,
3083 AMD64_RSP_REGNUM, AMD64_RBP_REGNUM, AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
3084 AMD64_R8_REGNUM, AMD64_R9_REGNUM, AMD64_R10_REGNUM, AMD64_R11_REGNUM,
3085 AMD64_R12_REGNUM, AMD64_R13_REGNUM, AMD64_R14_REGNUM, AMD64_R15_REGNUM,
3086 AMD64_RIP_REGNUM, AMD64_EFLAGS_REGNUM, AMD64_CS_REGNUM, AMD64_SS_REGNUM,
3087 AMD64_DS_REGNUM, AMD64_ES_REGNUM, AMD64_FS_REGNUM, AMD64_GS_REGNUM
3088 };
3089
3090 /* Implement the "in_indirect_branch_thunk" gdbarch function. */
3091
3092 static bool
3093 amd64_in_indirect_branch_thunk (struct gdbarch *gdbarch, CORE_ADDR pc)
3094 {
3095 return x86_in_indirect_branch_thunk (pc, amd64_register_names,
3096 AMD64_RAX_REGNUM,
3097 AMD64_RIP_REGNUM);
3098 }
3099
3100 void
3101 amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch,
3102 const target_desc *default_tdesc)
3103 {
3104 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3105 const struct target_desc *tdesc = info.target_desc;
3106 static const char *const stap_integer_prefixes[] = { "$", NULL };
3107 static const char *const stap_register_prefixes[] = { "%", NULL };
3108 static const char *const stap_register_indirection_prefixes[] = { "(",
3109 NULL };
3110 static const char *const stap_register_indirection_suffixes[] = { ")",
3111 NULL };
3112
3113 /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
3114 floating-point registers. */
3115 tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;
3116 tdep->fpregset = &amd64_fpregset;
3117
3118 if (! tdesc_has_registers (tdesc))
3119 tdesc = default_tdesc;
3120 tdep->tdesc = tdesc;
3121
3122 tdep->num_core_regs = AMD64_NUM_GREGS + I387_NUM_REGS;
3123 tdep->register_names = amd64_register_names;
3124
3125 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx512") != NULL)
3126 {
3127 tdep->zmmh_register_names = amd64_zmmh_names;
3128 tdep->k_register_names = amd64_k_names;
3129 tdep->xmm_avx512_register_names = amd64_xmm_avx512_names;
3130 tdep->ymm16h_register_names = amd64_ymmh_avx512_names;
3131
3132 tdep->num_zmm_regs = 32;
3133 tdep->num_xmm_avx512_regs = 16;
3134 tdep->num_ymm_avx512_regs = 16;
3135
3136 tdep->zmm0h_regnum = AMD64_ZMM0H_REGNUM;
3137 tdep->k0_regnum = AMD64_K0_REGNUM;
3138 tdep->xmm16_regnum = AMD64_XMM16_REGNUM;
3139 tdep->ymm16h_regnum = AMD64_YMM16H_REGNUM;
3140 }
3141
3142 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx") != NULL)
3143 {
3144 tdep->ymmh_register_names = amd64_ymmh_names;
3145 tdep->num_ymm_regs = 16;
3146 tdep->ymm0h_regnum = AMD64_YMM0H_REGNUM;
3147 }
3148
3149 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.mpx") != NULL)
3150 {
3151 tdep->mpx_register_names = amd64_mpx_names;
3152 tdep->bndcfgu_regnum = AMD64_BNDCFGU_REGNUM;
3153 tdep->bnd0r_regnum = AMD64_BND0R_REGNUM;
3154 }
3155
3156 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.segments") != NULL)
3157 {
3158 tdep->fsbase_regnum = AMD64_FSBASE_REGNUM;
3159 }
3160
3161 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.pkeys") != NULL)
3162 {
3163 tdep->pkeys_register_names = amd64_pkeys_names;
3164 tdep->pkru_regnum = AMD64_PKRU_REGNUM;
3165 tdep->num_pkeys_regs = 1;
3166 }
3167
3168 tdep->num_byte_regs = 20;
3169 tdep->num_word_regs = 16;
3170 tdep->num_dword_regs = 16;
3171 /* Avoid wiring in the MMX registers for now. */
3172 tdep->num_mmx_regs = 0;
3173
3174 set_gdbarch_pseudo_register_read_value (gdbarch,
3175 amd64_pseudo_register_read_value);
3176 set_gdbarch_pseudo_register_write (gdbarch,
3177 amd64_pseudo_register_write);
3178 set_gdbarch_ax_pseudo_register_collect (gdbarch,
3179 amd64_ax_pseudo_register_collect);
3180
3181 set_tdesc_pseudo_register_name (gdbarch, amd64_pseudo_register_name);
3182
3183 /* AMD64 has an FPU and 16 SSE registers. */
3184 tdep->st0_regnum = AMD64_ST0_REGNUM;
3185 tdep->num_xmm_regs = 16;
3186
3187 /* This is what all the fuss is about. */
3188 set_gdbarch_long_bit (gdbarch, 64);
3189 set_gdbarch_long_long_bit (gdbarch, 64);
3190 set_gdbarch_ptr_bit (gdbarch, 64);
3191
3192 /* In contrast to the i386, on AMD64 a `long double' actually takes
3193 up 128 bits, even though it's still based on the i387 extended
3194 floating-point format which has only 80 significant bits. */
3195 set_gdbarch_long_double_bit (gdbarch, 128);
3196
3197 set_gdbarch_num_regs (gdbarch, AMD64_NUM_REGS);
3198
3199 /* Register numbers of various important registers. */
3200 set_gdbarch_sp_regnum (gdbarch, AMD64_RSP_REGNUM); /* %rsp */
3201 set_gdbarch_pc_regnum (gdbarch, AMD64_RIP_REGNUM); /* %rip */
3202 set_gdbarch_ps_regnum (gdbarch, AMD64_EFLAGS_REGNUM); /* %eflags */
3203 set_gdbarch_fp0_regnum (gdbarch, AMD64_ST0_REGNUM); /* %st(0) */
3204
3205 /* The "default" register numbering scheme for AMD64 is referred to
3206 as the "DWARF Register Number Mapping" in the System V psABI.
3207 The preferred debugging format for all known AMD64 targets is
3208 actually DWARF2, and GCC doesn't seem to support DWARF (that is
3209 DWARF-1), but we provide the same mapping just in case. This
3210 mapping is also used for stabs, which GCC does support. */
3211 set_gdbarch_stab_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
3212 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
3213
3214 /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
3215 be in use on any of the supported AMD64 targets. */
3216
3217 /* Call dummy code. */
3218 set_gdbarch_push_dummy_call (gdbarch, amd64_push_dummy_call);
3219 set_gdbarch_frame_align (gdbarch, amd64_frame_align);
3220 set_gdbarch_frame_red_zone_size (gdbarch, 128);
3221
3222 set_gdbarch_convert_register_p (gdbarch, i387_convert_register_p);
3223 set_gdbarch_register_to_value (gdbarch, i387_register_to_value);
3224 set_gdbarch_value_to_register (gdbarch, i387_value_to_register);
3225
3226 set_gdbarch_return_value (gdbarch, amd64_return_value);
3227
3228 set_gdbarch_skip_prologue (gdbarch, amd64_skip_prologue);
3229
3230 tdep->record_regmap = amd64_record_regmap;
3231
3232 set_gdbarch_dummy_id (gdbarch, amd64_dummy_id);
3233
3234 /* Hook the function epilogue frame unwinder. This unwinder is
3235 appended to the list first, so that it supersedes the other
3236 unwinders in function epilogues. */
3237 frame_unwind_prepend_unwinder (gdbarch, &amd64_epilogue_frame_unwind);
3238
3239 /* Hook the prologue-based frame unwinders. */
3240 frame_unwind_append_unwinder (gdbarch, &amd64_sigtramp_frame_unwind);
3241 frame_unwind_append_unwinder (gdbarch, &amd64_frame_unwind);
3242 frame_base_set_default (gdbarch, &amd64_frame_base);
3243
3244 set_gdbarch_get_longjmp_target (gdbarch, amd64_get_longjmp_target);
3245
3246 set_gdbarch_relocate_instruction (gdbarch, amd64_relocate_instruction);
3247
3248 set_gdbarch_gen_return_address (gdbarch, amd64_gen_return_address);
3249
3250 /* SystemTap variables and functions. */
3251 set_gdbarch_stap_integer_prefixes (gdbarch, stap_integer_prefixes);
3252 set_gdbarch_stap_register_prefixes (gdbarch, stap_register_prefixes);
3253 set_gdbarch_stap_register_indirection_prefixes (gdbarch,
3254 stap_register_indirection_prefixes);
3255 set_gdbarch_stap_register_indirection_suffixes (gdbarch,
3256 stap_register_indirection_suffixes);
3257 set_gdbarch_stap_is_single_operand (gdbarch,
3258 i386_stap_is_single_operand);
3259 set_gdbarch_stap_parse_special_token (gdbarch,
3260 i386_stap_parse_special_token);
3261 set_gdbarch_insn_is_call (gdbarch, amd64_insn_is_call);
3262 set_gdbarch_insn_is_ret (gdbarch, amd64_insn_is_ret);
3263 set_gdbarch_insn_is_jump (gdbarch, amd64_insn_is_jump);
3264
3265 set_gdbarch_in_indirect_branch_thunk (gdbarch,
3266 amd64_in_indirect_branch_thunk);
3267
3268 register_amd64_ravenscar_ops (gdbarch);
3269 }
3270
3271 /* Initialize ARCH for x86-64, no osabi. */
3272
3273 static void
3274 amd64_none_init_abi (gdbarch_info info, gdbarch *arch)
3275 {
3276 amd64_init_abi (info, arch, amd64_target_description (X86_XSTATE_SSE_MASK,
3277 true));
3278 }
3279
3280 static struct type *
3281 amd64_x32_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
3282 {
3283 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3284
3285 switch (regnum - tdep->eax_regnum)
3286 {
3287 case AMD64_RBP_REGNUM: /* %ebp */
3288 case AMD64_RSP_REGNUM: /* %esp */
3289 return builtin_type (gdbarch)->builtin_data_ptr;
3290 case AMD64_RIP_REGNUM: /* %eip */
3291 return builtin_type (gdbarch)->builtin_func_ptr;
3292 }
3293
3294 return i386_pseudo_register_type (gdbarch, regnum);
3295 }
3296
3297 void
3298 amd64_x32_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch,
3299 const target_desc *default_tdesc)
3300 {
3301 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3302
3303 amd64_init_abi (info, gdbarch, default_tdesc);
3304
3305 tdep->num_dword_regs = 17;
3306 set_tdesc_pseudo_register_type (gdbarch, amd64_x32_pseudo_register_type);
3307
3308 set_gdbarch_long_bit (gdbarch, 32);
3309 set_gdbarch_ptr_bit (gdbarch, 32);
3310 }
3311
3312 /* Initialize ARCH for x64-32, no osabi. */
3313
3314 static void
3315 amd64_x32_none_init_abi (gdbarch_info info, gdbarch *arch)
3316 {
3317 amd64_x32_init_abi (info, arch,
3318 amd64_target_description (X86_XSTATE_SSE_MASK, true));
3319 }
3320
3321 /* Return the target description for a specified XSAVE feature mask. */
3322
3323 const struct target_desc *
3324 amd64_target_description (uint64_t xcr0, bool segments)
3325 {
3326 static target_desc *amd64_tdescs
3327 [2/*AVX*/][2/*MPX*/][2/*AVX512*/][2/*PKRU*/][2/*segments*/] = {};
3328 target_desc **tdesc;
3329
3330 tdesc = &amd64_tdescs[(xcr0 & X86_XSTATE_AVX) ? 1 : 0]
3331 [(xcr0 & X86_XSTATE_MPX) ? 1 : 0]
3332 [(xcr0 & X86_XSTATE_AVX512) ? 1 : 0]
3333 [(xcr0 & X86_XSTATE_PKRU) ? 1 : 0]
3334 [segments ? 1 : 0];
3335
3336 if (*tdesc == NULL)
3337 *tdesc = amd64_create_target_description (xcr0, false, false,
3338 segments);
3339
3340 return *tdesc;
3341 }
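/* Typical use (illustrative): the no-osabi init functions above
   request an SSE-only description with segment registers via
   amd64_target_description (X86_XSTATE_SSE_MASK, true).  */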
3342
3343 void _initialize_amd64_tdep ();
3344 void
3345 _initialize_amd64_tdep ()
3346 {
3347 gdbarch_register_osabi (bfd_arch_i386, bfd_mach_x86_64, GDB_OSABI_NONE,
3348 amd64_none_init_abi);
3349 gdbarch_register_osabi (bfd_arch_i386, bfd_mach_x64_32, GDB_OSABI_NONE,
3350 amd64_x32_none_init_abi);
3351 }
3352 \f
3353
3354 /* The 64-bit FXSAVE format differs from the 32-bit format in the
3355 sense that the instruction pointer and data pointer are simply
3356 64-bit offsets into the code segment and the data segment instead
3357 of a selector/offset pair. The functions below store the upper 32
3358 bits of these pointers (instead of just the 16 bits of the segment
3359 selector). */
3360
3361 /* Fill register REGNUM in REGCACHE with the appropriate
3362 floating-point or SSE register value from *FXSAVE. If REGNUM is
3363 -1, do this for all registers. This function masks off any of the
3364 reserved bits in *FXSAVE. */
3365
3366 void
3367 amd64_supply_fxsave (struct regcache *regcache, int regnum,
3368 const void *fxsave)
3369 {
3370 struct gdbarch *gdbarch = regcache->arch ();
3371 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3372
3373 i387_supply_fxsave (regcache, regnum, fxsave);
3374
3375 if (fxsave
3376 && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3377 {
3378 const gdb_byte *regs = (const gdb_byte *) fxsave;
3379
3380 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3381 regcache->raw_supply (I387_FISEG_REGNUM (tdep), regs + 12);
3382 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3383 regcache->raw_supply (I387_FOSEG_REGNUM (tdep), regs + 20);
3384 }
3385 }
3386
3387 /* Similar to amd64_supply_fxsave, but use XSAVE extended state. */
3388
3389 void
3390 amd64_supply_xsave (struct regcache *regcache, int regnum,
3391 const void *xsave)
3392 {
3393 struct gdbarch *gdbarch = regcache->arch ();
3394 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3395
3396 i387_supply_xsave (regcache, regnum, xsave);
3397
3398 if (xsave
3399 && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3400 {
3401 const gdb_byte *regs = (const gdb_byte *) xsave;
3402 ULONGEST clear_bv;
3403
3404 clear_bv = i387_xsave_get_clear_bv (gdbarch, xsave);
3405
3406 /* If the FISEG and FOSEG registers have not been initialised yet
3407 (their CLEAR_BV bit is set) then their default values of zero will
3408 have already been set up by I387_SUPPLY_XSAVE. */
3409 if (!(clear_bv & X86_XSTATE_X87))
3410 {
3411 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3412 regcache->raw_supply (I387_FISEG_REGNUM (tdep), regs + 12);
3413 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3414 regcache->raw_supply (I387_FOSEG_REGNUM (tdep), regs + 20);
3415 }
3416 }
3417 }
3418
3419 /* Fill register REGNUM (if it is a floating-point or SSE register) in
3420 *FXSAVE with the value from REGCACHE. If REGNUM is -1, do this for
3421 all registers. This function doesn't touch any of the reserved
3422 bits in *FXSAVE. */
3423
3424 void
3425 amd64_collect_fxsave (const struct regcache *regcache, int regnum,
3426 void *fxsave)
3427 {
3428 struct gdbarch *gdbarch = regcache->arch ();
3429 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3430 gdb_byte *regs = (gdb_byte *) fxsave;
3431
3432 i387_collect_fxsave (regcache, regnum, fxsave);
3433
3434 if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3435 {
3436 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3437 regcache->raw_collect (I387_FISEG_REGNUM (tdep), regs + 12);
3438 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3439 regcache->raw_collect (I387_FOSEG_REGNUM (tdep), regs + 20);
3440 }
3441 }
3442
3443 /* Similar to amd64_collect_fxsave, but use XSAVE extended state. */
3444
3445 void
3446 amd64_collect_xsave (const struct regcache *regcache, int regnum,
3447 void *xsave, int gcore)
3448 {
3449 struct gdbarch *gdbarch = regcache->arch ();
3450 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3451 gdb_byte *regs = (gdb_byte *) xsave;
3452
3453 i387_collect_xsave (regcache, regnum, xsave, gcore);
3454
3455 if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3456 {
3457 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3458 regcache->raw_collect (I387_FISEG_REGNUM (tdep),
3459 regs + 12);
3460 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3461 regcache->raw_collect (I387_FOSEG_REGNUM (tdep),
3462 regs + 20);
3463 }
3464 }