/* Target-dependent code for AMD64.

   Copyright (C) 2001-2017 Free Software Foundation, Inc.

   Contributed by Jiri Smid, SuSE Labs.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "opcode/i386.h"
#include "dis-asm.h"
#include "arch-utils.h"
#include "block.h"
#include "dummy-frame.h"
#include "frame.h"
#include "frame-base.h"
#include "frame-unwind.h"
#include "inferior.h"
#include "infrun.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "objfiles.h"
#include "regcache.h"
#include "regset.h"
#include "symfile.h"
#include "disasm.h"
#include "amd64-tdep.h"
#include "i387-tdep.h"
#include "x86-xstate.h"
#include <algorithm>

#include "features/i386/amd64.c"
#include "features/i386/amd64-avx.c"
#include "features/i386/amd64-mpx.c"
#include "features/i386/amd64-avx-mpx.c"
#include "features/i386/amd64-avx-avx512.c"
#include "features/i386/amd64-avx-mpx-avx512-pku.c"

#include "ax.h"
#include "ax-gdb.h"

/* Note that the AMD64 architecture was previously known as x86-64.
   The latter is (forever) engraved into the canonical system name as
   returned by config.guess, and used as the name for the AMD64 port
   of GNU/Linux.  The BSDs have renamed their ports to amd64; they
   don't like to shout.  For GDB we prefer the amd64_-prefix over the
   x86_64_-prefix since it's so much easier to type.  */

/* Register information.  */

static const char *amd64_register_names[] =
{
  "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",

  /* %r8 is indeed register number 8.  */
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
  "rip", "eflags", "cs", "ss", "ds", "es", "fs", "gs",

  /* %st0 is register number 24.  */
  "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7",
  "fctrl", "fstat", "ftag", "fiseg", "fioff", "foseg", "fooff", "fop",

  /* %xmm0 is register number 40.  */
  "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
  "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
  "mxcsr",
};

static const char *amd64_ymm_names[] =
{
  "ymm0", "ymm1", "ymm2", "ymm3",
  "ymm4", "ymm5", "ymm6", "ymm7",
  "ymm8", "ymm9", "ymm10", "ymm11",
  "ymm12", "ymm13", "ymm14", "ymm15"
};

static const char *amd64_ymm_avx512_names[] =
{
  "ymm16", "ymm17", "ymm18", "ymm19",
  "ymm20", "ymm21", "ymm22", "ymm23",
  "ymm24", "ymm25", "ymm26", "ymm27",
  "ymm28", "ymm29", "ymm30", "ymm31"
};

static const char *amd64_ymmh_names[] =
{
  "ymm0h", "ymm1h", "ymm2h", "ymm3h",
  "ymm4h", "ymm5h", "ymm6h", "ymm7h",
  "ymm8h", "ymm9h", "ymm10h", "ymm11h",
  "ymm12h", "ymm13h", "ymm14h", "ymm15h"
};

static const char *amd64_ymmh_avx512_names[] =
{
  "ymm16h", "ymm17h", "ymm18h", "ymm19h",
  "ymm20h", "ymm21h", "ymm22h", "ymm23h",
  "ymm24h", "ymm25h", "ymm26h", "ymm27h",
  "ymm28h", "ymm29h", "ymm30h", "ymm31h"
};

static const char *amd64_mpx_names[] =
{
  "bnd0raw", "bnd1raw", "bnd2raw", "bnd3raw", "bndcfgu", "bndstatus"
};

static const char *amd64_k_names[] =
{
  "k0", "k1", "k2", "k3",
  "k4", "k5", "k6", "k7"
};

static const char *amd64_zmmh_names[] =
{
  "zmm0h", "zmm1h", "zmm2h", "zmm3h",
  "zmm4h", "zmm5h", "zmm6h", "zmm7h",
  "zmm8h", "zmm9h", "zmm10h", "zmm11h",
  "zmm12h", "zmm13h", "zmm14h", "zmm15h",
  "zmm16h", "zmm17h", "zmm18h", "zmm19h",
  "zmm20h", "zmm21h", "zmm22h", "zmm23h",
  "zmm24h", "zmm25h", "zmm26h", "zmm27h",
  "zmm28h", "zmm29h", "zmm30h", "zmm31h"
};

static const char *amd64_zmm_names[] =
{
  "zmm0", "zmm1", "zmm2", "zmm3",
  "zmm4", "zmm5", "zmm6", "zmm7",
  "zmm8", "zmm9", "zmm10", "zmm11",
  "zmm12", "zmm13", "zmm14", "zmm15",
  "zmm16", "zmm17", "zmm18", "zmm19",
  "zmm20", "zmm21", "zmm22", "zmm23",
  "zmm24", "zmm25", "zmm26", "zmm27",
  "zmm28", "zmm29", "zmm30", "zmm31"
};

static const char *amd64_xmm_avx512_names[] = {
  "xmm16", "xmm17", "xmm18", "xmm19",
  "xmm20", "xmm21", "xmm22", "xmm23",
  "xmm24", "xmm25", "xmm26", "xmm27",
  "xmm28", "xmm29", "xmm30", "xmm31"
};

static const char *amd64_pkeys_names[] = {
  "pkru"
};

/* DWARF Register Number Mapping as defined in the System V psABI,
   section 3.6.  */

static int amd64_dwarf_regmap[] =
{
  /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI.  */
  AMD64_RAX_REGNUM, AMD64_RDX_REGNUM,
  AMD64_RCX_REGNUM, AMD64_RBX_REGNUM,
  AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,

  /* Frame Pointer Register RBP.  */
  AMD64_RBP_REGNUM,

  /* Stack Pointer Register RSP.  */
  AMD64_RSP_REGNUM,

  /* Extended Integer Registers 8 - 15.  */
  AMD64_R8_REGNUM,		/* %r8 */
  AMD64_R9_REGNUM,		/* %r9 */
  AMD64_R10_REGNUM,		/* %r10 */
  AMD64_R11_REGNUM,		/* %r11 */
  AMD64_R12_REGNUM,		/* %r12 */
  AMD64_R13_REGNUM,		/* %r13 */
  AMD64_R14_REGNUM,		/* %r14 */
  AMD64_R15_REGNUM,		/* %r15 */

  /* Return Address RA.  Mapped to RIP.  */
  AMD64_RIP_REGNUM,

  /* SSE Registers 0 - 7.  */
  AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
  AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
  AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
  AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,

  /* Extended SSE Registers 8 - 15.  */
  AMD64_XMM0_REGNUM + 8, AMD64_XMM0_REGNUM + 9,
  AMD64_XMM0_REGNUM + 10, AMD64_XMM0_REGNUM + 11,
  AMD64_XMM0_REGNUM + 12, AMD64_XMM0_REGNUM + 13,
  AMD64_XMM0_REGNUM + 14, AMD64_XMM0_REGNUM + 15,

  /* Floating Point Registers 0-7.  */
  AMD64_ST0_REGNUM + 0, AMD64_ST0_REGNUM + 1,
  AMD64_ST0_REGNUM + 2, AMD64_ST0_REGNUM + 3,
  AMD64_ST0_REGNUM + 4, AMD64_ST0_REGNUM + 5,
  AMD64_ST0_REGNUM + 6, AMD64_ST0_REGNUM + 7,

  /* MMX Registers 0 - 7.
     We have to handle those registers specifically, as their register
     number within GDB depends on the target (or they may even not be
     available at all).  */
  -1, -1, -1, -1, -1, -1, -1, -1,

  /* Control and Status Flags Register.  */
  AMD64_EFLAGS_REGNUM,

  /* Selector Registers.  */
  AMD64_ES_REGNUM,
  AMD64_CS_REGNUM,
  AMD64_SS_REGNUM,
  AMD64_DS_REGNUM,
  AMD64_FS_REGNUM,
  AMD64_GS_REGNUM,
  -1,
  -1,

  /* Segment Base Address Registers.  */
  -1,
  -1,
  -1,
  -1,

  /* Special Selector Registers.  */
  -1,
  -1,

  /* Floating Point Control Registers.  */
  AMD64_MXCSR_REGNUM,
  AMD64_FCTRL_REGNUM,
  AMD64_FSTAT_REGNUM
};

static const int amd64_dwarf_regmap_len =
  (sizeof (amd64_dwarf_regmap) / sizeof (amd64_dwarf_regmap[0]));

/* Convert DWARF register number REG to the appropriate register
   number used by GDB.  */

static int
amd64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  int ymm0_regnum = tdep->ymm0_regnum;
  int regnum = -1;

  if (reg >= 0 && reg < amd64_dwarf_regmap_len)
    regnum = amd64_dwarf_regmap[reg];

  if (ymm0_regnum >= 0
      && i386_xmm_regnum_p (gdbarch, regnum))
    regnum += ymm0_regnum - I387_XMM0_REGNUM (tdep);

  return regnum;
}
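
/* For illustration, a sketch of the mapping above (hypothetical
   asserts, assuming a target description without AVX, so %xmm0 is a
   raw register and the ymm remapping above does not apply):  */
#if 0
static void
amd64_dwarf_regmap_example (struct gdbarch *gdbarch)
{
  /* DWARF 0 is %rax, DWARF 7 is %rsp, DWARF 16 is the return address
     (mapped to %rip) and DWARF 17 is %xmm0, per the psABI table.  */
  gdb_assert (amd64_dwarf_reg_to_regnum (gdbarch, 0) == AMD64_RAX_REGNUM);
  gdb_assert (amd64_dwarf_reg_to_regnum (gdbarch, 7) == AMD64_RSP_REGNUM);
  gdb_assert (amd64_dwarf_reg_to_regnum (gdbarch, 16) == AMD64_RIP_REGNUM);
  gdb_assert (amd64_dwarf_reg_to_regnum (gdbarch, 17) == AMD64_XMM0_REGNUM);
}
#endif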

/* Map architectural register numbers to gdb register numbers.  */

static const int amd64_arch_regmap[16] =
{
  AMD64_RAX_REGNUM,	/* %rax */
  AMD64_RCX_REGNUM,	/* %rcx */
  AMD64_RDX_REGNUM,	/* %rdx */
  AMD64_RBX_REGNUM,	/* %rbx */
  AMD64_RSP_REGNUM,	/* %rsp */
  AMD64_RBP_REGNUM,	/* %rbp */
  AMD64_RSI_REGNUM,	/* %rsi */
  AMD64_RDI_REGNUM,	/* %rdi */
  AMD64_R8_REGNUM,	/* %r8 */
  AMD64_R9_REGNUM,	/* %r9 */
  AMD64_R10_REGNUM,	/* %r10 */
  AMD64_R11_REGNUM,	/* %r11 */
  AMD64_R12_REGNUM,	/* %r12 */
  AMD64_R13_REGNUM,	/* %r13 */
  AMD64_R14_REGNUM,	/* %r14 */
  AMD64_R15_REGNUM	/* %r15 */
};

static const int amd64_arch_regmap_len =
  (sizeof (amd64_arch_regmap) / sizeof (amd64_arch_regmap[0]));

/* Convert architectural register number REG to the appropriate register
   number used by GDB.  */

static int
amd64_arch_reg_to_regnum (int reg)
{
  gdb_assert (reg >= 0 && reg < amd64_arch_regmap_len);

  return amd64_arch_regmap[reg];
}

/* Register names for byte pseudo-registers.  */

static const char *amd64_byte_names[] =
{
  "al", "bl", "cl", "dl", "sil", "dil", "bpl", "spl",
  "r8l", "r9l", "r10l", "r11l", "r12l", "r13l", "r14l", "r15l",
  "ah", "bh", "ch", "dh"
};

/* Number of lower byte registers.  */
#define AMD64_NUM_LOWER_BYTE_REGS 16

/* Register names for word pseudo-registers.  */

static const char *amd64_word_names[] =
{
  "ax", "bx", "cx", "dx", "si", "di", "bp", "",
  "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w"
};

/* Register names for dword pseudo-registers.  */

static const char *amd64_dword_names[] =
{
  "eax", "ebx", "ecx", "edx", "esi", "edi", "ebp", "esp",
  "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d",
  "eip"
};

/* Return the name of register REGNUM.  */

static const char *
amd64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  if (i386_byte_regnum_p (gdbarch, regnum))
    return amd64_byte_names[regnum - tdep->al_regnum];
  else if (i386_zmm_regnum_p (gdbarch, regnum))
    return amd64_zmm_names[regnum - tdep->zmm0_regnum];
  else if (i386_ymm_regnum_p (gdbarch, regnum))
    return amd64_ymm_names[regnum - tdep->ymm0_regnum];
  else if (i386_ymm_avx512_regnum_p (gdbarch, regnum))
    return amd64_ymm_avx512_names[regnum - tdep->ymm16_regnum];
  else if (i386_word_regnum_p (gdbarch, regnum))
    return amd64_word_names[regnum - tdep->ax_regnum];
  else if (i386_dword_regnum_p (gdbarch, regnum))
    return amd64_dword_names[regnum - tdep->eax_regnum];
  else
    return i386_pseudo_register_name (gdbarch, regnum);
}

static struct value *
amd64_pseudo_register_read_value (struct gdbarch *gdbarch,
				  struct regcache *regcache,
				  int regnum)
{
  gdb_byte *raw_buf = (gdb_byte *) alloca (register_size (gdbarch, regnum));
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum register_status status;
  struct value *result_value;
  gdb_byte *buf;

  result_value = allocate_value (register_type (gdbarch, regnum));
  VALUE_LVAL (result_value) = lval_register;
  VALUE_REGNUM (result_value) = regnum;
  buf = value_contents_raw (result_value);

  if (i386_byte_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->al_regnum;

      /* Extract (always little endian).  */
      if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
	{
	  /* Special handling for AH, BH, CH, DH.  */
	  status = regcache_raw_read (regcache,
				      gpnum - AMD64_NUM_LOWER_BYTE_REGS,
				      raw_buf);
	  if (status == REG_VALID)
	    memcpy (buf, raw_buf + 1, 1);
	  else
	    mark_value_bytes_unavailable (result_value, 0,
					  TYPE_LENGTH (value_type (result_value)));
	}
      else
	{
	  status = regcache_raw_read (regcache, gpnum, raw_buf);
	  if (status == REG_VALID)
	    memcpy (buf, raw_buf, 1);
	  else
	    mark_value_bytes_unavailable (result_value, 0,
					  TYPE_LENGTH (value_type (result_value)));
	}
    }
  else if (i386_dword_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->eax_regnum;
      /* Extract (always little endian).  */
      status = regcache_raw_read (regcache, gpnum, raw_buf);
      if (status == REG_VALID)
	memcpy (buf, raw_buf, 4);
      else
	mark_value_bytes_unavailable (result_value, 0,
				      TYPE_LENGTH (value_type (result_value)));
    }
  else
    i386_pseudo_register_read_into_value (gdbarch, regcache, regnum,
					  result_value);

  return result_value;
}
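
/* A sketch of why the AH/BH/CH/DH case above reads at RAW_BUF + 1: on
   little-endian AMD64, %ah is byte 1 of %rax (%al is byte 0), so for a
   hypothetical rax = 0x1234, %ah == 0x12 and %al == 0x34.  */
#if 0
static void
amd64_byte_reg_layout_example (void)
{
  ULONGEST rax = 0x1234;
  const gdb_byte *raw = (const gdb_byte *) &rax;

  gdb_assert (raw[0] == 0x34);	/* %al: the lowest byte.  */
  gdb_assert (raw[1] == 0x12);	/* %ah: the second byte, hence raw_buf + 1.  */
}
#endif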

static void
amd64_pseudo_register_write (struct gdbarch *gdbarch,
			     struct regcache *regcache,
			     int regnum, const gdb_byte *buf)
{
  gdb_byte *raw_buf = (gdb_byte *) alloca (register_size (gdbarch, regnum));
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (i386_byte_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->al_regnum;

      if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
	{
	  /* Read ... AH, BH, CH, DH.  */
	  regcache_raw_read (regcache,
			     gpnum - AMD64_NUM_LOWER_BYTE_REGS, raw_buf);
	  /* ... Modify ... (always little endian).  */
	  memcpy (raw_buf + 1, buf, 1);
	  /* ... Write.  */
	  regcache_raw_write (regcache,
			      gpnum - AMD64_NUM_LOWER_BYTE_REGS, raw_buf);
	}
      else
	{
	  /* Read ...  */
	  regcache_raw_read (regcache, gpnum, raw_buf);
	  /* ... Modify ... (always little endian).  */
	  memcpy (raw_buf, buf, 1);
	  /* ... Write.  */
	  regcache_raw_write (regcache, gpnum, raw_buf);
	}
    }
  else if (i386_dword_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->eax_regnum;

      /* Read ...  */
      regcache_raw_read (regcache, gpnum, raw_buf);
      /* ... Modify ... (always little endian).  */
      memcpy (raw_buf, buf, 4);
      /* ... Write.  */
      regcache_raw_write (regcache, gpnum, raw_buf);
    }
  else
    i386_pseudo_register_write (gdbarch, regcache, regnum, buf);
}

/* Implement the 'ax_pseudo_register_collect' gdbarch method.  */

static int
amd64_ax_pseudo_register_collect (struct gdbarch *gdbarch,
				  struct agent_expr *ax, int regnum)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (i386_byte_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->al_regnum;

      if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
	ax_reg_mask (ax, gpnum - AMD64_NUM_LOWER_BYTE_REGS);
      else
	ax_reg_mask (ax, gpnum);
      return 0;
    }
  else if (i386_dword_regnum_p (gdbarch, regnum))
    {
      int gpnum = regnum - tdep->eax_regnum;

      ax_reg_mask (ax, gpnum);
      return 0;
    }
  else
    return i386_ax_pseudo_register_collect (gdbarch, ax, regnum);
}

\f

/* Register classes as defined in the psABI.  */

enum amd64_reg_class
{
  AMD64_INTEGER,
  AMD64_SSE,
  AMD64_SSEUP,
  AMD64_X87,
  AMD64_X87UP,
  AMD64_COMPLEX_X87,
  AMD64_NO_CLASS,
  AMD64_MEMORY
};

/* Return the union class of CLASS1 and CLASS2.  See the psABI for
   details.  */

static enum amd64_reg_class
amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
{
  /* Rule (a): If both classes are equal, this is the resulting class.  */
  if (class1 == class2)
    return class1;

  /* Rule (b): If one of the classes is NO_CLASS, the resulting class
     is the other class.  */
  if (class1 == AMD64_NO_CLASS)
    return class2;
  if (class2 == AMD64_NO_CLASS)
    return class1;

  /* Rule (c): If one of the classes is MEMORY, the result is MEMORY.  */
  if (class1 == AMD64_MEMORY || class2 == AMD64_MEMORY)
    return AMD64_MEMORY;

  /* Rule (d): If one of the classes is INTEGER, the result is INTEGER.  */
  if (class1 == AMD64_INTEGER || class2 == AMD64_INTEGER)
    return AMD64_INTEGER;

  /* Rule (e): If one of the classes is X87, X87UP, COMPLEX_X87 class,
     MEMORY is used as class.  */
  if (class1 == AMD64_X87 || class1 == AMD64_X87UP
      || class1 == AMD64_COMPLEX_X87 || class2 == AMD64_X87
      || class2 == AMD64_X87UP || class2 == AMD64_COMPLEX_X87)
    return AMD64_MEMORY;

  /* Rule (f): Otherwise class SSE is used.  */
  return AMD64_SSE;
}
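
/* A sketch of what the merge rules give for a few pairs (illustration
   only):  */
#if 0
static void
amd64_merge_classes_example (void)
{
  /* Rule (b): NO_CLASS acts as the identity.  */
  gdb_assert (amd64_merge_classes (AMD64_NO_CLASS, AMD64_SSE) == AMD64_SSE);

  /* Rule (d): INTEGER beats SSE, so an eightbyte holding both an int
     and a float is passed in an integer register.  */
  gdb_assert (amd64_merge_classes (AMD64_SSE, AMD64_INTEGER)
	      == AMD64_INTEGER);

  /* Rule (e): any x87 class mixed with anything else forces MEMORY.  */
  gdb_assert (amd64_merge_classes (AMD64_X87, AMD64_SSE) == AMD64_MEMORY);
}
#endif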

static void amd64_classify (struct type *type, enum amd64_reg_class theclass[2]);

/* Return non-zero if TYPE is a non-POD structure or union type.  */

static int
amd64_non_pod_p (struct type *type)
{
  /* ??? A class with a base class certainly isn't POD, but does this
     catch all non-POD structure types?  */
  if (TYPE_CODE (type) == TYPE_CODE_STRUCT && TYPE_N_BASECLASSES (type) > 0)
    return 1;

  return 0;
}

/* Classify TYPE according to the rules for aggregate (structures and
   arrays) and union types, and store the result in CLASS.  */

static void
amd64_classify_aggregate (struct type *type, enum amd64_reg_class theclass[2])
{
  /* 1. If the size of an object is larger than two eightbytes, or in
        C++, is a non-POD structure or union type, or contains
        unaligned fields, it has class memory.  */
  if (TYPE_LENGTH (type) > 16 || amd64_non_pod_p (type))
    {
      theclass[0] = theclass[1] = AMD64_MEMORY;
      return;
    }

  /* 2. Both eightbytes get initialized to class NO_CLASS.  */
  theclass[0] = theclass[1] = AMD64_NO_CLASS;

  /* 3. Each field of an object is classified recursively so that
        always two fields are considered.  The resulting class is
        calculated according to the classes of the fields in the
        eightbyte: */

  if (TYPE_CODE (type) == TYPE_CODE_ARRAY)
    {
      struct type *subtype = check_typedef (TYPE_TARGET_TYPE (type));

      /* All fields in an array have the same type.  */
      amd64_classify (subtype, theclass);
      if (TYPE_LENGTH (type) > 8 && theclass[1] == AMD64_NO_CLASS)
	theclass[1] = theclass[0];
    }
  else
    {
      int i;

      /* Structure or union.  */
      gdb_assert (TYPE_CODE (type) == TYPE_CODE_STRUCT
		  || TYPE_CODE (type) == TYPE_CODE_UNION);

      for (i = 0; i < TYPE_NFIELDS (type); i++)
	{
	  struct type *subtype = check_typedef (TYPE_FIELD_TYPE (type, i));
	  int pos = TYPE_FIELD_BITPOS (type, i) / 64;
	  enum amd64_reg_class subclass[2];
	  int bitsize = TYPE_FIELD_BITSIZE (type, i);
	  int endpos;

	  if (bitsize == 0)
	    bitsize = TYPE_LENGTH (subtype) * 8;
	  endpos = (TYPE_FIELD_BITPOS (type, i) + bitsize - 1) / 64;

	  /* Ignore static fields.  */
	  if (field_is_static (&TYPE_FIELD (type, i)))
	    continue;

	  gdb_assert (pos == 0 || pos == 1);

	  amd64_classify (subtype, subclass);
	  theclass[pos] = amd64_merge_classes (theclass[pos], subclass[0]);
	  if (bitsize <= 64 && pos == 0 && endpos == 1)
	    /* This is a bit of an odd case:  We have a field that would
	       normally fit in one of the two eightbytes, except that
	       it is placed in a way that this field straddles them.
	       This has been seen with a structure containing an array.

	       The ABI is a bit unclear in this case, but we assume that
	       this field's class (stored in subclass[0]) must also be merged
	       into class[1].  In other words, our field has a piece stored
	       in the second eight-byte, and thus its class applies to
	       the second eight-byte as well.

	       In the case where the field length exceeds 8 bytes,
	       it should not be necessary to merge the field class
	       into class[1].  As LEN > 8, subclass[1] is necessarily
	       different from AMD64_NO_CLASS.  If subclass[1] is equal
	       to subclass[0], then the normal class[1]/subclass[1]
	       merging will take care of everything.  For subclass[1]
	       to be different from subclass[0], I can only see the case
	       where we have an SSE/SSEUP or X87/X87UP pair, which both
	       use up all 16 bytes of the aggregate, and are already
	       handled just fine (because each portion sits on its own
	       8-byte).  */
	    theclass[1] = amd64_merge_classes (theclass[1], subclass[0]);
	  if (pos == 0)
	    theclass[1] = amd64_merge_classes (theclass[1], subclass[1]);
	}
    }

  /* 4. Then a post merger cleanup is done: */

  /* Rule (a): If one of the classes is MEMORY, the whole argument is
     passed in memory.  */
  if (theclass[0] == AMD64_MEMORY || theclass[1] == AMD64_MEMORY)
    theclass[0] = theclass[1] = AMD64_MEMORY;

  /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
     SSE.  */
  if (theclass[0] == AMD64_SSEUP)
    theclass[0] = AMD64_SSE;
  if (theclass[1] == AMD64_SSEUP && theclass[0] != AMD64_SSE)
    theclass[1] = AMD64_SSE;
}

/* Classify TYPE, and store the result in CLASS.  */

static void
amd64_classify (struct type *type, enum amd64_reg_class theclass[2])
{
  enum type_code code = TYPE_CODE (type);
  int len = TYPE_LENGTH (type);

  theclass[0] = theclass[1] = AMD64_NO_CLASS;

  /* Arguments of types (signed and unsigned) _Bool, char, short, int,
     long, long long, and pointers are in the INTEGER class.  Similarly,
     range types, used by languages such as Ada, are also in the INTEGER
     class.  */
  if ((code == TYPE_CODE_INT || code == TYPE_CODE_ENUM
       || code == TYPE_CODE_BOOL || code == TYPE_CODE_RANGE
       || code == TYPE_CODE_CHAR
       || code == TYPE_CODE_PTR || TYPE_IS_REFERENCE (type))
      && (len == 1 || len == 2 || len == 4 || len == 8))
    theclass[0] = AMD64_INTEGER;

  /* Arguments of types float, double, _Decimal32, _Decimal64 and __m64
     are in class SSE.  */
  else if ((code == TYPE_CODE_FLT || code == TYPE_CODE_DECFLOAT)
	   && (len == 4 || len == 8))
    /* FIXME: __m64 .  */
    theclass[0] = AMD64_SSE;

  /* Arguments of types __float128, _Decimal128 and __m128 are split into
     two halves.  The least significant ones belong to class SSE, the most
     significant one to class SSEUP.  */
  else if (code == TYPE_CODE_DECFLOAT && len == 16)
    /* FIXME: __float128, __m128.  */
    theclass[0] = AMD64_SSE, theclass[1] = AMD64_SSEUP;

  /* The 64-bit mantissa of arguments of type long double belongs to
     class X87, the 16-bit exponent plus 6 bytes of padding belongs to
     class X87UP.  */
  else if (code == TYPE_CODE_FLT && len == 16)
    /* Class X87 and X87UP.  */
    theclass[0] = AMD64_X87, theclass[1] = AMD64_X87UP;

  /* Arguments of complex T where T is one of the types float or
     double get treated as if they are implemented as:

       struct complexT {
	 T real;
	 T imag;
       };

  */
  else if (code == TYPE_CODE_COMPLEX && len == 8)
    theclass[0] = AMD64_SSE;
  else if (code == TYPE_CODE_COMPLEX && len == 16)
    theclass[0] = theclass[1] = AMD64_SSE;

  /* A variable of type complex long double is classified as type
     COMPLEX_X87.  */
  else if (code == TYPE_CODE_COMPLEX && len == 32)
    theclass[0] = AMD64_COMPLEX_X87;

  /* Aggregates.  */
  else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
	   || code == TYPE_CODE_UNION)
    amd64_classify_aggregate (type, theclass);
}
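
/* A sketch of the classifications this yields for common C types (the
   TYPE arguments are assumed to come from debug info with the usual
   AMD64 type sizes):  */
#if 0
static void
amd64_classify_example (struct type *long_type, struct type *double_type,
			struct type *long_double_type)
{
  enum amd64_reg_class theclass[2];

  amd64_classify (long_type, theclass);		/* 8-byte integer.  */
  gdb_assert (theclass[0] == AMD64_INTEGER
	      && theclass[1] == AMD64_NO_CLASS);

  amd64_classify (double_type, theclass);	/* 8-byte float.  */
  gdb_assert (theclass[0] == AMD64_SSE && theclass[1] == AMD64_NO_CLASS);

  amd64_classify (long_double_type, theclass);	/* 16-byte x87 float.  */
  gdb_assert (theclass[0] == AMD64_X87 && theclass[1] == AMD64_X87UP);
}
#endif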

static enum return_value_convention
amd64_return_value (struct gdbarch *gdbarch, struct value *function,
		    struct type *type, struct regcache *regcache,
		    gdb_byte *readbuf, const gdb_byte *writebuf)
{
  enum amd64_reg_class theclass[2];
  int len = TYPE_LENGTH (type);
  static int integer_regnum[] = { AMD64_RAX_REGNUM, AMD64_RDX_REGNUM };
  static int sse_regnum[] = { AMD64_XMM0_REGNUM, AMD64_XMM1_REGNUM };
  int integer_reg = 0;
  int sse_reg = 0;
  int i;

  gdb_assert (!(readbuf && writebuf));

  /* 1. Classify the return type with the classification algorithm.  */
  amd64_classify (type, theclass);

  /* 2. If the type has class MEMORY, then the caller provides space
     for the return value and passes the address of this storage in
     %rdi as if it were the first argument to the function.  In effect,
     this address becomes a hidden first argument.

     On return %rax will contain the address that has been passed in
     by the caller in %rdi.  */
  if (theclass[0] == AMD64_MEMORY)
    {
      /* As indicated by the comment above, the ABI guarantees that we
	 can always find the return value just after the function has
	 returned.  */

      if (readbuf)
	{
	  ULONGEST addr;

	  regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
	  read_memory (addr, readbuf, TYPE_LENGTH (type));
	}

      return RETURN_VALUE_ABI_RETURNS_ADDRESS;
    }

  /* 8. If the class is COMPLEX_X87, the real part of the value is
     returned in %st0 and the imaginary part in %st1.  */
  if (theclass[0] == AMD64_COMPLEX_X87)
    {
      if (readbuf)
	{
	  regcache_raw_read (regcache, AMD64_ST0_REGNUM, readbuf);
	  regcache_raw_read (regcache, AMD64_ST1_REGNUM, readbuf + 16);
	}

      if (writebuf)
	{
	  i387_return_value (gdbarch, regcache);
	  regcache_raw_write (regcache, AMD64_ST0_REGNUM, writebuf);
	  regcache_raw_write (regcache, AMD64_ST1_REGNUM, writebuf + 16);

	  /* Fix up the tag word such that both %st(0) and %st(1) are
	     marked as valid.  */
	  regcache_raw_write_unsigned (regcache, AMD64_FTAG_REGNUM, 0xfff);
	}

      return RETURN_VALUE_REGISTER_CONVENTION;
    }

  gdb_assert (theclass[1] != AMD64_MEMORY);
  gdb_assert (len <= 16);

  for (i = 0; len > 0; i++, len -= 8)
    {
      int regnum = -1;
      int offset = 0;

      switch (theclass[i])
	{
	case AMD64_INTEGER:
	  /* 3. If the class is INTEGER, the next available register
	     of the sequence %rax, %rdx is used.  */
	  regnum = integer_regnum[integer_reg++];
	  break;

	case AMD64_SSE:
	  /* 4. If the class is SSE, the next available SSE register
	     of the sequence %xmm0, %xmm1 is used.  */
	  regnum = sse_regnum[sse_reg++];
	  break;

	case AMD64_SSEUP:
	  /* 5. If the class is SSEUP, the eightbyte is passed in the
	     upper half of the last used SSE register.  */
	  gdb_assert (sse_reg > 0);
	  regnum = sse_regnum[sse_reg - 1];
	  offset = 8;
	  break;

	case AMD64_X87:
	  /* 6. If the class is X87, the value is returned on the X87
	     stack in %st0 as 80-bit x87 number.  */
	  regnum = AMD64_ST0_REGNUM;
	  if (writebuf)
	    i387_return_value (gdbarch, regcache);
	  break;

	case AMD64_X87UP:
	  /* 7. If the class is X87UP, the value is returned together
	     with the previous X87 value in %st0.  */
	  gdb_assert (i > 0 && theclass[0] == AMD64_X87);
	  regnum = AMD64_ST0_REGNUM;
	  offset = 8;
	  len = 2;
	  break;

	case AMD64_NO_CLASS:
	  continue;

	default:
	  gdb_assert (!"Unexpected register class.");
	}

      gdb_assert (regnum != -1);

      if (readbuf)
	regcache_raw_read_part (regcache, regnum, offset, std::min (len, 8),
				readbuf + i * 8);
      if (writebuf)
	regcache_raw_write_part (regcache, regnum, offset, std::min (len, 8),
				 writebuf + i * 8);
    }

  return RETURN_VALUE_REGISTER_CONVENTION;
}
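
/* For illustration, fetching a struct { long l; double d; } return
   value by hand would mirror the loop above: the struct classifies as
   {INTEGER, SSE}, so eightbyte 0 comes from %rax and eightbyte 1 from
   the low half of %xmm0 (a sketch; REGCACHE and BUF are assumed valid,
   BUF at least 16 bytes):  */
#if 0
static void
amd64_return_value_example (struct regcache *regcache, gdb_byte *buf)
{
  regcache_raw_read_part (regcache, AMD64_RAX_REGNUM, 0, 8, buf);
  regcache_raw_read_part (regcache, AMD64_XMM0_REGNUM, 0, 8, buf + 8);
}
#endif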
\f

static CORE_ADDR
amd64_push_arguments (struct regcache *regcache, int nargs,
		      struct value **args, CORE_ADDR sp, int struct_return)
{
  static int integer_regnum[] =
  {
    AMD64_RDI_REGNUM,		/* %rdi */
    AMD64_RSI_REGNUM,		/* %rsi */
    AMD64_RDX_REGNUM,		/* %rdx */
    AMD64_RCX_REGNUM,		/* %rcx */
    AMD64_R8_REGNUM,		/* %r8 */
    AMD64_R9_REGNUM		/* %r9 */
  };
  static int sse_regnum[] =
  {
    /* %xmm0 ... %xmm7 */
    AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
    AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
    AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
    AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
  };
  struct value **stack_args = XALLOCAVEC (struct value *, nargs);
  int num_stack_args = 0;
  int num_elements = 0;
  int element = 0;
  int integer_reg = 0;
  int sse_reg = 0;
  int i;

  /* Reserve a register for the "hidden" argument.  */
  if (struct_return)
    integer_reg++;

  for (i = 0; i < nargs; i++)
    {
      struct type *type = value_type (args[i]);
      int len = TYPE_LENGTH (type);
      enum amd64_reg_class theclass[2];
      int needed_integer_regs = 0;
      int needed_sse_regs = 0;
      int j;

      /* Classify argument.  */
      amd64_classify (type, theclass);

      /* Calculate the number of integer and SSE registers needed for
	 this argument.  */
      for (j = 0; j < 2; j++)
	{
	  if (theclass[j] == AMD64_INTEGER)
	    needed_integer_regs++;
	  else if (theclass[j] == AMD64_SSE)
	    needed_sse_regs++;
	}

      /* Check whether enough registers are available, and if the
	 argument should be passed in registers at all.  */
      if (integer_reg + needed_integer_regs > ARRAY_SIZE (integer_regnum)
	  || sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum)
	  || (needed_integer_regs == 0 && needed_sse_regs == 0))
	{
	  /* The argument will be passed on the stack.  */
	  num_elements += ((len + 7) / 8);
	  stack_args[num_stack_args++] = args[i];
	}
      else
	{
	  /* The argument will be passed in registers.  */
	  const gdb_byte *valbuf = value_contents (args[i]);
	  gdb_byte buf[8];

	  gdb_assert (len <= 16);

	  for (j = 0; len > 0; j++, len -= 8)
	    {
	      int regnum = -1;
	      int offset = 0;

	      switch (theclass[j])
		{
		case AMD64_INTEGER:
		  regnum = integer_regnum[integer_reg++];
		  break;

		case AMD64_SSE:
		  regnum = sse_regnum[sse_reg++];
		  break;

		case AMD64_SSEUP:
		  gdb_assert (sse_reg > 0);
		  regnum = sse_regnum[sse_reg - 1];
		  offset = 8;
		  break;

		default:
		  gdb_assert (!"Unexpected register class.");
		}

	      gdb_assert (regnum != -1);
	      memset (buf, 0, sizeof buf);
	      memcpy (buf, valbuf + j * 8, std::min (len, 8));
	      regcache_raw_write_part (regcache, regnum, offset, 8, buf);
	    }
	}
    }

  /* Allocate space for the arguments on the stack.  */
  sp -= num_elements * 8;

  /* The psABI says that "The end of the input argument area shall be
     aligned on a 16 byte boundary."  */
  sp &= ~0xf;

  /* Write out the arguments to the stack.  */
  for (i = 0; i < num_stack_args; i++)
    {
      struct type *type = value_type (stack_args[i]);
      const gdb_byte *valbuf = value_contents (stack_args[i]);
      int len = TYPE_LENGTH (type);

      write_memory (sp + element * 8, valbuf, len);
      element += ((len + 7) / 8);
    }

  /* The psABI says that "For calls that may call functions that use
     varargs or stdargs (prototype-less calls or calls to functions
     containing ellipsis (...) in the declaration) %al is used as
     hidden argument to specify the number of SSE registers used.  */
  regcache_raw_write_unsigned (regcache, AMD64_RAX_REGNUM, sse_reg);
  return sp;
}
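
/* A worked example of the %al convention above (a sketch): for a call
   like printf ("%f\n", 3.14), the format string lands in %rdi
   (integer_reg becomes 1), the double lands in %xmm0 (sse_reg becomes
   1), and the final store sets %al to 1 -- the number of SSE
   registers a varargs callee has to dump.  */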

static CORE_ADDR
amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
		       struct regcache *regcache, CORE_ADDR bp_addr,
		       int nargs, struct value **args, CORE_ADDR sp,
		       int struct_return, CORE_ADDR struct_addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  gdb_byte buf[8];

  /* The BND registers can hold arbitrary values at the moment of the
     inferior call.  This can cause boundary violations that are not
     due to a real bug or even desired by the user.  The best we can do
     is set the BND registers to the INIT state, allowing access to the
     whole of memory, before pushing the inferior call.  */
  i387_reset_bnd_regs (gdbarch, regcache);

  /* Pass arguments.  */
  sp = amd64_push_arguments (regcache, nargs, args, sp, struct_return);

  /* Pass the "hidden" argument.  */
  if (struct_return)
    {
      store_unsigned_integer (buf, 8, byte_order, struct_addr);
      regcache_cooked_write (regcache, AMD64_RDI_REGNUM, buf);
    }

  /* Store return address.  */
  sp -= 8;
  store_unsigned_integer (buf, 8, byte_order, bp_addr);
  write_memory (sp, buf, 8);

  /* Finally, update the stack pointer...  */
  store_unsigned_integer (buf, 8, byte_order, sp);
  regcache_cooked_write (regcache, AMD64_RSP_REGNUM, buf);

  /* ...and fake a frame pointer.  */
  regcache_cooked_write (regcache, AMD64_RBP_REGNUM, buf);

  return sp + 16;
}
\f
/* Displaced instruction handling.  */

/* A partially decoded instruction.
   This contains enough details for displaced stepping purposes.  */

struct amd64_insn
{
  /* The number of opcode bytes.  */
  int opcode_len;
  /* The offset of the rex prefix or -1 if not present.  */
  int rex_offset;
  /* The offset to the first opcode byte.  */
  int opcode_offset;
  /* The offset to the modrm byte or -1 if not present.  */
  int modrm_offset;

  /* The raw instruction.  */
  gdb_byte *raw_insn;
};

struct displaced_step_closure
{
  /* For rip-relative insns, saved copy of the reg we use instead of %rip.  */
  int tmp_used;
  int tmp_regno;
  ULONGEST tmp_save;

  /* Details of the instruction.  */
  struct amd64_insn insn_details;

  /* Amount of space allocated to insn_buf.  */
  int max_len;

  /* The possibly modified insn.
     This is a variable-length field.  */
  gdb_byte insn_buf[1];
};

/* WARNING: Keep onebyte_has_modrm, twobyte_has_modrm in sync with
   ../opcodes/i386-dis.c (until libopcodes exports them, or an alternative,
   at which point delete these in favor of libopcodes' versions).  */

static const unsigned char onebyte_has_modrm[256] = {
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
  /*	   -------------------------------	  */
  /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
  /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
  /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
  /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
  /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
  /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
  /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
  /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
  /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
  /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
  /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
  /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
  /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
  /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
  /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
  /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1  /* f0 */
  /*	   -------------------------------	  */
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
};

static const unsigned char twobyte_has_modrm[256] = {
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
  /*	   -------------------------------	  */
  /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
  /* 10 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 1f */
  /* 20 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 2f */
  /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
  /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
  /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
  /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
  /* 70 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 7f */
  /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
  /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
  /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
  /* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */
  /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
  /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
  /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
  /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0  /* ff */
  /*	   -------------------------------	  */
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
};

static int amd64_syscall_p (const struct amd64_insn *insn, int *lengthp);

static int
rex_prefix_p (gdb_byte pfx)
{
  return REX_PREFIX_P (pfx);
}

/* Skip the legacy instruction prefixes in INSN.
   We assume INSN is properly sentineled so we don't have to worry
   about falling off the end of the buffer.  */

static gdb_byte *
amd64_skip_prefixes (gdb_byte *insn)
{
  while (1)
    {
      switch (*insn)
	{
	case DATA_PREFIX_OPCODE:
	case ADDR_PREFIX_OPCODE:
	case CS_PREFIX_OPCODE:
	case DS_PREFIX_OPCODE:
	case ES_PREFIX_OPCODE:
	case FS_PREFIX_OPCODE:
	case GS_PREFIX_OPCODE:
	case SS_PREFIX_OPCODE:
	case LOCK_PREFIX_OPCODE:
	case REPE_PREFIX_OPCODE:
	case REPNE_PREFIX_OPCODE:
	  ++insn;
	  continue;
	default:
	  break;
	}
      break;
    }

  return insn;
}

/* Return an integer register (other than RSP) that is unused as an input
   operand in INSN.
   In order to not require adding a rex prefix if the insn doesn't already
   have one, the result is restricted to RAX ... RDI, sans RSP.
   The register numbering of the result follows architecture ordering,
   e.g. RDI = 7.  */

static int
amd64_get_unused_input_int_reg (const struct amd64_insn *details)
{
  /* 1 bit for each reg */
  int used_regs_mask = 0;

  /* There can be at most 3 int regs used as inputs in an insn, and we have
     7 to choose from (RAX ... RDI, sans RSP).
     This allows us to take a conservative approach and keep things simple.
     E.g. By avoiding RAX, we don't have to specifically watch for opcodes
     that implicitly specify RAX.  */

  /* Avoid RAX.  */
  used_regs_mask |= 1 << EAX_REG_NUM;
  /* Similarly avoid RDX, implicit operand in divides.  */
  used_regs_mask |= 1 << EDX_REG_NUM;
  /* Avoid RSP.  */
  used_regs_mask |= 1 << ESP_REG_NUM;

  /* If the opcode is one byte long and there's no ModRM byte,
     assume the opcode specifies a register.  */
  if (details->opcode_len == 1 && details->modrm_offset == -1)
    used_regs_mask |= 1 << (details->raw_insn[details->opcode_offset] & 7);

  /* Mark used regs in the modrm/sib bytes.  */
  if (details->modrm_offset != -1)
    {
      int modrm = details->raw_insn[details->modrm_offset];
      int mod = MODRM_MOD_FIELD (modrm);
      int reg = MODRM_REG_FIELD (modrm);
      int rm = MODRM_RM_FIELD (modrm);
      int have_sib = mod != 3 && rm == 4;

      /* Assume the reg field of the modrm byte specifies a register.  */
      used_regs_mask |= 1 << reg;

      if (have_sib)
	{
	  int base = SIB_BASE_FIELD (details->raw_insn[details->modrm_offset + 1]);
	  int idx = SIB_INDEX_FIELD (details->raw_insn[details->modrm_offset + 1]);
	  used_regs_mask |= 1 << base;
	  used_regs_mask |= 1 << idx;
	}
      else
	{
	  used_regs_mask |= 1 << rm;
	}
    }

  gdb_assert (used_regs_mask < 256);
  gdb_assert (used_regs_mask != 255);

  /* Finally, find a free reg.  */
  {
    int i;

    for (i = 0; i < 8; ++i)
      {
	if (! (used_regs_mask & (1 << i)))
	  return i;
      }

    /* We shouldn't get here.  */
    internal_error (__FILE__, __LINE__, _("unable to find free reg"));
  }
}

/* Extract the details of INSN that we need.  */

static void
amd64_get_insn_details (gdb_byte *insn, struct amd64_insn *details)
{
  gdb_byte *start = insn;
  int need_modrm;

  details->raw_insn = insn;

  details->opcode_len = -1;
  details->rex_offset = -1;
  details->opcode_offset = -1;
  details->modrm_offset = -1;

  /* Skip legacy instruction prefixes.  */
  insn = amd64_skip_prefixes (insn);

  /* Skip REX instruction prefix.  */
  if (rex_prefix_p (*insn))
    {
      details->rex_offset = insn - start;
      ++insn;
    }

  details->opcode_offset = insn - start;

  if (*insn == TWO_BYTE_OPCODE_ESCAPE)
    {
      /* Two or three-byte opcode.  */
      ++insn;
      need_modrm = twobyte_has_modrm[*insn];

      /* Check for three-byte opcode.  */
      switch (*insn)
	{
	case 0x24:
	case 0x25:
	case 0x38:
	case 0x3a:
	case 0x7a:
	case 0x7b:
	  ++insn;
	  details->opcode_len = 3;
	  break;
	default:
	  details->opcode_len = 2;
	  break;
	}
    }
  else
    {
      /* One-byte opcode.  */
      need_modrm = onebyte_has_modrm[*insn];
      details->opcode_len = 1;
    }

  if (need_modrm)
    {
      ++insn;
      details->modrm_offset = insn - start;
    }
}
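
/* A sketch of the decoding above on concrete bytes (illustration
   only): "48 8b 05 2d 0b 20 00" is mov 0x200b2d(%rip),%rax -- byte 0
   is the REX.W prefix, byte 1 the opcode, byte 2 the ModRM byte.  */
#if 0
static void
amd64_get_insn_details_example (void)
{
  gdb_byte insn[16] = { 0x48, 0x8b, 0x05, 0x2d, 0x0b, 0x20, 0x00 };
  struct amd64_insn details;

  amd64_get_insn_details (insn, &details);
  gdb_assert (details.rex_offset == 0);
  gdb_assert (details.opcode_offset == 1);
  gdb_assert (details.opcode_len == 1);
  gdb_assert (details.modrm_offset == 2);
}
#endif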

/* Update %rip-relative addressing in INSN.

   %rip-relative addressing only uses a 32-bit displacement.
   32 bits is not enough to be guaranteed to cover the distance between where
   the real instruction is and where its copy is.
   Convert the insn to use base+disp addressing.
   We set base = pc + insn_length so we can leave disp unchanged.  */

static void
fixup_riprel (struct gdbarch *gdbarch, struct displaced_step_closure *dsc,
	      CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
{
  const struct amd64_insn *insn_details = &dsc->insn_details;
  int modrm_offset = insn_details->modrm_offset;
  gdb_byte *insn = insn_details->raw_insn + modrm_offset;
  CORE_ADDR rip_base;
  int insn_length;
  int arch_tmp_regno, tmp_regno;
  ULONGEST orig_value;

  /* %rip+disp32 addressing mode, displacement follows ModRM byte.  */
  ++insn;

  /* Compute the rip-relative address.  */
  insn_length = gdb_buffered_insn_length (gdbarch, dsc->insn_buf,
					  dsc->max_len, from);
  rip_base = from + insn_length;

  /* We need a register to hold the address.
     Pick one not used in the insn.
     NOTE: arch_tmp_regno uses architecture ordering, e.g. RDI = 7.  */
  arch_tmp_regno = amd64_get_unused_input_int_reg (insn_details);
  tmp_regno = amd64_arch_reg_to_regnum (arch_tmp_regno);

  /* REX.B should be unset as we were using rip-relative addressing,
     but ensure it's unset anyway, tmp_regno is not r8-r15.  */
  if (insn_details->rex_offset != -1)
    dsc->insn_buf[insn_details->rex_offset] &= ~REX_B;

  regcache_cooked_read_unsigned (regs, tmp_regno, &orig_value);
  dsc->tmp_regno = tmp_regno;
  dsc->tmp_save = orig_value;
  dsc->tmp_used = 1;

  /* Convert the ModRM field to be base+disp.  */
  dsc->insn_buf[modrm_offset] &= ~0xc7;
  dsc->insn_buf[modrm_offset] |= 0x80 + arch_tmp_regno;

  regcache_cooked_write_unsigned (regs, tmp_regno, rip_base);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: %%rip-relative addressing used.\n"
			"displaced: using temp reg %d, old value %s, new value %s\n",
			dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save),
			paddress (gdbarch, rip_base));
}
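
/* For illustration (made-up displacement): the copy of

     48 8b 05 d4 03 00 00	mov    0x3d4(%rip),%rax

   names no input register other than the ModRM fields, so the scratch
   register chosen above is %rcx (architectural number 1, the lowest
   free one after RAX, RDX and RSP are excluded).  %rcx is loaded with
   FROM + 7 (the insn is 7 bytes long), and the ModRM byte 0x05
   (mod=00, rm=101, %rip-relative) is rewritten to 0x81 (mod=10,
   rm=001, disp32 off %rcx):

     48 8b 81 d4 03 00 00	mov    0x3d4(%rcx),%rax

   The displacement itself is left unchanged, as promised above.  */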

static void
fixup_displaced_copy (struct gdbarch *gdbarch,
		      struct displaced_step_closure *dsc,
		      CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
{
  const struct amd64_insn *details = &dsc->insn_details;

  if (details->modrm_offset != -1)
    {
      gdb_byte modrm = details->raw_insn[details->modrm_offset];

      if ((modrm & 0xc7) == 0x05)
	{
	  /* The insn uses rip-relative addressing.
	     Deal with it.  */
	  fixup_riprel (gdbarch, dsc, from, to, regs);
	}
    }
}

struct displaced_step_closure *
amd64_displaced_step_copy_insn (struct gdbarch *gdbarch,
				CORE_ADDR from, CORE_ADDR to,
				struct regcache *regs)
{
  int len = gdbarch_max_insn_length (gdbarch);
  /* Extra space for sentinels so fixup_{riprel,displaced_copy} don't have to
     continually watch for running off the end of the buffer.  */
  int fixup_sentinel_space = len;
  struct displaced_step_closure *dsc
    = ((struct displaced_step_closure *)
       xmalloc (sizeof (*dsc) + len + fixup_sentinel_space));
  gdb_byte *buf = &dsc->insn_buf[0];
  struct amd64_insn *details = &dsc->insn_details;

  dsc->tmp_used = 0;
  dsc->max_len = len + fixup_sentinel_space;

  read_memory (from, buf, len);

  /* Set up the sentinel space so we don't have to worry about running
     off the end of the buffer.  An excessive number of leading prefixes
     could otherwise cause this.  */
  memset (buf + len, 0, fixup_sentinel_space);

  amd64_get_insn_details (buf, details);

  /* GDB may get control back after the insn after the syscall.
     Presumably this is a kernel bug.
     If this is a syscall, make sure there's a nop afterwards.  */
  {
    int syscall_length;

    if (amd64_syscall_p (details, &syscall_length))
      buf[details->opcode_offset + syscall_length] = NOP_OPCODE;
  }

  /* Modify the insn to cope with the address where it will be executed from.
     In particular, handle any rip-relative addressing.  */
  fixup_displaced_copy (gdbarch, dsc, from, to, regs);

  write_memory (to, buf, len);

  if (debug_displaced)
    {
      fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
			  paddress (gdbarch, from), paddress (gdbarch, to));
      displaced_step_dump_bytes (gdb_stdlog, buf, len);
    }

  return dsc;
}

static int
amd64_absolute_jmp_p (const struct amd64_insn *details)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (insn[0] == 0xff)
    {
      /* jump near, absolute indirect (/4) */
      if ((insn[1] & 0x38) == 0x20)
	return 1;

      /* jump far, absolute indirect (/5) */
      if ((insn[1] & 0x38) == 0x28)
	return 1;
    }

  return 0;
}

/* Return non-zero if the instruction DETAILS is a jump, zero otherwise.  */

static int
amd64_jmp_p (const struct amd64_insn *details)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  /* jump short, relative.  */
  if (insn[0] == 0xeb)
    return 1;

  /* jump near, relative.  */
  if (insn[0] == 0xe9)
    return 1;

  return amd64_absolute_jmp_p (details);
}

static int
amd64_absolute_call_p (const struct amd64_insn *details)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (insn[0] == 0xff)
    {
      /* Call near, absolute indirect (/2) */
      if ((insn[1] & 0x38) == 0x10)
	return 1;

      /* Call far, absolute indirect (/3) */
      if ((insn[1] & 0x38) == 0x18)
	return 1;
    }

  return 0;
}

static int
amd64_ret_p (const struct amd64_insn *details)
{
  /* NOTE: gcc can emit "repz ; ret".  */
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  switch (insn[0])
    {
    case 0xc2: /* ret near, pop N bytes */
    case 0xc3: /* ret near */
    case 0xca: /* ret far, pop N bytes */
    case 0xcb: /* ret far */
    case 0xcf: /* iret */
      return 1;

    default:
      return 0;
    }
}

static int
amd64_call_p (const struct amd64_insn *details)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (amd64_absolute_call_p (details))
    return 1;

  /* call near, relative */
  if (insn[0] == 0xe8)
    return 1;

  return 0;
}

/* Return non-zero if INSN is a system call, and set *LENGTHP to its
   length in bytes.  Otherwise, return zero.  */

static int
amd64_syscall_p (const struct amd64_insn *details, int *lengthp)
{
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];

  if (insn[0] == 0x0f && insn[1] == 0x05)
    {
      *lengthp = 2;
      return 1;
    }

  return 0;
}

/* Classify the instruction at ADDR using PRED.
   Throw an error if the memory can't be read.  */

static int
amd64_classify_insn_at (struct gdbarch *gdbarch, CORE_ADDR addr,
			int (*pred) (const struct amd64_insn *))
{
  struct amd64_insn details;
  gdb_byte *buf;
  int len, classification;

  len = gdbarch_max_insn_length (gdbarch);
  buf = (gdb_byte *) alloca (len);

  read_code (addr, buf, len);
  amd64_get_insn_details (buf, &details);

  classification = pred (&details);

  return classification;
}

/* The gdbarch insn_is_call method.  */

static int
amd64_insn_is_call (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_call_p);
}

/* The gdbarch insn_is_ret method.  */

static int
amd64_insn_is_ret (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_ret_p);
}

/* The gdbarch insn_is_jump method.  */

static int
amd64_insn_is_jump (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_jmp_p);
}

/* Fix up the state of registers and memory after having single-stepped
   a displaced instruction.  */

void
amd64_displaced_step_fixup (struct gdbarch *gdbarch,
			    struct displaced_step_closure *dsc,
			    CORE_ADDR from, CORE_ADDR to,
			    struct regcache *regs)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  /* The offset we applied to the instruction's address.  */
  ULONGEST insn_offset = to - from;
  gdb_byte *insn = dsc->insn_buf;
  const struct amd64_insn *insn_details = &dsc->insn_details;

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog,
			"displaced: fixup (%s, %s), "
			"insn = 0x%02x 0x%02x ...\n",
			paddress (gdbarch, from), paddress (gdbarch, to),
			insn[0], insn[1]);

  /* If we used a tmp reg, restore it.  */

  if (dsc->tmp_used)
    {
      if (debug_displaced)
	fprintf_unfiltered (gdb_stdlog, "displaced: restoring reg %d to %s\n",
			    dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save));
      regcache_cooked_write_unsigned (regs, dsc->tmp_regno, dsc->tmp_save);
    }

  /* The list of issues to contend with here is taken from
     resume_execution in arch/x86/kernel/kprobes.c, Linux 2.6.28.
     Yay for Free Software!  */

  /* Relocate the %rip back to the program's instruction stream,
     if necessary.  */

  /* Except in the case of absolute or indirect jump or call
     instructions, or a return instruction, the new rip is relative to
     the displaced instruction; make it relative to the original insn.
     Well, signal handler returns don't need relocation either, but we use the
     value of %rip to recognize those; see below.  */
  if (! amd64_absolute_jmp_p (insn_details)
      && ! amd64_absolute_call_p (insn_details)
      && ! amd64_ret_p (insn_details))
    {
      ULONGEST orig_rip;
      int insn_len;

      regcache_cooked_read_unsigned (regs, AMD64_RIP_REGNUM, &orig_rip);

      /* A signal trampoline system call changes the %rip, resuming
	 execution of the main program after the signal handler has
	 returned.  That makes them like 'return' instructions; we
	 shouldn't relocate %rip.

	 But most system calls don't, and we do need to relocate %rip.

	 Our heuristic for distinguishing these cases: if stepping
	 over the system call instruction left control directly after
	 the instruction, then we relocate --- control almost certainly
	 doesn't belong in the displaced copy.  Otherwise, we assume
	 the instruction has put control where it belongs, and leave
	 it unrelocated.  Goodness help us if there are PC-relative
	 system calls.  */
      if (amd64_syscall_p (insn_details, &insn_len)
	  && orig_rip != to + insn_len
	  /* GDB can get control back after the insn after the syscall.
	     Presumably this is a kernel bug.
	     Fixup ensures it's a nop; we add one to the length for it.  */
	  && orig_rip != to + insn_len + 1)
	{
	  if (debug_displaced)
	    fprintf_unfiltered (gdb_stdlog,
				"displaced: syscall changed %%rip; "
				"not relocating\n");
	}
      else
	{
	  ULONGEST rip = orig_rip - insn_offset;

	  /* If we just stepped over a breakpoint insn, we don't backup
	     the pc on purpose; this is to match behaviour without
	     stepping.  */

	  regcache_cooked_write_unsigned (regs, AMD64_RIP_REGNUM, rip);

	  if (debug_displaced)
	    fprintf_unfiltered (gdb_stdlog,
				"displaced: "
				"relocated %%rip from %s to %s\n",
				paddress (gdbarch, orig_rip),
				paddress (gdbarch, rip));
	}
    }

  /* If the instruction was PUSHFL, then the TF bit will be set in the
     pushed value, and should be cleared.  We'll leave this for later,
     since GDB already messes up the TF flag when stepping over a
     pushfl.  */

  /* If the instruction was a call, the return address now atop the
     stack is the address following the copied instruction.  We need
     to make it the address following the original instruction.  */
  if (amd64_call_p (insn_details))
    {
      ULONGEST rsp;
      ULONGEST retaddr;
      const ULONGEST retaddr_len = 8;

      regcache_cooked_read_unsigned (regs, AMD64_RSP_REGNUM, &rsp);
      retaddr = read_memory_unsigned_integer (rsp, retaddr_len, byte_order);
      retaddr = (retaddr - insn_offset) & 0xffffffffffffffffULL;
      write_memory_unsigned_integer (rsp, retaddr_len, byte_order, retaddr);

      if (debug_displaced)
	fprintf_unfiltered (gdb_stdlog,
			    "displaced: relocated return addr at %s "
			    "to %s\n",
			    paddress (gdbarch, rsp),
			    paddress (gdbarch, retaddr));
    }
}
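
/* A worked example of the relocation arithmetic above (a sketch, with
   made-up addresses): suppose FROM = 0x400000, TO = 0x600000, and the
   displaced insn is the 5-byte call at FROM.  After the step, %rip and
   the pushed return address are both TO + 5; subtracting
   INSN_OFFSET = TO - FROM rewrites each to FROM + 5, exactly as if the
   original insn had executed in place.  */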
1711
1712 /* If the instruction INSN uses RIP-relative addressing, return the
1713 offset into the raw INSN where the displacement to be adjusted is
1714 found. Returns 0 if the instruction doesn't use RIP-relative
1715 addressing. */
1716
1717 static int
1718 rip_relative_offset (struct amd64_insn *insn)
1719 {
1720 if (insn->modrm_offset != -1)
1721 {
1722 gdb_byte modrm = insn->raw_insn[insn->modrm_offset];
1723
1724 if ((modrm & 0xc7) == 0x05)
1725 {
1726 /* The displacement is found right after the ModRM byte. */
1727 return insn->modrm_offset + 1;
1728 }
1729 }
1730
1731 return 0;
1732 }
1733
1734 static void
1735 append_insns (CORE_ADDR *to, ULONGEST len, const gdb_byte *buf)
1736 {
1737 target_write_memory (*to, buf, len);
1738 *to += len;
1739 }
1740
1741 static void
1742 amd64_relocate_instruction (struct gdbarch *gdbarch,
1743 CORE_ADDR *to, CORE_ADDR oldloc)
1744 {
1745 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1746 int len = gdbarch_max_insn_length (gdbarch);
1747 /* Extra space for sentinels. */
1748 int fixup_sentinel_space = len;
1749 gdb_byte *buf = (gdb_byte *) xmalloc (len + fixup_sentinel_space);
1750 struct amd64_insn insn_details;
1751 int offset = 0;
1752 LONGEST rel32, newrel;
1753 gdb_byte *insn;
1754 int insn_length;
1755
1756 read_memory (oldloc, buf, len);
1757
1758 /* Set up the sentinel space so we don't have to worry about running
1759 off the end of the buffer. An excessive number of leading prefixes
1760 could otherwise cause this. */
1761 memset (buf + len, 0, fixup_sentinel_space);
1762
1763 insn = buf;
1764 amd64_get_insn_details (insn, &insn_details);
1765
1766 insn_length = gdb_buffered_insn_length (gdbarch, insn, len, oldloc);
1767
1768 /* Skip legacy instruction prefixes. */
1769 insn = amd64_skip_prefixes (insn);
1770
1771 /* Adjust calls with 32-bit relative addresses as push/jump, with
1772 the address pushed being the location where the original call in
1773 the user program would return to. */
1774 if (insn[0] == 0xe8)
1775 {
1776 gdb_byte push_buf[32];
1777 CORE_ADDR ret_addr;
1778 int i = 0;
1779
1780 /* Where "ret" in the original code will return to. */
1781 ret_addr = oldloc + insn_length;
1782
1783 /* If pushing an address higher than or equal to 0x80000000,
1784 avoid 'pushq', as that sign extends its 32-bit operand, which
1785 would be incorrect. */
1786 if (ret_addr <= 0x7fffffff)
1787 {
1788 push_buf[0] = 0x68; /* pushq $... */
1789 store_unsigned_integer (&push_buf[1], 4, byte_order, ret_addr);
1790 i = 5;
1791 }
1792 else
1793 {
1794 push_buf[i++] = 0x48; /* sub $0x8,%rsp */
1795 push_buf[i++] = 0x83;
1796 push_buf[i++] = 0xec;
1797 push_buf[i++] = 0x08;
1798
1799 push_buf[i++] = 0xc7; /* movl $imm,(%rsp) */
1800 push_buf[i++] = 0x04;
1801 push_buf[i++] = 0x24;
1802 store_unsigned_integer (&push_buf[i], 4, byte_order,
1803 ret_addr & 0xffffffff);
1804 i += 4;
1805
1806 push_buf[i++] = 0xc7; /* movl $imm,4(%rsp) */
1807 push_buf[i++] = 0x44;
1808 push_buf[i++] = 0x24;
1809 push_buf[i++] = 0x04;
1810 store_unsigned_integer (&push_buf[i], 4, byte_order,
1811 ret_addr >> 32);
1812 i += 4;
1813 }
1814 gdb_assert (i <= sizeof (push_buf));
1815 /* Push the push. */
1816 append_insns (to, i, push_buf);
1817
1818 /* Convert the relative call to a relative jump. */
1819 insn[0] = 0xe9;
1820
1821 /* Adjust the destination offset. */
1822 rel32 = extract_signed_integer (insn + 1, 4, byte_order);
1823 newrel = (oldloc - *to) + rel32;
1824 store_signed_integer (insn + 1, 4, byte_order, newrel);
1825
1826 if (debug_displaced)
1827 fprintf_unfiltered (gdb_stdlog,
1828 "Adjusted insn rel32=%s at %s to"
1829 " rel32=%s at %s\n",
1830 hex_string (rel32), paddress (gdbarch, oldloc),
1831 hex_string (newrel), paddress (gdbarch, *to));
1832
1833 /* Write the adjusted jump into its displaced location. */
1834 append_insns (to, 5, insn);
1835 return;
1836 }
1837
1838 offset = rip_relative_offset (&insn_details);
1839 if (!offset)
1840 {
1841 /* Adjust jumps with 32-bit relative addresses. Calls are
1842 already handled above. */
1843 if (insn[0] == 0xe9)
1844 offset = 1;
1845 /* Adjust conditional jumps. */
1846 else if (insn[0] == 0x0f && (insn[1] & 0xf0) == 0x80)
1847 offset = 2;
1848 }
1849
1850 if (offset)
1851 {
1852 rel32 = extract_signed_integer (insn + offset, 4, byte_order);
1853 newrel = (oldloc - *to) + rel32;
1854 store_signed_integer (insn + offset, 4, byte_order, newrel);
1855 if (debug_displaced)
1856 fprintf_unfiltered (gdb_stdlog,
1857 "Adjusted insn rel32=%s at %s to"
1858 " rel32=%s at %s\n",
1859 hex_string (rel32), paddress (gdbarch, oldloc),
1860 hex_string (newrel), paddress (gdbarch, *to));
1861 }
1862
1863 /* Write the adjusted instruction into its displaced location. */
1864 append_insns (to, insn_length, buf);
1865 }
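
/* Sketch of the displacement arithmetic used above (made-up
   addresses).  A rel32 branch targets insn_end + rel32, and copying
   the instruction moves insn_end by (*to - oldloc); adding
   (oldloc - *to) to rel32 therefore keeps the absolute target
   unchanged.  */
#if 0
LONGEST oldloc = 0x400600;      /* original instruction address */
LONGEST copy = 0x7ffff7fcb000;  /* displaced instruction address */
LONGEST rel32 = 0x1234;         /* original displacement */
LONGEST newrel = (oldloc - copy) + rel32;
/* oldloc + rel32 == copy + newrel: same absolute target.  */
#endif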
1866
1867 \f
1868 /* The maximum number of saved registers. This should include %rip. */
1869 #define AMD64_NUM_SAVED_REGS AMD64_NUM_GREGS
1870
1871 struct amd64_frame_cache
1872 {
1873 /* Base address. */
1874 CORE_ADDR base;
1875 int base_p;
1876 CORE_ADDR sp_offset;
1877 CORE_ADDR pc;
1878
1879 /* Saved registers. */
1880 CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
1881 CORE_ADDR saved_sp;
1882 int saved_sp_reg;
1883
1884 /* Do we have a frame? */
1885 int frameless_p;
1886 };
1887
1888 /* Initialize a frame cache. */
1889
1890 static void
1891 amd64_init_frame_cache (struct amd64_frame_cache *cache)
1892 {
1893 int i;
1894
1895 /* Base address. */
1896 cache->base = 0;
1897 cache->base_p = 0;
1898 cache->sp_offset = -8;
1899 cache->pc = 0;
1900
1901 /* Saved registers. We initialize these to -1 since zero is a valid
1902 offset (that's where %rbp is supposed to be stored).
1903 The values start out as being offsets, and are later converted to
1904 addresses (at which point -1 is interpreted as an address, still meaning
1905 "invalid"). */
1906 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
1907 cache->saved_regs[i] = -1;
1908 cache->saved_sp = 0;
1909 cache->saved_sp_reg = -1;
1910
1911 /* Frameless until proven otherwise. */
1912 cache->frameless_p = 1;
1913 }
1914
1915 /* Allocate and initialize a frame cache. */
1916
1917 static struct amd64_frame_cache *
1918 amd64_alloc_frame_cache (void)
1919 {
1920 struct amd64_frame_cache *cache;
1921
1922 cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);
1923 amd64_init_frame_cache (cache);
1924 return cache;
1925 }
1926
1927 /* GCC 4.4 and later can put code in the prologue to realign the
1928 stack pointer. Check whether PC points to such code, and update
1929 CACHE accordingly. Return the first instruction after the code
1930 sequence or CURRENT_PC, whichever is smaller. If we don't
1931 recognize the code, return PC. */
1932
1933 static CORE_ADDR
1934 amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
1935 struct amd64_frame_cache *cache)
1936 {
1937 /* There are 2 code sequences to re-align stack before the frame
1938 gets set up:
1939
1940 1. Use a caller-saved register:
1941
1942 leaq 8(%rsp), %reg
1943 andq $-XXX, %rsp
1944 pushq -8(%reg)
1945
1946 2. Use a callee-saved register:
1947
1948 pushq %reg
1949 leaq 16(%rsp), %reg
1950 andq $-XXX, %rsp
1951 pushq -8(%reg)
1952
1953 "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:
1954
1955 0x48 0x83 0xe4 0xf0 andq $-16, %rsp
1956 0x48 0x81 0xe4 0x00 0xff 0xff 0xff andq $-256, %rsp
1957 */
1958
1959 gdb_byte buf[18];
1960 int reg, r;
1961 int offset, offset_and;
1962
1963 if (target_read_code (pc, buf, sizeof buf))
1964 return pc;
1965
1966 /* Check for a caller-saved register. The first instruction has
1967 to be "leaq 8(%rsp), %reg". */
1968 if ((buf[0] & 0xfb) == 0x48
1969 && buf[1] == 0x8d
1970 && buf[3] == 0x24
1971 && buf[4] == 0x8)
1972 {
1973 /* MOD must be binary 10 and R/M must be binary 100. */
1974 if ((buf[2] & 0xc7) != 0x44)
1975 return pc;
1976
1977 /* REG has register number. */
1978 reg = (buf[2] >> 3) & 7;
1979
1980 /* Check the REX.R bit. */
1981 if (buf[0] == 0x4c)
1982 reg += 8;
1983
1984 offset = 5;
1985 }
1986 else
1987 {
1988 /* Check for a callee-saved register. The first instruction
1989 has to be "pushq %reg". */
1990 reg = 0;
1991 if ((buf[0] & 0xf8) == 0x50)
1992 offset = 0;
1993 else if ((buf[0] & 0xf6) == 0x40
1994 && (buf[1] & 0xf8) == 0x50)
1995 {
1996 /* Check the REX.B bit. */
1997 if ((buf[0] & 1) != 0)
1998 reg = 8;
1999
2000 offset = 1;
2001 }
2002 else
2003 return pc;
2004
2005 /* Get register. */
2006 reg += buf[offset] & 0x7;
2007
2008 offset++;
2009
2010 /* The next instruction has to be "leaq 16(%rsp), %reg". */
2011 if ((buf[offset] & 0xfb) != 0x48
2012 || buf[offset + 1] != 0x8d
2013 || buf[offset + 3] != 0x24
2014 || buf[offset + 4] != 0x10)
2015 return pc;
2016
2017 /* MOD must be binary 10 and R/M must be binary 100. */
2018 if ((buf[offset + 2] & 0xc7) != 0x44)
2019 return pc;
2020
2021 /* REG has register number. */
2022 r = (buf[offset + 2] >> 3) & 7;
2023
2024 /* Check the REX.R bit. */
2025 if (buf[offset] == 0x4c)
2026 r += 8;
2027
2028 /* Registers in pushq and leaq have to be the same. */
2029 if (reg != r)
2030 return pc;
2031
2032 offset += 5;
2033 }
2034
2035 /* Register can't be %rsp or %rbp. */
2036 if (reg == 4 || reg == 5)
2037 return pc;
2038
2039 /* The next instruction has to be "andq $-XXX, %rsp". */
2040 if (buf[offset] != 0x48
2041 || buf[offset + 2] != 0xe4
2042 || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
2043 return pc;
2044
2045 offset_and = offset;
2046 offset += buf[offset + 1] == 0x81 ? 7 : 4;
2047
2048 /* The next instruction has to be "pushq -8(%reg)". */
2049 r = 0;
2050 if (buf[offset] == 0xff)
2051 offset++;
2052 else if ((buf[offset] & 0xf6) == 0x40
2053 && buf[offset + 1] == 0xff)
2054 {
2055 /* Check the REX.B bit. */
2056 if ((buf[offset] & 0x1) != 0)
2057 r = 8;
2058 offset += 2;
2059 }
2060 else
2061 return pc;
2062
2063 /* 8-bit -8 is 0xf8. REG must be binary 110 and MOD must be binary
2064 01. */
2065 if (buf[offset + 1] != 0xf8
2066 || (buf[offset] & 0xf8) != 0x70)
2067 return pc;
2068
2069 /* R/M has register. */
2070 r += buf[offset] & 7;
2071
2072 /* Registers in leaq and pushq have to be the same. */
2073 if (reg != r)
2074 return pc;
2075
2076 if (current_pc > pc + offset_and)
2077 cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);
2078
2079 return std::min (pc + offset + 2, current_pc);
2080 }
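
/* A concrete instance of sequence 1 above (illustrative bytes; not
   part of the build).  The REX.R bit in 0x4c lifts the ModRM REG
   field 010 to register 10 (%r10), and the REX.B bit in 0x41 lifts
   the final pushq's R/M field 010 to the same register, so the
   cross-checks above match and cache->saved_sp_reg records %r10.  */
#if 0
static const gdb_byte align_seq[] = {
  0x4c, 0x8d, 0x54, 0x24, 0x08, /* leaq 0x8(%rsp),%r10 */
  0x48, 0x83, 0xe4, 0xf0,       /* andq $-16,%rsp */
  0x41, 0xff, 0x72, 0xf8        /* pushq -0x8(%r10) */
};
#endif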
2081
2082 /* Similar to amd64_analyze_stack_align for x32. */
2083
2084 static CORE_ADDR
2085 amd64_x32_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
2086 struct amd64_frame_cache *cache)
2087 {
2088 /* There are 2 code sequences to re-align stack before the frame
2089 gets set up:
2090
2091 1. Use a caller-saved register:
2092
2093 leaq 8(%rsp), %reg
2094 andq $-XXX, %rsp
2095 pushq -8(%reg)
2096
2097 or
2098
2099 [addr32] leal 8(%rsp), %reg
2100 andl $-XXX, %esp
2101 [addr32] pushq -8(%reg)
2102
2103 2. Use a callee-saved register:
2104
2105 pushq %reg
2106 leaq 16(%rsp), %reg
2107 andq $-XXX, %rsp
2108 pushq -8(%reg)
2109
2110 or
2111
2112 pushq %reg
2113 [addr32] leal 16(%rsp), %reg
2114 andl $-XXX, %esp
2115 [addr32] pushq -8(%reg)
2116
2117 "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:
2118
2119 0x48 0x83 0xe4 0xf0 andq $-16, %rsp
2120 0x48 0x81 0xe4 0x00 0xff 0xff 0xff andq $-256, %rsp
2121
2122 "andl $-XXX, %esp" can be either 3 bytes or 6 bytes:
2123
2124 0x83 0xe4 0xf0 andl $-16, %esp
2125 0x81 0xe4 0x00 0xff 0xff 0xff andl $-256, %esp
2126 */
2127
2128 gdb_byte buf[19];
2129 int reg, r;
2130 int offset, offset_and;
2131
2132 if (target_read_memory (pc, buf, sizeof buf))
2133 return pc;
2134
2135 /* Skip optional addr32 prefix. */
2136 offset = buf[0] == 0x67 ? 1 : 0;
2137
2138 /* Check for a caller-saved register. The first instruction has
2139 to be "leaq 8(%rsp), %reg" or "leal 8(%rsp), %reg". */
2140 if (((buf[offset] & 0xfb) == 0x48 || (buf[offset] & 0xfb) == 0x40)
2141 && buf[offset + 1] == 0x8d
2142 && buf[offset + 3] == 0x24
2143 && buf[offset + 4] == 0x8)
2144 {
2145 /* MOD must be binary 10 and R/M must be binary 100. */
2146 if ((buf[offset + 2] & 0xc7) != 0x44)
2147 return pc;
2148
2149 /* REG has register number. */
2150 reg = (buf[offset + 2] >> 3) & 7;
2151
2152 /* Check the REX.R bit. */
2153 if ((buf[offset] & 0x4) != 0)
2154 reg += 8;
2155
2156 offset += 5;
2157 }
2158 else
2159 {
2160 /* Check for a callee-saved register. The first instruction
2161 has to be "pushq %reg". */
2162 reg = 0;
2163 if ((buf[offset] & 0xf6) == 0x40
2164 && (buf[offset + 1] & 0xf8) == 0x50)
2165 {
2166 /* Check the REX.B bit. */
2167 if ((buf[offset] & 1) != 0)
2168 reg = 8;
2169
2170 offset += 1;
2171 }
2172 else if ((buf[offset] & 0xf8) != 0x50)
2173 return pc;
2174
2175 /* Get register. */
2176 reg += buf[offset] & 0x7;
2177
2178 offset++;
2179
2180 /* Skip optional addr32 prefix. */
2181 if (buf[offset] == 0x67)
2182 offset++;
2183
2184 /* The next instruction has to be "leaq 16(%rsp), %reg" or
2185 "leal 16(%rsp), %reg". */
2186 if (((buf[offset] & 0xfb) != 0x48 && (buf[offset] & 0xfb) != 0x40)
2187 || buf[offset + 1] != 0x8d
2188 || buf[offset + 3] != 0x24
2189 || buf[offset + 4] != 0x10)
2190 return pc;
2191
2192 /* MOD must be binary 10 and R/M must be binary 100. */
2193 if ((buf[offset + 2] & 0xc7) != 0x44)
2194 return pc;
2195
2196 /* REG has register number. */
2197 r = (buf[offset + 2] >> 3) & 7;
2198
2199 /* Check the REX.R bit. */
2200 if ((buf[offset] & 0x4) != 0)
2201 r += 8;
2202
2203 /* Registers in pushq and leaq have to be the same. */
2204 if (reg != r)
2205 return pc;
2206
2207 offset += 5;
2208 }
2209
2210 /* Register can't be %rsp or %rbp. */
2211 if (reg == 4 || reg == 5)
2212 return pc;
2213
2214 /* The next instruction may be "andq $-XXX, %rsp" or
2215 "andl $-XXX, %esp". */
2216 if (buf[offset] != 0x48)
2217 offset--;
2218
2219 if (buf[offset + 2] != 0xe4
2220 || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
2221 return pc;
2222
2223 offset_and = offset;
2224 offset += buf[offset + 1] == 0x81 ? 7 : 4;
2225
2226 /* Skip optional addr32 prefix. */
2227 if (buf[offset] == 0x67)
2228 offset++;
2229
2230 /* The next instruction has to be "pushq -8(%reg)". */
2231 r = 0;
2232 if (buf[offset] == 0xff)
2233 offset++;
2234 else if ((buf[offset] & 0xf6) == 0x40
2235 && buf[offset + 1] == 0xff)
2236 {
2237 /* Check the REX.B bit. */
2238 if ((buf[offset] & 0x1) != 0)
2239 r = 8;
2240 offset += 2;
2241 }
2242 else
2243 return pc;
2244
2245 /* 8-bit -8 is 0xf8. REG must be binary 110 and MOD must be binary
2246 01. */
2247 if (buf[offset + 1] != 0xf8
2248 || (buf[offset] & 0xf8) != 0x70)
2249 return pc;
2250
2251 /* R/M has register. */
2252 r += buf[offset] & 7;
2253
2254 /* Registers in leaq and pushq have to be the same. */
2255 if (reg != r)
2256 return pc;
2257
2258 if (current_pc > pc + offset_and)
2259 cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);
2260
2261 return std::min (pc + offset + 2, current_pc);
2262 }
2263
2264 /* Do a limited analysis of the prologue at PC and update CACHE
2265 accordingly. Bail out early if CURRENT_PC is reached. Return the
2266 address where the analysis stopped.
2267
2268 We will handle only functions beginning with:
2269
2270 pushq %rbp 0x55
2271 movq %rsp, %rbp 0x48 0x89 0xe5 (or 0x48 0x8b 0xec)
2272
2273 or (for the X32 ABI):
2274
2275 pushq %rbp 0x55
2276 movl %esp, %ebp 0x89 0xe5 (or 0x8b 0xec)
2277
2278 Any function that doesn't start with one of these sequences will be
2279 assumed to have no prologue and thus no valid frame pointer in
2280 %rbp. */
2281
2282 static CORE_ADDR
2283 amd64_analyze_prologue (struct gdbarch *gdbarch,
2284 CORE_ADDR pc, CORE_ADDR current_pc,
2285 struct amd64_frame_cache *cache)
2286 {
2287 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2288 /* There are two variations of movq %rsp, %rbp. */
2289 static const gdb_byte mov_rsp_rbp_1[3] = { 0x48, 0x89, 0xe5 };
2290 static const gdb_byte mov_rsp_rbp_2[3] = { 0x48, 0x8b, 0xec };
2291 /* Ditto for movl %esp, %ebp. */
2292 static const gdb_byte mov_esp_ebp_1[2] = { 0x89, 0xe5 };
2293 static const gdb_byte mov_esp_ebp_2[2] = { 0x8b, 0xec };
2294
2295 gdb_byte buf[3];
2296 gdb_byte op;
2297
2298 if (current_pc <= pc)
2299 return current_pc;
2300
2301 if (gdbarch_ptr_bit (gdbarch) == 32)
2302 pc = amd64_x32_analyze_stack_align (pc, current_pc, cache);
2303 else
2304 pc = amd64_analyze_stack_align (pc, current_pc, cache);
2305
2306 op = read_code_unsigned_integer (pc, 1, byte_order);
2307
2308 if (op == 0x55) /* pushq %rbp */
2309 {
2310 /* Take into account that we've executed the `pushq %rbp' that
2311 starts this instruction sequence. */
2312 cache->saved_regs[AMD64_RBP_REGNUM] = 0;
2313 cache->sp_offset += 8;
2314
2315 /* If that's all, return now. */
2316 if (current_pc <= pc + 1)
2317 return current_pc;
2318
2319 read_code (pc + 1, buf, 3);
2320
2321 /* Check for `movq %rsp, %rbp'. */
2322 if (memcmp (buf, mov_rsp_rbp_1, 3) == 0
2323 || memcmp (buf, mov_rsp_rbp_2, 3) == 0)
2324 {
2325 /* OK, we actually have a frame. */
2326 cache->frameless_p = 0;
2327 return pc + 4;
2328 }
2329
2330 /* For X32, also check for `movl %esp, %ebp'. */
2331 if (gdbarch_ptr_bit (gdbarch) == 32)
2332 {
2333 if (memcmp (buf, mov_esp_ebp_1, 2) == 0
2334 || memcmp (buf, mov_esp_ebp_2, 2) == 0)
2335 {
2336 /* OK, we actually have a frame. */
2337 cache->frameless_p = 0;
2338 return pc + 3;
2339 }
2340 }
2341
2342 return pc + 1;
2343 }
2344
2345 return pc;
2346 }
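
/* Worked example (illustration only): for the canonical prologue
   55 / 48 89 e5 (pushq %rbp; movq %rsp,%rbp), the analysis records
   the saved %rbp at offset 0, advances sp_offset from -8 to 0,
   clears frameless_p, and returns PC + 4.  */
#if 0
static const gdb_byte canonical_prologue[] = { 0x55, 0x48, 0x89, 0xe5 };
/* Expected cache state afterwards (assuming CURRENT_PC > PC + 4):
   cache->saved_regs[AMD64_RBP_REGNUM] == 0, cache->sp_offset == 0,
   cache->frameless_p == 0.  */
#endif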
2347
2348 /* Work around false termination of prologue - GCC PR debug/48827.
2349
2350 START_PC is the first instruction of a function, and PC is the lowest
2351 post-prologue address determined so far. The function returns PC if it has nothing to do.
2352
2353 84 c0 test %al,%al
2354 74 23 je after
2355 <-- here the line-number advance is 0 - the false prologue end marker.
2356 0f 29 85 70 ff ff ff movaps %xmm0,-0x90(%rbp)
2357 0f 29 4d 80 movaps %xmm1,-0x80(%rbp)
2358 0f 29 55 90 movaps %xmm2,-0x70(%rbp)
2359 0f 29 5d a0 movaps %xmm3,-0x60(%rbp)
2360 0f 29 65 b0 movaps %xmm4,-0x50(%rbp)
2361 0f 29 6d c0 movaps %xmm5,-0x40(%rbp)
2362 0f 29 75 d0 movaps %xmm6,-0x30(%rbp)
2363 0f 29 7d e0 movaps %xmm7,-0x20(%rbp)
2364 after: */
2365
2366 static CORE_ADDR
2367 amd64_skip_xmm_prologue (CORE_ADDR pc, CORE_ADDR start_pc)
2368 {
2369 struct symtab_and_line start_pc_sal, next_sal;
2370 gdb_byte buf[4 + 8 * 7];
2371 int offset, xmmreg;
2372
2373 if (pc == start_pc)
2374 return pc;
2375
2376 start_pc_sal = find_pc_sect_line (start_pc, NULL, 0);
2377 if (start_pc_sal.symtab == NULL
2378 || producer_is_gcc_ge_4 (COMPUNIT_PRODUCER
2379 (SYMTAB_COMPUNIT (start_pc_sal.symtab))) < 6
2380 || start_pc_sal.pc != start_pc || pc >= start_pc_sal.end)
2381 return pc;
2382
2383 next_sal = find_pc_sect_line (start_pc_sal.end, NULL, 0);
2384 if (next_sal.line != start_pc_sal.line)
2385 return pc;
2386
2387 /* START_PC can be from overlaid memory, ignored here. */
2388 if (target_read_code (next_sal.pc - 4, buf, sizeof (buf)) != 0)
2389 return pc;
2390
2391 /* test %al,%al */
2392 if (buf[0] != 0x84 || buf[1] != 0xc0)
2393 return pc;
2394 /* je AFTER */
2395 if (buf[2] != 0x74)
2396 return pc;
2397
2398 offset = 4;
2399 for (xmmreg = 0; xmmreg < 8; xmmreg++)
2400 {
2401 /* 0x0f 0x29 0b??000101 movaps %xmmreg?,-0x??(%rbp) */
2402 if (buf[offset] != 0x0f || buf[offset + 1] != 0x29
2403 || (buf[offset + 2] & 0x3f) != (xmmreg << 3 | 0x5))
2404 return pc;
2405
2406 /* 0b01?????? */
2407 if ((buf[offset + 2] & 0xc0) == 0x40)
2408 {
2409 /* 8-bit displacement. */
2410 offset += 4;
2411 }
2412 /* 0b10?????? */
2413 else if ((buf[offset + 2] & 0xc0) == 0x80)
2414 {
2415 /* 32-bit displacement. */
2416 offset += 7;
2417 }
2418 else
2419 return pc;
2420 }
2421
2422 /* je AFTER */
2423 if (offset - 4 != buf[3])
2424 return pc;
2425
2426 return next_sal.end;
2427 }
2428
2429 /* Return PC of first real instruction. */
2430
2431 static CORE_ADDR
2432 amd64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR start_pc)
2433 {
2434 struct amd64_frame_cache cache;
2435 CORE_ADDR pc;
2436 CORE_ADDR func_addr;
2437
2438 if (find_pc_partial_function (start_pc, NULL, &func_addr, NULL))
2439 {
2440 CORE_ADDR post_prologue_pc
2441 = skip_prologue_using_sal (gdbarch, func_addr);
2442 struct compunit_symtab *cust = find_pc_compunit_symtab (func_addr);
2443
2444 /* Clang always emits a line note before the prologue and another
2445 one after. We trust clang to emit usable line notes. */
2446 if (post_prologue_pc
2447 && (cust != NULL
2448 && COMPUNIT_PRODUCER (cust) != NULL
2449 && startswith (COMPUNIT_PRODUCER (cust), "clang ")))
2450 return std::max (start_pc, post_prologue_pc);
2451 }
2452
2453 amd64_init_frame_cache (&cache);
2454 pc = amd64_analyze_prologue (gdbarch, start_pc, 0xffffffffffffffffLL,
2455 &cache);
2456 if (cache.frameless_p)
2457 return start_pc;
2458
2459 return amd64_skip_xmm_prologue (pc, start_pc);
2460 }
2461 \f
2462
2463 /* Normal frames. */
2464
2465 static void
2466 amd64_frame_cache_1 (struct frame_info *this_frame,
2467 struct amd64_frame_cache *cache)
2468 {
2469 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2470 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2471 gdb_byte buf[8];
2472 int i;
2473
2474 cache->pc = get_frame_func (this_frame);
2475 if (cache->pc != 0)
2476 amd64_analyze_prologue (gdbarch, cache->pc, get_frame_pc (this_frame),
2477 cache);
2478
2479 if (cache->frameless_p)
2480 {
2481 /* We didn't find a valid frame. If we're at the start of a
2482 function, or somewhere halfway through its prologue, the
2483 function's frame probably hasn't been fully set up yet. Try to
2484 reconstruct the base address for the stack frame by looking
2485 at the stack pointer. For truly "frameless" functions this
2486 might work too. */
2487
2488 if (cache->saved_sp_reg != -1)
2489 {
2490 /* Stack pointer has been saved. */
2491 get_frame_register (this_frame, cache->saved_sp_reg, buf);
2492 cache->saved_sp = extract_unsigned_integer (buf, 8, byte_order);
2493
2494 /* We're halfway through aligning the stack. */
2495 cache->base = ((cache->saved_sp - 8) & 0xfffffffffffffff0LL) - 8;
2496 cache->saved_regs[AMD64_RIP_REGNUM] = cache->saved_sp - 8;
2497
2498 /* This will be added back below. */
2499 cache->saved_regs[AMD64_RIP_REGNUM] -= cache->base;
2500 }
2501 else
2502 {
2503 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2504 cache->base = extract_unsigned_integer (buf, 8, byte_order)
2505 + cache->sp_offset;
2506 }
2507 }
2508 else
2509 {
2510 get_frame_register (this_frame, AMD64_RBP_REGNUM, buf);
2511 cache->base = extract_unsigned_integer (buf, 8, byte_order);
2512 }
2513
2514 /* Now that we have the base address for the stack frame we can
2515 calculate the value of %rsp in the calling frame. */
2516 cache->saved_sp = cache->base + 16;
2517
2518 /* For normal frames, %rip is stored at 8(%rbp). If we don't have a
2519 frame we find it at the same offset from the reconstructed base
2520 address. If we're halfway aligning the stack, %rip is handled
2521 differently (see above). */
2522 if (!cache->frameless_p || cache->saved_sp_reg == -1)
2523 cache->saved_regs[AMD64_RIP_REGNUM] = 8;
2524
2525 /* Adjust all the saved registers such that they contain addresses
2526 instead of offsets. */
2527 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
2528 if (cache->saved_regs[i] != -1)
2529 cache->saved_regs[i] += cache->base;
2530
2531 cache->base_p = 1;
2532 }
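
/* Numeric sketch of the conversion above (made-up addresses): with
   cache->base == 0x7fffffffe540, the %rbp entry recorded at offset 0
   becomes the address 0x7fffffffe540 and the %rip entry at offset 8
   becomes 0x7fffffffe548; entries still equal to -1 are skipped and
   continue to mean "not saved".  */
#if 0
CORE_ADDR base = 0x7fffffffe540;        /* assumed frame base */
CORE_ADDR rbp_addr = base + 0;          /* where %rbp was pushed */
CORE_ADDR rip_addr = base + 8;          /* where %rip was pushed */
#endif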
2533
2534 static struct amd64_frame_cache *
2535 amd64_frame_cache (struct frame_info *this_frame, void **this_cache)
2536 {
2537 struct amd64_frame_cache *cache;
2538
2539 if (*this_cache)
2540 return (struct amd64_frame_cache *) *this_cache;
2541
2542 cache = amd64_alloc_frame_cache ();
2543 *this_cache = cache;
2544
2545 TRY
2546 {
2547 amd64_frame_cache_1 (this_frame, cache);
2548 }
2549 CATCH (ex, RETURN_MASK_ERROR)
2550 {
2551 if (ex.error != NOT_AVAILABLE_ERROR)
2552 throw_exception (ex);
2553 }
2554 END_CATCH
2555
2556 return cache;
2557 }
2558
2559 static enum unwind_stop_reason
2560 amd64_frame_unwind_stop_reason (struct frame_info *this_frame,
2561 void **this_cache)
2562 {
2563 struct amd64_frame_cache *cache =
2564 amd64_frame_cache (this_frame, this_cache);
2565
2566 if (!cache->base_p)
2567 return UNWIND_UNAVAILABLE;
2568
2569 /* This marks the outermost frame. */
2570 if (cache->base == 0)
2571 return UNWIND_OUTERMOST;
2572
2573 return UNWIND_NO_REASON;
2574 }
2575
2576 static void
2577 amd64_frame_this_id (struct frame_info *this_frame, void **this_cache,
2578 struct frame_id *this_id)
2579 {
2580 struct amd64_frame_cache *cache =
2581 amd64_frame_cache (this_frame, this_cache);
2582
2583 if (!cache->base_p)
2584 (*this_id) = frame_id_build_unavailable_stack (cache->pc);
2585 else if (cache->base == 0)
2586 {
2587 /* This marks the outermost frame. */
2588 return;
2589 }
2590 else
2591 (*this_id) = frame_id_build (cache->base + 16, cache->pc);
2592 }
2593
2594 static struct value *
2595 amd64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
2596 int regnum)
2597 {
2598 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2599 struct amd64_frame_cache *cache =
2600 amd64_frame_cache (this_frame, this_cache);
2601
2602 gdb_assert (regnum >= 0);
2603
2604 if (regnum == gdbarch_sp_regnum (gdbarch) && cache->saved_sp)
2605 return frame_unwind_got_constant (this_frame, regnum, cache->saved_sp);
2606
2607 if (regnum < AMD64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
2608 return frame_unwind_got_memory (this_frame, regnum,
2609 cache->saved_regs[regnum]);
2610
2611 return frame_unwind_got_register (this_frame, regnum, regnum);
2612 }
2613
2614 static const struct frame_unwind amd64_frame_unwind =
2615 {
2616 NORMAL_FRAME,
2617 amd64_frame_unwind_stop_reason,
2618 amd64_frame_this_id,
2619 amd64_frame_prev_register,
2620 NULL,
2621 default_frame_sniffer
2622 };
2623 \f
2624 /* Generate a bytecode expression to get the value of the saved PC. */
2625
2626 static void
2627 amd64_gen_return_address (struct gdbarch *gdbarch,
2628 struct agent_expr *ax, struct axs_value *value,
2629 CORE_ADDR scope)
2630 {
2631 /* The following sequence assumes the traditional use of the base
2632 register. */
2633 ax_reg (ax, AMD64_RBP_REGNUM);
2634 ax_const_l (ax, 8);
2635 ax_simple (ax, aop_add);
2636 value->type = register_type (gdbarch, AMD64_RIP_REGNUM);
2637 value->kind = axs_lvalue_memory;
2638 }
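
/* The generated bytecode is equivalent to evaluating $rbp + 8 and
   treating the result as the address holding the saved %rip, i.e.
   roughly the expression below (sketch only; `rbp' stands for the
   frame's base register value):  */
#if 0
CORE_ADDR saved_rip_addr = rbp + 8;     /* axs_lvalue_memory: an address */
#endif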
2639 \f
2640
2641 /* Signal trampolines. */
2642
2643 /* FIXME: kettenis/20030419: Perhaps we can unify the 32-bit and
2644 64-bit variants. This would require using identical frame caches
2645 on both platforms. */
2646
2647 static struct amd64_frame_cache *
2648 amd64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
2649 {
2650 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2651 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2652 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2653 struct amd64_frame_cache *cache;
2654 CORE_ADDR addr;
2655 gdb_byte buf[8];
2656 int i;
2657
2658 if (*this_cache)
2659 return (struct amd64_frame_cache *) *this_cache;
2660
2661 cache = amd64_alloc_frame_cache ();
2662
2663 TRY
2664 {
2665 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2666 cache->base = extract_unsigned_integer (buf, 8, byte_order) - 8;
2667
2668 addr = tdep->sigcontext_addr (this_frame);
2669 gdb_assert (tdep->sc_reg_offset);
2670 gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
2671 for (i = 0; i < tdep->sc_num_regs; i++)
2672 if (tdep->sc_reg_offset[i] != -1)
2673 cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];
2674
2675 cache->base_p = 1;
2676 }
2677 CATCH (ex, RETURN_MASK_ERROR)
2678 {
2679 if (ex.error != NOT_AVAILABLE_ERROR)
2680 throw_exception (ex);
2681 }
2682 END_CATCH
2683
2684 *this_cache = cache;
2685 return cache;
2686 }
2687
2688 static enum unwind_stop_reason
2689 amd64_sigtramp_frame_unwind_stop_reason (struct frame_info *this_frame,
2690 void **this_cache)
2691 {
2692 struct amd64_frame_cache *cache =
2693 amd64_sigtramp_frame_cache (this_frame, this_cache);
2694
2695 if (!cache->base_p)
2696 return UNWIND_UNAVAILABLE;
2697
2698 return UNWIND_NO_REASON;
2699 }
2700
2701 static void
2702 amd64_sigtramp_frame_this_id (struct frame_info *this_frame,
2703 void **this_cache, struct frame_id *this_id)
2704 {
2705 struct amd64_frame_cache *cache =
2706 amd64_sigtramp_frame_cache (this_frame, this_cache);
2707
2708 if (!cache->base_p)
2709 (*this_id) = frame_id_build_unavailable_stack (get_frame_pc (this_frame));
2710 else if (cache->base == 0)
2711 {
2712 /* This marks the outermost frame. */
2713 return;
2714 }
2715 else
2716 (*this_id) = frame_id_build (cache->base + 16, get_frame_pc (this_frame));
2717 }
2718
2719 static struct value *
2720 amd64_sigtramp_frame_prev_register (struct frame_info *this_frame,
2721 void **this_cache, int regnum)
2722 {
2723 /* Make sure we've initialized the cache. */
2724 amd64_sigtramp_frame_cache (this_frame, this_cache);
2725
2726 return amd64_frame_prev_register (this_frame, this_cache, regnum);
2727 }
2728
2729 static int
2730 amd64_sigtramp_frame_sniffer (const struct frame_unwind *self,
2731 struct frame_info *this_frame,
2732 void **this_cache)
2733 {
2734 struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
2735
2736 /* We shouldn't even bother if we don't have a sigcontext_addr
2737 handler. */
2738 if (tdep->sigcontext_addr == NULL)
2739 return 0;
2740
2741 if (tdep->sigtramp_p != NULL)
2742 {
2743 if (tdep->sigtramp_p (this_frame))
2744 return 1;
2745 }
2746
2747 if (tdep->sigtramp_start != 0)
2748 {
2749 CORE_ADDR pc = get_frame_pc (this_frame);
2750
2751 gdb_assert (tdep->sigtramp_end != 0);
2752 if (pc >= tdep->sigtramp_start && pc < tdep->sigtramp_end)
2753 return 1;
2754 }
2755
2756 return 0;
2757 }
2758
2759 static const struct frame_unwind amd64_sigtramp_frame_unwind =
2760 {
2761 SIGTRAMP_FRAME,
2762 amd64_sigtramp_frame_unwind_stop_reason,
2763 amd64_sigtramp_frame_this_id,
2764 amd64_sigtramp_frame_prev_register,
2765 NULL,
2766 amd64_sigtramp_frame_sniffer
2767 };
2768 \f
2769
2770 static CORE_ADDR
2771 amd64_frame_base_address (struct frame_info *this_frame, void **this_cache)
2772 {
2773 struct amd64_frame_cache *cache =
2774 amd64_frame_cache (this_frame, this_cache);
2775
2776 return cache->base;
2777 }
2778
2779 static const struct frame_base amd64_frame_base =
2780 {
2781 &amd64_frame_unwind,
2782 amd64_frame_base_address,
2783 amd64_frame_base_address,
2784 amd64_frame_base_address
2785 };
2786
2787 /* Normal frames, but in a function epilogue. */
2788
2789 /* Implement the stack_frame_destroyed_p gdbarch method.
2790
2791 The epilogue is defined here as the 'ret' instruction, which will
2792 follow any instruction such as 'leave' or 'pop %rbp' that destroys
2793 the function's stack frame. */
2794
2795 static int
2796 amd64_stack_frame_destroyed_p (struct gdbarch *gdbarch, CORE_ADDR pc)
2797 {
2798 gdb_byte insn;
2799 struct compunit_symtab *cust;
2800
2801 cust = find_pc_compunit_symtab (pc);
2802 if (cust != NULL && COMPUNIT_EPILOGUE_UNWIND_VALID (cust))
2803 return 0;
2804
2805 if (target_read_memory (pc, &insn, 1))
2806 return 0; /* Can't read memory at pc. */
2807
2808 if (insn != 0xc3) /* 'ret' instruction. */
2809 return 0;
2810
2811 return 1;
2812 }
2813
2814 static int
2815 amd64_epilogue_frame_sniffer (const struct frame_unwind *self,
2816 struct frame_info *this_frame,
2817 void **this_prologue_cache)
2818 {
2819 if (frame_relative_level (this_frame) == 0)
2820 return amd64_stack_frame_destroyed_p (get_frame_arch (this_frame),
2821 get_frame_pc (this_frame));
2822 else
2823 return 0;
2824 }
2825
2826 static struct amd64_frame_cache *
2827 amd64_epilogue_frame_cache (struct frame_info *this_frame, void **this_cache)
2828 {
2829 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2830 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2831 struct amd64_frame_cache *cache;
2832 gdb_byte buf[8];
2833
2834 if (*this_cache)
2835 return (struct amd64_frame_cache *) *this_cache;
2836
2837 cache = amd64_alloc_frame_cache ();
2838 *this_cache = cache;
2839
2840 TRY
2841 {
2842 /* Cache base will be %rsp plus cache->sp_offset (-8). */
2843 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2844 cache->base = extract_unsigned_integer (buf, 8,
2845 byte_order) + cache->sp_offset;
2846
2847 /* Cache pc will be the frame func. */
2848 cache->pc = get_frame_pc (this_frame);
2849
2850 /* The saved %rsp will be at cache->base plus 16. */
2851 cache->saved_sp = cache->base + 16;
2852
2853 /* The saved %rip will be at cache->base plus 8. */
2854 cache->saved_regs[AMD64_RIP_REGNUM] = cache->base + 8;
2855
2856 cache->base_p = 1;
2857 }
2858 CATCH (ex, RETURN_MASK_ERROR)
2859 {
2860 if (ex.error != NOT_AVAILABLE_ERROR)
2861 throw_exception (ex);
2862 }
2863 END_CATCH
2864
2865 return cache;
2866 }
2867
2868 static enum unwind_stop_reason
2869 amd64_epilogue_frame_unwind_stop_reason (struct frame_info *this_frame,
2870 void **this_cache)
2871 {
2872 struct amd64_frame_cache *cache
2873 = amd64_epilogue_frame_cache (this_frame, this_cache);
2874
2875 if (!cache->base_p)
2876 return UNWIND_UNAVAILABLE;
2877
2878 return UNWIND_NO_REASON;
2879 }
2880
2881 static void
2882 amd64_epilogue_frame_this_id (struct frame_info *this_frame,
2883 void **this_cache,
2884 struct frame_id *this_id)
2885 {
2886 struct amd64_frame_cache *cache = amd64_epilogue_frame_cache (this_frame,
2887 this_cache);
2888
2889 if (!cache->base_p)
2890 (*this_id) = frame_id_build_unavailable_stack (cache->pc);
2891 else
2892 (*this_id) = frame_id_build (cache->base + 8, cache->pc);
2893 }
2894
2895 static const struct frame_unwind amd64_epilogue_frame_unwind =
2896 {
2897 NORMAL_FRAME,
2898 amd64_epilogue_frame_unwind_stop_reason,
2899 amd64_epilogue_frame_this_id,
2900 amd64_frame_prev_register,
2901 NULL,
2902 amd64_epilogue_frame_sniffer
2903 };
2904
2905 static struct frame_id
2906 amd64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
2907 {
2908 CORE_ADDR fp;
2909
2910 fp = get_frame_register_unsigned (this_frame, AMD64_RBP_REGNUM);
2911
2912 return frame_id_build (fp + 16, get_frame_pc (this_frame));
2913 }
2914
2915 /* 16-byte align the SP per frame requirements. */
2916
2917 static CORE_ADDR
2918 amd64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
2919 {
2920 return sp & -(CORE_ADDR)16;
2921 }
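
/* Example: -(CORE_ADDR) 16 is ...fff0, so the mask clears the low
   four bits and only ever moves SP downward.  */
#if 0
CORE_ADDR sp = 0x7fffffffe538;
CORE_ADDR aligned = sp & -(CORE_ADDR) 16;       /* 0x7fffffffe530 */
#endif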
2922 \f
2923
2924 /* Supply register REGNUM from the buffer specified by FPREGS and LEN
2925 in the floating-point register set REGSET to register cache
2926 REGCACHE. If REGNUM is -1, do this for all registers in REGSET. */
2927
2928 static void
2929 amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
2930 int regnum, const void *fpregs, size_t len)
2931 {
2932 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2933 const struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2934
2935 gdb_assert (len >= tdep->sizeof_fpregset);
2936 amd64_supply_fxsave (regcache, regnum, fpregs);
2937 }
2938
2939 /* Collect register REGNUM from the register cache REGCACHE and store
2940 it in the buffer specified by FPREGS and LEN as described by the
2941 floating-point register set REGSET. If REGNUM is -1, do this for
2942 all registers in REGSET. */
2943
2944 static void
2945 amd64_collect_fpregset (const struct regset *regset,
2946 const struct regcache *regcache,
2947 int regnum, void *fpregs, size_t len)
2948 {
2949 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2950 const struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2951
2952 gdb_assert (len >= tdep->sizeof_fpregset);
2953 amd64_collect_fxsave (regcache, regnum, fpregs);
2954 }
2955
2956 const struct regset amd64_fpregset =
2957 {
2958 NULL, amd64_supply_fpregset, amd64_collect_fpregset
2959 };
2960 \f
2961
2962 /* Figure out where the longjmp will land. Slurp the jmp_buf out of
2963 %rdi. We expect its value to be a pointer to the jmp_buf structure
2964 from which we extract the address that we will land at. This
2965 address is copied into PC. This routine returns non-zero on
2966 success. */
2967
2968 static int
2969 amd64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2970 {
2971 gdb_byte buf[8];
2972 CORE_ADDR jb_addr;
2973 struct gdbarch *gdbarch = get_frame_arch (frame);
2974 int jb_pc_offset = gdbarch_tdep (gdbarch)->jb_pc_offset;
2975 int len = TYPE_LENGTH (builtin_type (gdbarch)->builtin_func_ptr);
2976
2977 /* If JB_PC_OFFSET is -1, we have no way to find out where the
2978 longjmp will land. */
2979 if (jb_pc_offset == -1)
2980 return 0;
2981
2982 get_frame_register (frame, AMD64_RDI_REGNUM, buf);
2983 jb_addr = extract_typed_address
2984 (buf, builtin_type (gdbarch)->builtin_data_ptr);
2985 if (target_read_memory (jb_addr + jb_pc_offset, buf, len))
2986 return 0;
2987
2988 *pc = extract_typed_address (buf, builtin_type (gdbarch)->builtin_func_ptr);
2989
2990 return 1;
2991 }
2992
2993 static const int amd64_record_regmap[] =
2994 {
2995 AMD64_RAX_REGNUM, AMD64_RCX_REGNUM, AMD64_RDX_REGNUM, AMD64_RBX_REGNUM,
2996 AMD64_RSP_REGNUM, AMD64_RBP_REGNUM, AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
2997 AMD64_R8_REGNUM, AMD64_R9_REGNUM, AMD64_R10_REGNUM, AMD64_R11_REGNUM,
2998 AMD64_R12_REGNUM, AMD64_R13_REGNUM, AMD64_R14_REGNUM, AMD64_R15_REGNUM,
2999 AMD64_RIP_REGNUM, AMD64_EFLAGS_REGNUM, AMD64_CS_REGNUM, AMD64_SS_REGNUM,
3000 AMD64_DS_REGNUM, AMD64_ES_REGNUM, AMD64_FS_REGNUM, AMD64_GS_REGNUM
3001 };
3002
3003 void
3004 amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch,
3005 const target_desc *default_tdesc)
3006 {
3007 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3008 const struct target_desc *tdesc = info.target_desc;
3009 static const char *const stap_integer_prefixes[] = { "$", NULL };
3010 static const char *const stap_register_prefixes[] = { "%", NULL };
3011 static const char *const stap_register_indirection_prefixes[] = { "(",
3012 NULL };
3013 static const char *const stap_register_indirection_suffixes[] = { ")",
3014 NULL };
3015
3016 /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
3017 floating-point registers. */
3018 tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;
3019 tdep->fpregset = &amd64_fpregset;
3020
3021 if (! tdesc_has_registers (tdesc))
3022 tdesc = default_tdesc;
3023 tdep->tdesc = tdesc;
3024
3025 tdep->num_core_regs = AMD64_NUM_GREGS + I387_NUM_REGS;
3026 tdep->register_names = amd64_register_names;
3027
3028 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx512") != NULL)
3029 {
3030 tdep->zmmh_register_names = amd64_zmmh_names;
3031 tdep->k_register_names = amd64_k_names;
3032 tdep->xmm_avx512_register_names = amd64_xmm_avx512_names;
3033 tdep->ymm16h_register_names = amd64_ymmh_avx512_names;
3034
3035 tdep->num_zmm_regs = 32;
3036 tdep->num_xmm_avx512_regs = 16;
3037 tdep->num_ymm_avx512_regs = 16;
3038
3039 tdep->zmm0h_regnum = AMD64_ZMM0H_REGNUM;
3040 tdep->k0_regnum = AMD64_K0_REGNUM;
3041 tdep->xmm16_regnum = AMD64_XMM16_REGNUM;
3042 tdep->ymm16h_regnum = AMD64_YMM16H_REGNUM;
3043 }
3044
3045 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx") != NULL)
3046 {
3047 tdep->ymmh_register_names = amd64_ymmh_names;
3048 tdep->num_ymm_regs = 16;
3049 tdep->ymm0h_regnum = AMD64_YMM0H_REGNUM;
3050 }
3051
3052 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.mpx") != NULL)
3053 {
3054 tdep->mpx_register_names = amd64_mpx_names;
3055 tdep->bndcfgu_regnum = AMD64_BNDCFGU_REGNUM;
3056 tdep->bnd0r_regnum = AMD64_BND0R_REGNUM;
3057 }
3058
3059 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.segments") != NULL)
3060 {
3061 const struct tdesc_feature *feature =
3062 tdesc_find_feature (tdesc, "org.gnu.gdb.i386.segments");
3063 struct tdesc_arch_data *tdesc_data_segments =
3064 (struct tdesc_arch_data *) info.tdep_info;
3065
3066 tdesc_numbered_register (feature, tdesc_data_segments,
3067 AMD64_FSBASE_REGNUM, "fs_base");
3068 tdesc_numbered_register (feature, tdesc_data_segments,
3069 AMD64_GSBASE_REGNUM, "gs_base");
3070 }
3071
3072 if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.pkeys") != NULL)
3073 {
3074 tdep->pkeys_register_names = amd64_pkeys_names;
3075 tdep->pkru_regnum = AMD64_PKRU_REGNUM;
3076 tdep->num_pkeys_regs = 1;
3077 }
3078
3079 tdep->num_byte_regs = 20;
3080 tdep->num_word_regs = 16;
3081 tdep->num_dword_regs = 16;
3082 /* Avoid wiring in the MMX registers for now. */
3083 tdep->num_mmx_regs = 0;
3084
3085 set_gdbarch_pseudo_register_read_value (gdbarch,
3086 amd64_pseudo_register_read_value);
3087 set_gdbarch_pseudo_register_write (gdbarch,
3088 amd64_pseudo_register_write);
3089 set_gdbarch_ax_pseudo_register_collect (gdbarch,
3090 amd64_ax_pseudo_register_collect);
3091
3092 set_tdesc_pseudo_register_name (gdbarch, amd64_pseudo_register_name);
3093
3094 /* AMD64 has an FPU and 16 SSE registers. */
3095 tdep->st0_regnum = AMD64_ST0_REGNUM;
3096 tdep->num_xmm_regs = 16;
3097
3098 /* This is what all the fuss is about. */
3099 set_gdbarch_long_bit (gdbarch, 64);
3100 set_gdbarch_long_long_bit (gdbarch, 64);
3101 set_gdbarch_ptr_bit (gdbarch, 64);
3102
3103 /* In contrast to the i386, on AMD64 a `long double' actually takes
3104 up 128 bits, even though it's still based on the i387 extended
3105 floating-point format which has only 80 significant bits. */
3106 set_gdbarch_long_double_bit (gdbarch, 128);
3107
3108 set_gdbarch_num_regs (gdbarch, AMD64_NUM_REGS);
3109
3110 /* Register numbers of various important registers. */
3111 set_gdbarch_sp_regnum (gdbarch, AMD64_RSP_REGNUM); /* %rsp */
3112 set_gdbarch_pc_regnum (gdbarch, AMD64_RIP_REGNUM); /* %rip */
3113 set_gdbarch_ps_regnum (gdbarch, AMD64_EFLAGS_REGNUM); /* %eflags */
3114 set_gdbarch_fp0_regnum (gdbarch, AMD64_ST0_REGNUM); /* %st(0) */
3115
3116 /* The "default" register numbering scheme for AMD64 is referred to
3117 as the "DWARF Register Number Mapping" in the System V psABI.
3118 The preferred debugging format for all known AMD64 targets is
3119 actually DWARF2, and GCC doesn't seem to support DWARF (that is
3120 DWARF-1), but we provide the same mapping just in case. This
3121 mapping is also used for stabs, which GCC does support. */
3122 set_gdbarch_stab_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
3123 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
3124
3125 /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
3126 be in use on any of the supported AMD64 targets. */
3127
3128 /* Call dummy code. */
3129 set_gdbarch_push_dummy_call (gdbarch, amd64_push_dummy_call);
3130 set_gdbarch_frame_align (gdbarch, amd64_frame_align);
3131 set_gdbarch_frame_red_zone_size (gdbarch, 128);
3132
3133 set_gdbarch_convert_register_p (gdbarch, i387_convert_register_p);
3134 set_gdbarch_register_to_value (gdbarch, i387_register_to_value);
3135 set_gdbarch_value_to_register (gdbarch, i387_value_to_register);
3136
3137 set_gdbarch_return_value (gdbarch, amd64_return_value);
3138
3139 set_gdbarch_skip_prologue (gdbarch, amd64_skip_prologue);
3140
3141 tdep->record_regmap = amd64_record_regmap;
3142
3143 set_gdbarch_dummy_id (gdbarch, amd64_dummy_id);
3144
3145 /* Hook the function epilogue frame unwinder. This unwinder is
3146 prepended to the list, so that it supersedes the other
3147 unwinders in function epilogues. */
3148 frame_unwind_prepend_unwinder (gdbarch, &amd64_epilogue_frame_unwind);
3149
3150 /* Hook the prologue-based frame unwinders. */
3151 frame_unwind_append_unwinder (gdbarch, &amd64_sigtramp_frame_unwind);
3152 frame_unwind_append_unwinder (gdbarch, &amd64_frame_unwind);
3153 frame_base_set_default (gdbarch, &amd64_frame_base);
3154
3155 set_gdbarch_get_longjmp_target (gdbarch, amd64_get_longjmp_target);
3156
3157 set_gdbarch_relocate_instruction (gdbarch, amd64_relocate_instruction);
3158
3159 set_gdbarch_gen_return_address (gdbarch, amd64_gen_return_address);
3160
3161 /* SystemTap variables and functions. */
3162 set_gdbarch_stap_integer_prefixes (gdbarch, stap_integer_prefixes);
3163 set_gdbarch_stap_register_prefixes (gdbarch, stap_register_prefixes);
3164 set_gdbarch_stap_register_indirection_prefixes (gdbarch,
3165 stap_register_indirection_prefixes);
3166 set_gdbarch_stap_register_indirection_suffixes (gdbarch,
3167 stap_register_indirection_suffixes);
3168 set_gdbarch_stap_is_single_operand (gdbarch,
3169 i386_stap_is_single_operand);
3170 set_gdbarch_stap_parse_special_token (gdbarch,
3171 i386_stap_parse_special_token);
3172 set_gdbarch_insn_is_call (gdbarch, amd64_insn_is_call);
3173 set_gdbarch_insn_is_ret (gdbarch, amd64_insn_is_ret);
3174 set_gdbarch_insn_is_jump (gdbarch, amd64_insn_is_jump);
3175 }
3176 \f
3177
3178 static struct type *
3179 amd64_x32_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
3180 {
3181 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3182
3183 switch (regnum - tdep->eax_regnum)
3184 {
3185 case AMD64_RBP_REGNUM: /* %ebp */
3186 case AMD64_RSP_REGNUM: /* %esp */
3187 return builtin_type (gdbarch)->builtin_data_ptr;
3188 case AMD64_RIP_REGNUM: /* %eip */
3189 return builtin_type (gdbarch)->builtin_func_ptr;
3190 }
3191
3192 return i386_pseudo_register_type (gdbarch, regnum);
3193 }
3194
3195 void
3196 amd64_x32_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch,
3197 const target_desc *default_tdesc)
3198 {
3199 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3200
3201 amd64_init_abi (info, gdbarch, default_tdesc);
3202
3203 tdep->num_dword_regs = 17;
3204 set_tdesc_pseudo_register_type (gdbarch, amd64_x32_pseudo_register_type);
3205
3206 set_gdbarch_long_bit (gdbarch, 32);
3207 set_gdbarch_ptr_bit (gdbarch, 32);
3208 }
3209
3210 /* Return the target description for a specified XSAVE feature mask. */
3211
3212 const struct target_desc *
3213 amd64_target_description (uint64_t xcr0)
3214 {
3215 switch (xcr0 & X86_XSTATE_ALL_MASK)
3216 {
3217 case X86_XSTATE_AVX_MPX_AVX512_PKU_MASK:
3218 return tdesc_amd64_avx_mpx_avx512_pku;
3219 case X86_XSTATE_AVX_AVX512_MASK:
3220 return tdesc_amd64_avx_avx512;
3221 case X86_XSTATE_MPX_MASK:
3222 return tdesc_amd64_mpx;
3223 case X86_XSTATE_AVX_MPX_MASK:
3224 return tdesc_amd64_avx_mpx;
3225 case X86_XSTATE_AVX_MASK:
3226 return tdesc_amd64_avx;
3227 default:
3228 return tdesc_amd64;
3229 }
3230 }
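
/* Usage sketch (the xcr0 value is an assumption): a target whose
   XCR0 enables x87, SSE and AVX state reports 0x7, which equals
   X86_XSTATE_AVX_MASK and selects tdesc_amd64_avx.  */
#if 0
const struct target_desc *tdesc = amd64_target_description (0x7);
#endif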
3231
3232 /* Provide a prototype to silence -Wmissing-prototypes. */
3233 void _initialize_amd64_tdep (void);
3234
3235 void
3236 _initialize_amd64_tdep (void)
3237 {
3238 initialize_tdesc_amd64 ();
3239 initialize_tdesc_amd64_avx ();
3240 initialize_tdesc_amd64_mpx ();
3241 initialize_tdesc_amd64_avx_mpx ();
3242 initialize_tdesc_amd64_avx_avx512 ();
3243 initialize_tdesc_amd64_avx_mpx_avx512_pku ();
3244 }
3245 \f
3246
3247 /* The 64-bit FXSAVE format differs from the 32-bit format in the
3248 sense that the instruction pointer and data pointer are simply
3249 64-bit offsets into the code segment and the data segment instead
3250 of a selector/offset pair. The functions below store the upper 32
3251 bits of these pointers (instead of just the 16 bits of the segment
3252 selector). */
3253
3254 /* Fill register REGNUM in REGCACHE with the appropriate
3255 floating-point or SSE register value from *FXSAVE. If REGNUM is
3256 -1, do this for all registers. This function masks off any of the
3257 reserved bits in *FXSAVE. */
3258
3259 void
3260 amd64_supply_fxsave (struct regcache *regcache, int regnum,
3261 const void *fxsave)
3262 {
3263 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3264 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3265
3266 i387_supply_fxsave (regcache, regnum, fxsave);
3267
3268 if (fxsave
3269 && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3270 {
3271 const gdb_byte *regs = (const gdb_byte *) fxsave;
3272
3273 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3274 regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
3275 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3276 regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
3277 }
3278 }
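
/* Offsets 12 and 20 above are the high halves of the 64-bit FIP
   (offset 8) and FDP (offset 16) fields of the FXSAVE area, which in
   64-bit mode replace the FISEG/FOSEG selectors.  Layout sketch of
   the first 24 bytes (illustrative only):  */
#if 0
struct fxsave_head
{
  unsigned short fcw;           /*  0: control word */
  unsigned short fsw;           /*  2: status word */
  unsigned char ftw;            /*  4: abridged tag word */
  unsigned char reserved;       /*  5 */
  unsigned short fop;           /*  6: last opcode */
  unsigned long long fip;       /*  8: instruction pointer; +12 is its
                                   upper half */
  unsigned long long fdp;       /* 16: data pointer; +20 is its upper
                                   half */
};
#endif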
3279
3280 /* Similar to amd64_supply_fxsave, but use XSAVE extended state. */
3281
3282 void
3283 amd64_supply_xsave (struct regcache *regcache, int regnum,
3284 const void *xsave)
3285 {
3286 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3287 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3288
3289 i387_supply_xsave (regcache, regnum, xsave);
3290
3291 if (xsave
3292 && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3293 {
3294 const gdb_byte *regs = (const gdb_byte *) xsave;
3295
3296 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3297 regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep),
3298 regs + 12);
3299 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3300 regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep),
3301 regs + 20);
3302 }
3303 }
3304
3305 /* Fill register REGNUM (if it is a floating-point or SSE register) in
3306 *FXSAVE with the value from REGCACHE. If REGNUM is -1, do this for
3307 all registers. This function doesn't touch any of the reserved
3308 bits in *FXSAVE. */
3309
3310 void
3311 amd64_collect_fxsave (const struct regcache *regcache, int regnum,
3312 void *fxsave)
3313 {
3314 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3315 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3316 gdb_byte *regs = (gdb_byte *) fxsave;
3317
3318 i387_collect_fxsave (regcache, regnum, fxsave);
3319
3320 if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3321 {
3322 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3323 regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
3324 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3325 regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
3326 }
3327 }
3328
3329 /* Similar to amd64_collect_fxsave, but use XSAVE extended state. */
3330
3331 void
3332 amd64_collect_xsave (const struct regcache *regcache, int regnum,
3333 void *xsave, int gcore)
3334 {
3335 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3336 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3337 gdb_byte *regs = (gdb_byte *) xsave;
3338
3339 i387_collect_xsave (regcache, regnum, xsave, gcore);
3340
3341 if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
3342 {
3343 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3344 regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep),
3345 regs + 12);
3346 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3347 regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep),
3348 regs + 20);
3349 }
3350 }