]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/amd64-tdep.c
Remove (dead-code) native core file sniffers on Linux targets
[thirdparty/binutils-gdb.git] / gdb / amd64-tdep.c
CommitLineData
e53bef9f 1/* Target-dependent code for AMD64.
ce0eebec 2
ecd75fc8 3 Copyright (C) 2001-2014 Free Software Foundation, Inc.
5ae96ec1
MK
4
5 Contributed by Jiri Smid, SuSE Labs.
53e95fcf
JS
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
a9762ec7 11 the Free Software Foundation; either version 3 of the License, or
53e95fcf
JS
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
a9762ec7 20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
53e95fcf
JS
21
22#include "defs.h"
35669430
DE
23#include "opcode/i386.h"
24#include "dis-asm.h"
c4f35dd8
MK
25#include "arch-utils.h"
26#include "block.h"
27#include "dummy-frame.h"
28#include "frame.h"
29#include "frame-base.h"
30#include "frame-unwind.h"
53e95fcf 31#include "inferior.h"
45741a9c 32#include "infrun.h"
53e95fcf 33#include "gdbcmd.h"
c4f35dd8
MK
34#include "gdbcore.h"
35#include "objfiles.h"
53e95fcf 36#include "regcache.h"
2c261fae 37#include "regset.h"
53e95fcf 38#include "symfile.h"
eda5a4d7 39#include "disasm.h"
9c1488cb 40#include "amd64-tdep.h"
c4f35dd8 41#include "i387-tdep.h"
53e95fcf 42
90884b2b 43#include "features/i386/amd64.c"
a055a187 44#include "features/i386/amd64-avx.c"
e43e105e 45#include "features/i386/amd64-mpx.c"
01f9f808
MS
46#include "features/i386/amd64-avx512.c"
47
ac1438b5
L
48#include "features/i386/x32.c"
49#include "features/i386/x32-avx.c"
01f9f808 50#include "features/i386/x32-avx512.c"
90884b2b 51
6710bf39
SS
52#include "ax.h"
53#include "ax-gdb.h"
54
e53bef9f
MK
55/* Note that the AMD64 architecture was previously known as x86-64.
56 The latter is (forever) engraved into the canonical system name as
90f90721 57 returned by config.guess, and used as the name for the AMD64 port
e53bef9f
MK
58 of GNU/Linux. The BSD's have renamed their ports to amd64; they
59 don't like to shout. For GDB we prefer the amd64_-prefix over the
60 x86_64_-prefix since it's so much easier to type. */
61
/* Register information.  Names of the AMD64 raw registers, indexed by
   GDB register number.  */

static const char *amd64_register_names[] =
{
  "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",

  /* %r8 is indeed register number 8.  */
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
  "rip", "eflags", "cs", "ss", "ds", "es", "fs", "gs",

  /* %st0 is register number 24.  */
  "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7",
  "fctrl", "fstat", "ftag", "fiseg", "fioff", "foseg", "fooff", "fop",

  /* %xmm0 is register number 40.  */
  "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
  "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
  "mxcsr",
};
81
/* Names of the AVX vector registers %ymm0-%ymm15.  */

static const char *amd64_ymm_names[] =
{
  "ymm0", "ymm1", "ymm2", "ymm3",
  "ymm4", "ymm5", "ymm6", "ymm7",
  "ymm8", "ymm9", "ymm10", "ymm11",
  "ymm12", "ymm13", "ymm14", "ymm15"
};

/* Names of the AVX-512 upper vector registers %ymm16-%ymm31.  */

static const char *amd64_ymm_avx512_names[] =
{
  "ymm16", "ymm17", "ymm18", "ymm19",
  "ymm20", "ymm21", "ymm22", "ymm23",
  "ymm24", "ymm25", "ymm26", "ymm27",
  "ymm28", "ymm29", "ymm30", "ymm31"
};

/* Names of the raw upper-half (high 128 bit) registers backing
   %ymm0-%ymm15.  */

static const char *amd64_ymmh_names[] =
{
  "ymm0h", "ymm1h", "ymm2h", "ymm3h",
  "ymm4h", "ymm5h", "ymm6h", "ymm7h",
  "ymm8h", "ymm9h", "ymm10h", "ymm11h",
  "ymm12h", "ymm13h", "ymm14h", "ymm15h"
};

/* Names of the raw upper-half registers backing %ymm16-%ymm31.  */

static const char *amd64_ymmh_avx512_names[] =
{
  "ymm16h", "ymm17h", "ymm18h", "ymm19h",
  "ymm20h", "ymm21h", "ymm22h", "ymm23h",
  "ymm24h", "ymm25h", "ymm26h", "ymm27h",
  "ymm28h", "ymm29h", "ymm30h", "ymm31h"
};
113
/* Names of the MPX bound registers and configuration/status
   registers.  */

static const char *amd64_mpx_names[] =
{
  "bnd0raw", "bnd1raw", "bnd2raw", "bnd3raw", "bndcfgu", "bndstatus"
};

/* Names of the AVX-512 opmask registers %k0-%k7.  */

static const char *amd64_k_names[] =
{
  "k0", "k1", "k2", "k3",
  "k4", "k5", "k6", "k7"
};
124
/* Names of the raw upper-half (high 256 bit) registers backing
   %zmm0-%zmm31.  */

static const char *amd64_zmmh_names[] =
{
  "zmm0h", "zmm1h", "zmm2h", "zmm3h",
  "zmm4h", "zmm5h", "zmm6h", "zmm7h",
  "zmm8h", "zmm9h", "zmm10h", "zmm11h",
  "zmm12h", "zmm13h", "zmm14h", "zmm15h",
  "zmm16h", "zmm17h", "zmm18h", "zmm19h",
  "zmm20h", "zmm21h", "zmm22h", "zmm23h",
  "zmm24h", "zmm25h", "zmm26h", "zmm27h",
  "zmm28h", "zmm29h", "zmm30h", "zmm31h"
};

/* Names of the AVX-512 pseudo registers %zmm0-%zmm31.  */

static const char *amd64_zmm_names[] =
{
  "zmm0", "zmm1", "zmm2", "zmm3",
  "zmm4", "zmm5", "zmm6", "zmm7",
  "zmm8", "zmm9", "zmm10", "zmm11",
  "zmm12", "zmm13", "zmm14", "zmm15",
  "zmm16", "zmm17", "zmm18", "zmm19",
  "zmm20", "zmm21", "zmm22", "zmm23",
  "zmm24", "zmm25", "zmm26", "zmm27",
  "zmm28", "zmm29", "zmm30", "zmm31"
};

/* Names of the AVX-512 extended SSE registers %xmm16-%xmm31.  */

static const char *amd64_xmm_avx512_names[] = {
  "xmm16", "xmm17", "xmm18", "xmm19",
  "xmm20", "xmm21", "xmm22", "xmm23",
  "xmm24", "xmm25", "xmm26", "xmm27",
  "xmm28", "xmm29", "xmm30", "xmm31"
};
155
c4f35dd8
MK
156/* DWARF Register Number Mapping as defined in the System V psABI,
157 section 3.6. */
53e95fcf 158
e53bef9f 159static int amd64_dwarf_regmap[] =
0e04a514 160{
c4f35dd8 161 /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI. */
90f90721
MK
162 AMD64_RAX_REGNUM, AMD64_RDX_REGNUM,
163 AMD64_RCX_REGNUM, AMD64_RBX_REGNUM,
164 AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
c4f35dd8
MK
165
166 /* Frame Pointer Register RBP. */
90f90721 167 AMD64_RBP_REGNUM,
c4f35dd8
MK
168
169 /* Stack Pointer Register RSP. */
90f90721 170 AMD64_RSP_REGNUM,
c4f35dd8
MK
171
172 /* Extended Integer Registers 8 - 15. */
5b856f36
PM
173 AMD64_R8_REGNUM, /* %r8 */
174 AMD64_R9_REGNUM, /* %r9 */
175 AMD64_R10_REGNUM, /* %r10 */
176 AMD64_R11_REGNUM, /* %r11 */
177 AMD64_R12_REGNUM, /* %r12 */
178 AMD64_R13_REGNUM, /* %r13 */
179 AMD64_R14_REGNUM, /* %r14 */
180 AMD64_R15_REGNUM, /* %r15 */
c4f35dd8 181
59207364 182 /* Return Address RA. Mapped to RIP. */
90f90721 183 AMD64_RIP_REGNUM,
c4f35dd8
MK
184
185 /* SSE Registers 0 - 7. */
90f90721
MK
186 AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
187 AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
188 AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
189 AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
c4f35dd8
MK
190
191 /* Extended SSE Registers 8 - 15. */
90f90721
MK
192 AMD64_XMM0_REGNUM + 8, AMD64_XMM0_REGNUM + 9,
193 AMD64_XMM0_REGNUM + 10, AMD64_XMM0_REGNUM + 11,
194 AMD64_XMM0_REGNUM + 12, AMD64_XMM0_REGNUM + 13,
195 AMD64_XMM0_REGNUM + 14, AMD64_XMM0_REGNUM + 15,
c4f35dd8
MK
196
197 /* Floating Point Registers 0-7. */
90f90721
MK
198 AMD64_ST0_REGNUM + 0, AMD64_ST0_REGNUM + 1,
199 AMD64_ST0_REGNUM + 2, AMD64_ST0_REGNUM + 3,
200 AMD64_ST0_REGNUM + 4, AMD64_ST0_REGNUM + 5,
c6f4c129
JB
201 AMD64_ST0_REGNUM + 6, AMD64_ST0_REGNUM + 7,
202
203 /* Control and Status Flags Register. */
204 AMD64_EFLAGS_REGNUM,
205
206 /* Selector Registers. */
207 AMD64_ES_REGNUM,
208 AMD64_CS_REGNUM,
209 AMD64_SS_REGNUM,
210 AMD64_DS_REGNUM,
211 AMD64_FS_REGNUM,
212 AMD64_GS_REGNUM,
213 -1,
214 -1,
215
216 /* Segment Base Address Registers. */
217 -1,
218 -1,
219 -1,
220 -1,
221
222 /* Special Selector Registers. */
223 -1,
224 -1,
225
226 /* Floating Point Control Registers. */
227 AMD64_MXCSR_REGNUM,
228 AMD64_FCTRL_REGNUM,
229 AMD64_FSTAT_REGNUM
c4f35dd8 230};
0e04a514 231
e53bef9f
MK
232static const int amd64_dwarf_regmap_len =
233 (sizeof (amd64_dwarf_regmap) / sizeof (amd64_dwarf_regmap[0]));
0e04a514 234
c4f35dd8
MK
235/* Convert DWARF register number REG to the appropriate register
236 number used by GDB. */
26abbdc4 237
c4f35dd8 238static int
d3f73121 239amd64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
53e95fcf 240{
a055a187
L
241 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
242 int ymm0_regnum = tdep->ymm0_regnum;
c4f35dd8 243 int regnum = -1;
53e95fcf 244
16aff9a6 245 if (reg >= 0 && reg < amd64_dwarf_regmap_len)
e53bef9f 246 regnum = amd64_dwarf_regmap[reg];
53e95fcf 247
c4f35dd8 248 if (regnum == -1)
8a3fe4f8 249 warning (_("Unmapped DWARF Register #%d encountered."), reg);
a055a187
L
250 else if (ymm0_regnum >= 0
251 && i386_xmm_regnum_p (gdbarch, regnum))
252 regnum += ymm0_regnum - I387_XMM0_REGNUM (tdep);
c4f35dd8
MK
253
254 return regnum;
53e95fcf 255}
d532c08f 256
35669430
DE
257/* Map architectural register numbers to gdb register numbers. */
258
259static const int amd64_arch_regmap[16] =
260{
261 AMD64_RAX_REGNUM, /* %rax */
262 AMD64_RCX_REGNUM, /* %rcx */
263 AMD64_RDX_REGNUM, /* %rdx */
264 AMD64_RBX_REGNUM, /* %rbx */
265 AMD64_RSP_REGNUM, /* %rsp */
266 AMD64_RBP_REGNUM, /* %rbp */
267 AMD64_RSI_REGNUM, /* %rsi */
268 AMD64_RDI_REGNUM, /* %rdi */
269 AMD64_R8_REGNUM, /* %r8 */
270 AMD64_R9_REGNUM, /* %r9 */
271 AMD64_R10_REGNUM, /* %r10 */
272 AMD64_R11_REGNUM, /* %r11 */
273 AMD64_R12_REGNUM, /* %r12 */
274 AMD64_R13_REGNUM, /* %r13 */
275 AMD64_R14_REGNUM, /* %r14 */
276 AMD64_R15_REGNUM /* %r15 */
277};
278
279static const int amd64_arch_regmap_len =
280 (sizeof (amd64_arch_regmap) / sizeof (amd64_arch_regmap[0]));
281
282/* Convert architectural register number REG to the appropriate register
283 number used by GDB. */
284
285static int
286amd64_arch_reg_to_regnum (int reg)
287{
288 gdb_assert (reg >= 0 && reg < amd64_arch_regmap_len);
289
290 return amd64_arch_regmap[reg];
291}
292
/* Register names for byte pseudo-registers.  Indices 0-15 are the low
   bytes of the 16 general-purpose registers; 16-19 are the legacy
   high-byte registers.  */

static const char *amd64_byte_names[] =
{
  "al", "bl", "cl", "dl", "sil", "dil", "bpl", "spl",
  "r8l", "r9l", "r10l", "r11l", "r12l", "r13l", "r14l", "r15l",
  "ah", "bh", "ch", "dh"
};

/* Number of lower byte registers.  */
#define AMD64_NUM_LOWER_BYTE_REGS 16

/* Register names for word pseudo-registers.  The empty string at the
   %sp slot is deliberate: that pseudo register is not exposed.  */

static const char *amd64_word_names[] =
{
  "ax", "bx", "cx", "dx", "si", "di", "bp", "",
  "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w"
};

/* Register names for dword pseudo-registers.  */

static const char *amd64_dword_names[] =
{
  "eax", "ebx", "ecx", "edx", "esi", "edi", "ebp", "esp",
  "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d",
  "eip"
};
321
322/* Return the name of register REGNUM. */
323
324static const char *
325amd64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
326{
327 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
328 if (i386_byte_regnum_p (gdbarch, regnum))
329 return amd64_byte_names[regnum - tdep->al_regnum];
01f9f808
MS
330 else if (i386_zmm_regnum_p (gdbarch, regnum))
331 return amd64_zmm_names[regnum - tdep->zmm0_regnum];
a055a187
L
332 else if (i386_ymm_regnum_p (gdbarch, regnum))
333 return amd64_ymm_names[regnum - tdep->ymm0_regnum];
01f9f808
MS
334 else if (i386_ymm_avx512_regnum_p (gdbarch, regnum))
335 return amd64_ymm_avx512_names[regnum - tdep->ymm16_regnum];
1ba53b71
L
336 else if (i386_word_regnum_p (gdbarch, regnum))
337 return amd64_word_names[regnum - tdep->ax_regnum];
338 else if (i386_dword_regnum_p (gdbarch, regnum))
339 return amd64_dword_names[regnum - tdep->eax_regnum];
340 else
341 return i386_pseudo_register_name (gdbarch, regnum);
342}
343
3543a589
TT
344static struct value *
345amd64_pseudo_register_read_value (struct gdbarch *gdbarch,
346 struct regcache *regcache,
347 int regnum)
1ba53b71
L
348{
349 gdb_byte raw_buf[MAX_REGISTER_SIZE];
350 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
05d1431c 351 enum register_status status;
3543a589
TT
352 struct value *result_value;
353 gdb_byte *buf;
354
355 result_value = allocate_value (register_type (gdbarch, regnum));
356 VALUE_LVAL (result_value) = lval_register;
357 VALUE_REGNUM (result_value) = regnum;
358 buf = value_contents_raw (result_value);
1ba53b71
L
359
360 if (i386_byte_regnum_p (gdbarch, regnum))
361 {
362 int gpnum = regnum - tdep->al_regnum;
363
364 /* Extract (always little endian). */
fe01d668
L
365 if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
366 {
367 /* Special handling for AH, BH, CH, DH. */
05d1431c
PA
368 status = regcache_raw_read (regcache,
369 gpnum - AMD64_NUM_LOWER_BYTE_REGS,
370 raw_buf);
371 if (status == REG_VALID)
372 memcpy (buf, raw_buf + 1, 1);
3543a589
TT
373 else
374 mark_value_bytes_unavailable (result_value, 0,
375 TYPE_LENGTH (value_type (result_value)));
fe01d668
L
376 }
377 else
378 {
05d1431c
PA
379 status = regcache_raw_read (regcache, gpnum, raw_buf);
380 if (status == REG_VALID)
381 memcpy (buf, raw_buf, 1);
3543a589
TT
382 else
383 mark_value_bytes_unavailable (result_value, 0,
384 TYPE_LENGTH (value_type (result_value)));
fe01d668 385 }
1ba53b71
L
386 }
387 else if (i386_dword_regnum_p (gdbarch, regnum))
388 {
389 int gpnum = regnum - tdep->eax_regnum;
390 /* Extract (always little endian). */
05d1431c
PA
391 status = regcache_raw_read (regcache, gpnum, raw_buf);
392 if (status == REG_VALID)
393 memcpy (buf, raw_buf, 4);
3543a589
TT
394 else
395 mark_value_bytes_unavailable (result_value, 0,
396 TYPE_LENGTH (value_type (result_value)));
1ba53b71
L
397 }
398 else
3543a589
TT
399 i386_pseudo_register_read_into_value (gdbarch, regcache, regnum,
400 result_value);
401
402 return result_value;
1ba53b71
L
403}
404
405static void
406amd64_pseudo_register_write (struct gdbarch *gdbarch,
407 struct regcache *regcache,
408 int regnum, const gdb_byte *buf)
409{
410 gdb_byte raw_buf[MAX_REGISTER_SIZE];
411 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
412
413 if (i386_byte_regnum_p (gdbarch, regnum))
414 {
415 int gpnum = regnum - tdep->al_regnum;
416
fe01d668
L
417 if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
418 {
419 /* Read ... AH, BH, CH, DH. */
420 regcache_raw_read (regcache,
421 gpnum - AMD64_NUM_LOWER_BYTE_REGS, raw_buf);
422 /* ... Modify ... (always little endian). */
423 memcpy (raw_buf + 1, buf, 1);
424 /* ... Write. */
425 regcache_raw_write (regcache,
426 gpnum - AMD64_NUM_LOWER_BYTE_REGS, raw_buf);
427 }
428 else
429 {
430 /* Read ... */
431 regcache_raw_read (regcache, gpnum, raw_buf);
432 /* ... Modify ... (always little endian). */
433 memcpy (raw_buf, buf, 1);
434 /* ... Write. */
435 regcache_raw_write (regcache, gpnum, raw_buf);
436 }
1ba53b71
L
437 }
438 else if (i386_dword_regnum_p (gdbarch, regnum))
439 {
440 int gpnum = regnum - tdep->eax_regnum;
441
442 /* Read ... */
443 regcache_raw_read (regcache, gpnum, raw_buf);
444 /* ... Modify ... (always little endian). */
445 memcpy (raw_buf, buf, 4);
446 /* ... Write. */
447 regcache_raw_write (regcache, gpnum, raw_buf);
448 }
449 else
450 i386_pseudo_register_write (gdbarch, regcache, regnum, buf);
451}
452
53e95fcf
JS
453\f
454
/* Register classes as defined in the psABI (section 3.2.3,
   "Parameter Passing").  */

enum amd64_reg_class
{
  AMD64_INTEGER,	/* Fits in a general-purpose register.  */
  AMD64_SSE,		/* Fits in a vector (SSE) register.  */
  AMD64_SSEUP,		/* Upper half of the last used SSE register.  */
  AMD64_X87,		/* 64-bit mantissa of a long double.  */
  AMD64_X87UP,		/* Exponent + padding of a long double.  */
  AMD64_COMPLEX_X87,	/* complex long double (%st0/%st1).  */
  AMD64_NO_CLASS,	/* Padding / empty.  */
  AMD64_MEMORY		/* Passed or returned in memory.  */
};
468
efb1c01c
MK
469/* Return the union class of CLASS1 and CLASS2. See the psABI for
470 details. */
471
472static enum amd64_reg_class
473amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
474{
475 /* Rule (a): If both classes are equal, this is the resulting class. */
476 if (class1 == class2)
477 return class1;
478
479 /* Rule (b): If one of the classes is NO_CLASS, the resulting class
480 is the other class. */
481 if (class1 == AMD64_NO_CLASS)
482 return class2;
483 if (class2 == AMD64_NO_CLASS)
484 return class1;
485
486 /* Rule (c): If one of the classes is MEMORY, the result is MEMORY. */
487 if (class1 == AMD64_MEMORY || class2 == AMD64_MEMORY)
488 return AMD64_MEMORY;
489
490 /* Rule (d): If one of the classes is INTEGER, the result is INTEGER. */
491 if (class1 == AMD64_INTEGER || class2 == AMD64_INTEGER)
492 return AMD64_INTEGER;
493
494 /* Rule (e): If one of the classes is X87, X87UP, COMPLEX_X87 class,
495 MEMORY is used as class. */
496 if (class1 == AMD64_X87 || class1 == AMD64_X87UP
497 || class1 == AMD64_COMPLEX_X87 || class2 == AMD64_X87
498 || class2 == AMD64_X87UP || class2 == AMD64_COMPLEX_X87)
499 return AMD64_MEMORY;
500
501 /* Rule (f): Otherwise class SSE is used. */
502 return AMD64_SSE;
503}
504
bf4d6c1c
JB
505static void amd64_classify (struct type *type, enum amd64_reg_class class[2]);
506
79b1ab3d
MK
507/* Return non-zero if TYPE is a non-POD structure or union type. */
508
509static int
510amd64_non_pod_p (struct type *type)
511{
512 /* ??? A class with a base class certainly isn't POD, but does this
513 catch all non-POD structure types? */
514 if (TYPE_CODE (type) == TYPE_CODE_STRUCT && TYPE_N_BASECLASSES (type) > 0)
515 return 1;
516
517 return 0;
518}
519
efb1c01c
MK
520/* Classify TYPE according to the rules for aggregate (structures and
521 arrays) and union types, and store the result in CLASS. */
c4f35dd8
MK
522
523static void
efb1c01c 524amd64_classify_aggregate (struct type *type, enum amd64_reg_class class[2])
53e95fcf 525{
efb1c01c
MK
526 /* 1. If the size of an object is larger than two eightbytes, or in
527 C++, is a non-POD structure or union type, or contains
528 unaligned fields, it has class memory. */
744a8059 529 if (TYPE_LENGTH (type) > 16 || amd64_non_pod_p (type))
53e95fcf 530 {
efb1c01c
MK
531 class[0] = class[1] = AMD64_MEMORY;
532 return;
53e95fcf 533 }
efb1c01c
MK
534
535 /* 2. Both eightbytes get initialized to class NO_CLASS. */
536 class[0] = class[1] = AMD64_NO_CLASS;
537
538 /* 3. Each field of an object is classified recursively so that
539 always two fields are considered. The resulting class is
540 calculated according to the classes of the fields in the
541 eightbyte: */
542
543 if (TYPE_CODE (type) == TYPE_CODE_ARRAY)
8ffd9b1b 544 {
efb1c01c
MK
545 struct type *subtype = check_typedef (TYPE_TARGET_TYPE (type));
546
547 /* All fields in an array have the same type. */
548 amd64_classify (subtype, class);
744a8059 549 if (TYPE_LENGTH (type) > 8 && class[1] == AMD64_NO_CLASS)
efb1c01c 550 class[1] = class[0];
8ffd9b1b 551 }
53e95fcf
JS
552 else
553 {
efb1c01c 554 int i;
53e95fcf 555
efb1c01c
MK
556 /* Structure or union. */
557 gdb_assert (TYPE_CODE (type) == TYPE_CODE_STRUCT
558 || TYPE_CODE (type) == TYPE_CODE_UNION);
559
560 for (i = 0; i < TYPE_NFIELDS (type); i++)
53e95fcf 561 {
efb1c01c
MK
562 struct type *subtype = check_typedef (TYPE_FIELD_TYPE (type, i));
563 int pos = TYPE_FIELD_BITPOS (type, i) / 64;
564 enum amd64_reg_class subclass[2];
e4e2711a
JB
565 int bitsize = TYPE_FIELD_BITSIZE (type, i);
566 int endpos;
567
568 if (bitsize == 0)
569 bitsize = TYPE_LENGTH (subtype) * 8;
570 endpos = (TYPE_FIELD_BITPOS (type, i) + bitsize - 1) / 64;
efb1c01c 571
562c50c2 572 /* Ignore static fields. */
d6a843b5 573 if (field_is_static (&TYPE_FIELD (type, i)))
562c50c2
MK
574 continue;
575
efb1c01c
MK
576 gdb_assert (pos == 0 || pos == 1);
577
578 amd64_classify (subtype, subclass);
579 class[pos] = amd64_merge_classes (class[pos], subclass[0]);
e4e2711a
JB
580 if (bitsize <= 64 && pos == 0 && endpos == 1)
581 /* This is a bit of an odd case: We have a field that would
582 normally fit in one of the two eightbytes, except that
583 it is placed in a way that this field straddles them.
584 This has been seen with a structure containing an array.
585
586 The ABI is a bit unclear in this case, but we assume that
587 this field's class (stored in subclass[0]) must also be merged
588 into class[1]. In other words, our field has a piece stored
589 in the second eight-byte, and thus its class applies to
590 the second eight-byte as well.
591
592 In the case where the field length exceeds 8 bytes,
593 it should not be necessary to merge the field class
594 into class[1]. As LEN > 8, subclass[1] is necessarily
595 different from AMD64_NO_CLASS. If subclass[1] is equal
596 to subclass[0], then the normal class[1]/subclass[1]
597 merging will take care of everything. For subclass[1]
598 to be different from subclass[0], I can only see the case
599 where we have a SSE/SSEUP or X87/X87UP pair, which both
600 use up all 16 bytes of the aggregate, and are already
601 handled just fine (because each portion sits on its own
602 8-byte). */
603 class[1] = amd64_merge_classes (class[1], subclass[0]);
efb1c01c
MK
604 if (pos == 0)
605 class[1] = amd64_merge_classes (class[1], subclass[1]);
53e95fcf 606 }
53e95fcf 607 }
efb1c01c
MK
608
609 /* 4. Then a post merger cleanup is done: */
610
611 /* Rule (a): If one of the classes is MEMORY, the whole argument is
612 passed in memory. */
613 if (class[0] == AMD64_MEMORY || class[1] == AMD64_MEMORY)
614 class[0] = class[1] = AMD64_MEMORY;
615
177b42fe 616 /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
efb1c01c
MK
617 SSE. */
618 if (class[0] == AMD64_SSEUP)
619 class[0] = AMD64_SSE;
620 if (class[1] == AMD64_SSEUP && class[0] != AMD64_SSE)
621 class[1] = AMD64_SSE;
622}
623
624/* Classify TYPE, and store the result in CLASS. */
625
bf4d6c1c 626static void
efb1c01c
MK
627amd64_classify (struct type *type, enum amd64_reg_class class[2])
628{
629 enum type_code code = TYPE_CODE (type);
630 int len = TYPE_LENGTH (type);
631
632 class[0] = class[1] = AMD64_NO_CLASS;
633
634 /* Arguments of types (signed and unsigned) _Bool, char, short, int,
5a7225ed
JB
635 long, long long, and pointers are in the INTEGER class. Similarly,
636 range types, used by languages such as Ada, are also in the INTEGER
637 class. */
efb1c01c 638 if ((code == TYPE_CODE_INT || code == TYPE_CODE_ENUM
b929c77f 639 || code == TYPE_CODE_BOOL || code == TYPE_CODE_RANGE
9db13498 640 || code == TYPE_CODE_CHAR
efb1c01c
MK
641 || code == TYPE_CODE_PTR || code == TYPE_CODE_REF)
642 && (len == 1 || len == 2 || len == 4 || len == 8))
643 class[0] = AMD64_INTEGER;
644
5daa78cc
TJB
645 /* Arguments of types float, double, _Decimal32, _Decimal64 and __m64
646 are in class SSE. */
647 else if ((code == TYPE_CODE_FLT || code == TYPE_CODE_DECFLOAT)
648 && (len == 4 || len == 8))
efb1c01c
MK
649 /* FIXME: __m64 . */
650 class[0] = AMD64_SSE;
651
5daa78cc
TJB
652 /* Arguments of types __float128, _Decimal128 and __m128 are split into
653 two halves. The least significant ones belong to class SSE, the most
efb1c01c 654 significant one to class SSEUP. */
5daa78cc
TJB
655 else if (code == TYPE_CODE_DECFLOAT && len == 16)
656 /* FIXME: __float128, __m128. */
657 class[0] = AMD64_SSE, class[1] = AMD64_SSEUP;
efb1c01c
MK
658
659 /* The 64-bit mantissa of arguments of type long double belongs to
660 class X87, the 16-bit exponent plus 6 bytes of padding belongs to
661 class X87UP. */
662 else if (code == TYPE_CODE_FLT && len == 16)
663 /* Class X87 and X87UP. */
664 class[0] = AMD64_X87, class[1] = AMD64_X87UP;
665
7f7930dd
MK
666 /* Arguments of complex T where T is one of the types float or
667 double get treated as if they are implemented as:
668
669 struct complexT {
670 T real;
671 T imag;
5f52445b
YQ
672 };
673
674 */
7f7930dd
MK
675 else if (code == TYPE_CODE_COMPLEX && len == 8)
676 class[0] = AMD64_SSE;
677 else if (code == TYPE_CODE_COMPLEX && len == 16)
678 class[0] = class[1] = AMD64_SSE;
679
680 /* A variable of type complex long double is classified as type
681 COMPLEX_X87. */
682 else if (code == TYPE_CODE_COMPLEX && len == 32)
683 class[0] = AMD64_COMPLEX_X87;
684
efb1c01c
MK
685 /* Aggregates. */
686 else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
687 || code == TYPE_CODE_UNION)
688 amd64_classify_aggregate (type, class);
689}
690
691static enum return_value_convention
6a3a010b 692amd64_return_value (struct gdbarch *gdbarch, struct value *function,
c055b101 693 struct type *type, struct regcache *regcache,
42835c2b 694 gdb_byte *readbuf, const gdb_byte *writebuf)
efb1c01c
MK
695{
696 enum amd64_reg_class class[2];
697 int len = TYPE_LENGTH (type);
90f90721
MK
698 static int integer_regnum[] = { AMD64_RAX_REGNUM, AMD64_RDX_REGNUM };
699 static int sse_regnum[] = { AMD64_XMM0_REGNUM, AMD64_XMM1_REGNUM };
efb1c01c
MK
700 int integer_reg = 0;
701 int sse_reg = 0;
702 int i;
703
704 gdb_assert (!(readbuf && writebuf));
705
706 /* 1. Classify the return type with the classification algorithm. */
bf4d6c1c 707 amd64_classify (type, class);
efb1c01c
MK
708
709 /* 2. If the type has class MEMORY, then the caller provides space
6fa57a7d 710 for the return value and passes the address of this storage in
0963b4bd 711 %rdi as if it were the first argument to the function. In effect,
6fa57a7d
MK
712 this address becomes a hidden first argument.
713
714 On return %rax will contain the address that has been passed in
715 by the caller in %rdi. */
efb1c01c 716 if (class[0] == AMD64_MEMORY)
6fa57a7d
MK
717 {
718 /* As indicated by the comment above, the ABI guarantees that we
719 can always find the return value just after the function has
720 returned. */
721
722 if (readbuf)
723 {
724 ULONGEST addr;
725
726 regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
727 read_memory (addr, readbuf, TYPE_LENGTH (type));
728 }
729
730 return RETURN_VALUE_ABI_RETURNS_ADDRESS;
731 }
efb1c01c 732
7f7930dd
MK
733 /* 8. If the class is COMPLEX_X87, the real part of the value is
734 returned in %st0 and the imaginary part in %st1. */
735 if (class[0] == AMD64_COMPLEX_X87)
736 {
737 if (readbuf)
738 {
739 regcache_raw_read (regcache, AMD64_ST0_REGNUM, readbuf);
740 regcache_raw_read (regcache, AMD64_ST1_REGNUM, readbuf + 16);
741 }
742
743 if (writebuf)
744 {
745 i387_return_value (gdbarch, regcache);
746 regcache_raw_write (regcache, AMD64_ST0_REGNUM, writebuf);
747 regcache_raw_write (regcache, AMD64_ST1_REGNUM, writebuf + 16);
748
749 /* Fix up the tag word such that both %st(0) and %st(1) are
750 marked as valid. */
751 regcache_raw_write_unsigned (regcache, AMD64_FTAG_REGNUM, 0xfff);
752 }
753
754 return RETURN_VALUE_REGISTER_CONVENTION;
755 }
756
efb1c01c 757 gdb_assert (class[1] != AMD64_MEMORY);
bad43aa5 758 gdb_assert (len <= 16);
efb1c01c
MK
759
760 for (i = 0; len > 0; i++, len -= 8)
761 {
762 int regnum = -1;
763 int offset = 0;
764
765 switch (class[i])
766 {
767 case AMD64_INTEGER:
768 /* 3. If the class is INTEGER, the next available register
769 of the sequence %rax, %rdx is used. */
770 regnum = integer_regnum[integer_reg++];
771 break;
772
773 case AMD64_SSE:
774 /* 4. If the class is SSE, the next available SSE register
775 of the sequence %xmm0, %xmm1 is used. */
776 regnum = sse_regnum[sse_reg++];
777 break;
778
779 case AMD64_SSEUP:
780 /* 5. If the class is SSEUP, the eightbyte is passed in the
781 upper half of the last used SSE register. */
782 gdb_assert (sse_reg > 0);
783 regnum = sse_regnum[sse_reg - 1];
784 offset = 8;
785 break;
786
787 case AMD64_X87:
788 /* 6. If the class is X87, the value is returned on the X87
789 stack in %st0 as 80-bit x87 number. */
90f90721 790 regnum = AMD64_ST0_REGNUM;
efb1c01c
MK
791 if (writebuf)
792 i387_return_value (gdbarch, regcache);
793 break;
794
795 case AMD64_X87UP:
796 /* 7. If the class is X87UP, the value is returned together
797 with the previous X87 value in %st0. */
798 gdb_assert (i > 0 && class[0] == AMD64_X87);
90f90721 799 regnum = AMD64_ST0_REGNUM;
efb1c01c
MK
800 offset = 8;
801 len = 2;
802 break;
803
804 case AMD64_NO_CLASS:
805 continue;
806
807 default:
808 gdb_assert (!"Unexpected register class.");
809 }
810
811 gdb_assert (regnum != -1);
812
813 if (readbuf)
814 regcache_raw_read_part (regcache, regnum, offset, min (len, 8),
42835c2b 815 readbuf + i * 8);
efb1c01c
MK
816 if (writebuf)
817 regcache_raw_write_part (regcache, regnum, offset, min (len, 8),
42835c2b 818 writebuf + i * 8);
efb1c01c
MK
819 }
820
821 return RETURN_VALUE_REGISTER_CONVENTION;
53e95fcf
JS
822}
823\f
824
720aa428
MK
825static CORE_ADDR
826amd64_push_arguments (struct regcache *regcache, int nargs,
6470d250 827 struct value **args, CORE_ADDR sp, int struct_return)
720aa428 828{
bf4d6c1c
JB
829 static int integer_regnum[] =
830 {
831 AMD64_RDI_REGNUM, /* %rdi */
832 AMD64_RSI_REGNUM, /* %rsi */
833 AMD64_RDX_REGNUM, /* %rdx */
834 AMD64_RCX_REGNUM, /* %rcx */
5b856f36
PM
835 AMD64_R8_REGNUM, /* %r8 */
836 AMD64_R9_REGNUM /* %r9 */
bf4d6c1c 837 };
720aa428
MK
838 static int sse_regnum[] =
839 {
840 /* %xmm0 ... %xmm7 */
90f90721
MK
841 AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
842 AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
843 AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
844 AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
720aa428
MK
845 };
846 struct value **stack_args = alloca (nargs * sizeof (struct value *));
847 int num_stack_args = 0;
848 int num_elements = 0;
849 int element = 0;
850 int integer_reg = 0;
851 int sse_reg = 0;
852 int i;
853
6470d250
MK
854 /* Reserve a register for the "hidden" argument. */
855 if (struct_return)
856 integer_reg++;
857
720aa428
MK
858 for (i = 0; i < nargs; i++)
859 {
4991999e 860 struct type *type = value_type (args[i]);
720aa428
MK
861 int len = TYPE_LENGTH (type);
862 enum amd64_reg_class class[2];
863 int needed_integer_regs = 0;
864 int needed_sse_regs = 0;
865 int j;
866
867 /* Classify argument. */
bf4d6c1c 868 amd64_classify (type, class);
720aa428
MK
869
870 /* Calculate the number of integer and SSE registers needed for
871 this argument. */
872 for (j = 0; j < 2; j++)
873 {
874 if (class[j] == AMD64_INTEGER)
875 needed_integer_regs++;
876 else if (class[j] == AMD64_SSE)
877 needed_sse_regs++;
878 }
879
880 /* Check whether enough registers are available, and if the
881 argument should be passed in registers at all. */
bf4d6c1c 882 if (integer_reg + needed_integer_regs > ARRAY_SIZE (integer_regnum)
720aa428
MK
883 || sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum)
884 || (needed_integer_regs == 0 && needed_sse_regs == 0))
885 {
886 /* The argument will be passed on the stack. */
887 num_elements += ((len + 7) / 8);
849e9755 888 stack_args[num_stack_args++] = args[i];
720aa428
MK
889 }
890 else
891 {
892 /* The argument will be passed in registers. */
d8de1ef7
MK
893 const gdb_byte *valbuf = value_contents (args[i]);
894 gdb_byte buf[8];
720aa428
MK
895
896 gdb_assert (len <= 16);
897
898 for (j = 0; len > 0; j++, len -= 8)
899 {
900 int regnum = -1;
901 int offset = 0;
902
903 switch (class[j])
904 {
905 case AMD64_INTEGER:
bf4d6c1c 906 regnum = integer_regnum[integer_reg++];
720aa428
MK
907 break;
908
909 case AMD64_SSE:
910 regnum = sse_regnum[sse_reg++];
911 break;
912
913 case AMD64_SSEUP:
914 gdb_assert (sse_reg > 0);
915 regnum = sse_regnum[sse_reg - 1];
916 offset = 8;
917 break;
918
919 default:
920 gdb_assert (!"Unexpected register class.");
921 }
922
923 gdb_assert (regnum != -1);
924 memset (buf, 0, sizeof buf);
925 memcpy (buf, valbuf + j * 8, min (len, 8));
926 regcache_raw_write_part (regcache, regnum, offset, 8, buf);
927 }
928 }
929 }
930
931 /* Allocate space for the arguments on the stack. */
932 sp -= num_elements * 8;
933
934 /* The psABI says that "The end of the input argument area shall be
935 aligned on a 16 byte boundary." */
936 sp &= ~0xf;
937
938 /* Write out the arguments to the stack. */
939 for (i = 0; i < num_stack_args; i++)
940 {
4991999e 941 struct type *type = value_type (stack_args[i]);
d8de1ef7 942 const gdb_byte *valbuf = value_contents (stack_args[i]);
849e9755
JB
943 int len = TYPE_LENGTH (type);
944
945 write_memory (sp + element * 8, valbuf, len);
946 element += ((len + 7) / 8);
720aa428
MK
947 }
948
949 /* The psABI says that "For calls that may call functions that use
950 varargs or stdargs (prototype-less calls or calls to functions
951 containing ellipsis (...) in the declaration) %al is used as
952 hidden argument to specify the number of SSE registers used. */
90f90721 953 regcache_raw_write_unsigned (regcache, AMD64_RAX_REGNUM, sse_reg);
720aa428
MK
954 return sp;
955}
956
/* Implement the "push_dummy_call" gdbarch method.  Set up the inferior
   to call FUNCTION with the NARGS arguments in ARGS, arranging for the
   call to "return" to BP_ADDR.  SP is the current stack pointer;
   STRUCT_RETURN/STRUCT_ADDR describe a struct-return convention call.
   Returns the value used to identify the dummy frame (SP + 16).  */

static CORE_ADDR
amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
		       struct regcache *regcache, CORE_ADDR bp_addr,
		       int nargs, struct value **args, CORE_ADDR sp,
		       int struct_return, CORE_ADDR struct_addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  gdb_byte buf[8];

  /* Pass arguments (registers and stack); SP comes back adjusted and
     16-byte aligned.  */
  sp = amd64_push_arguments (regcache, nargs, args, sp, struct_return);

  /* Pass "hidden" argument.  Per the psABI, the address where a
     returned-in-memory struct is to be stored is passed in %rdi as an
     implicit first argument.  */
  if (struct_return)
    {
      store_unsigned_integer (buf, 8, byte_order, struct_addr);
      regcache_cooked_write (regcache, AMD64_RDI_REGNUM, buf);
    }

  /* Store return address.  */
  sp -= 8;
  store_unsigned_integer (buf, 8, byte_order, bp_addr);
  write_memory (sp, buf, 8);

  /* Finally, update the stack pointer...  */
  store_unsigned_integer (buf, 8, byte_order, sp);
  regcache_cooked_write (regcache, AMD64_RSP_REGNUM, buf);

  /* ...and fake a frame pointer.  BUF still holds SP, so %rbp == %rsp
     here by construction.  */
  regcache_cooked_write (regcache, AMD64_RBP_REGNUM, buf);

  return sp + 16;
}
c4f35dd8 990\f
/* Displaced instruction handling.  */


/* A partially decoded instruction.
   This contains enough details for displaced stepping purposes.
   All offsets below are byte offsets into RAW_INSN.  */

struct amd64_insn
{
  /* The number of opcode bytes.  */
  int opcode_len;
  /* The offset of the rex prefix or -1 if not present.  */
  int rex_offset;
  /* The offset to the first opcode byte.  */
  int opcode_offset;
  /* The offset to the modrm byte or -1 if not present.  */
  int modrm_offset;

  /* The raw instruction.  */
  gdb_byte *raw_insn;
};
1010
/* Per-step state for a displaced-stepped instruction, returned by
   amd64_displaced_step_copy_insn and consumed by
   amd64_displaced_step_fixup.  */

struct displaced_step_closure
{
  /* For rip-relative insns, saved copy of the reg we use instead of %rip.  */
  int tmp_used;
  int tmp_regno;
  ULONGEST tmp_save;

  /* Details of the instruction.  */
  struct amd64_insn insn_details;

  /* Amount of space allocated to insn_buf.  */
  int max_len;

  /* The possibly modified insn.
     This is a variable-length field.  */
  gdb_byte insn_buf[1];
};
1028
/* WARNING: Keep onebyte_has_modrm, twobyte_has_modrm in sync with
   ../opcodes/i386-dis.c (until libopcodes exports them, or an alternative,
   at which point delete these in favor of libopcodes' versions).  */

/* Indexed by the first opcode byte: non-zero if a ModRM byte follows.  */

static const unsigned char onebyte_has_modrm[256] = {
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
  /*	   -------------------------------	  */
  /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
  /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
  /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
  /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
  /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
  /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
  /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
  /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
  /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
  /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
  /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
  /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
  /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
  /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
  /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
  /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1  /* f0 */
  /*	   -------------------------------	  */
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
};

/* Indexed by the second opcode byte (after the 0x0f escape): non-zero
   if a ModRM byte follows.  */

static const unsigned char twobyte_has_modrm[256] = {
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
  /*	   -------------------------------	  */
  /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
  /* 10 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 1f */
  /* 20 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 2f */
  /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
  /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
  /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
  /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
  /* 70 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 7f */
  /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
  /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
  /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
  /* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */
  /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
  /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
  /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
  /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0  /* ff */
  /*	   -------------------------------	  */
  /*	   0 1 2 3 4 5 6 7 8 9 a b c d e f	  */
};
1078
static int amd64_syscall_p (const struct amd64_insn *insn, int *lengthp);

/* Return non-zero if PFX is a REX instruction prefix byte.  */

static int
rex_prefix_p (gdb_byte pfx)
{
  return REX_PREFIX_P (pfx);
}
1086
1087/* Skip the legacy instruction prefixes in INSN.
1088 We assume INSN is properly sentineled so we don't have to worry
1089 about falling off the end of the buffer. */
1090
1091static gdb_byte *
1903f0e6 1092amd64_skip_prefixes (gdb_byte *insn)
35669430
DE
1093{
1094 while (1)
1095 {
1096 switch (*insn)
1097 {
1098 case DATA_PREFIX_OPCODE:
1099 case ADDR_PREFIX_OPCODE:
1100 case CS_PREFIX_OPCODE:
1101 case DS_PREFIX_OPCODE:
1102 case ES_PREFIX_OPCODE:
1103 case FS_PREFIX_OPCODE:
1104 case GS_PREFIX_OPCODE:
1105 case SS_PREFIX_OPCODE:
1106 case LOCK_PREFIX_OPCODE:
1107 case REPE_PREFIX_OPCODE:
1108 case REPNE_PREFIX_OPCODE:
1109 ++insn;
1110 continue;
1111 default:
1112 break;
1113 }
1114 break;
1115 }
1116
1117 return insn;
1118}
1119
/* Return an integer register (other than RSP) that is unused as an input
   operand in INSN.
   In order to not require adding a rex prefix if the insn doesn't already
   have one, the result is restricted to RAX ... RDI, sans RSP.
   The register numbering of the result follows architecture ordering,
   e.g. RDI = 7.  */

static int
amd64_get_unused_input_int_reg (const struct amd64_insn *details)
{
  /* 1 bit for each reg */
  int used_regs_mask = 0;

  /* There can be at most 3 int regs used as inputs in an insn, and we have
     7 to choose from (RAX ... RDI, sans RSP).
     This allows us to take a conservative approach and keep things simple.
     E.g. By avoiding RAX, we don't have to specifically watch for opcodes
     that implicitly specify RAX.  */

  /* Avoid RAX.  */
  used_regs_mask |= 1 << EAX_REG_NUM;
  /* Similarly avoid RDX, implicit operand in divides.  */
  used_regs_mask |= 1 << EDX_REG_NUM;
  /* Avoid RSP.  */
  used_regs_mask |= 1 << ESP_REG_NUM;

  /* If the opcode is one byte long and there's no ModRM byte,
     assume the opcode specifies a register.  */
  if (details->opcode_len == 1 && details->modrm_offset == -1)
    used_regs_mask |= 1 << (details->raw_insn[details->opcode_offset] & 7);

  /* Mark used regs in the modrm/sib bytes.  */
  if (details->modrm_offset != -1)
    {
      int modrm = details->raw_insn[details->modrm_offset];
      int mod = MODRM_MOD_FIELD (modrm);
      int reg = MODRM_REG_FIELD (modrm);
      int rm = MODRM_RM_FIELD (modrm);
      /* mod != 3 with r/m == 4 means a SIB byte follows the ModRM.  */
      int have_sib = mod != 3 && rm == 4;

      /* Assume the reg field of the modrm byte specifies a register.  */
      used_regs_mask |= 1 << reg;

      if (have_sib)
	{
	  int base = SIB_BASE_FIELD (details->raw_insn[details->modrm_offset + 1]);
	  int idx = SIB_INDEX_FIELD (details->raw_insn[details->modrm_offset + 1]);
	  used_regs_mask |= 1 << base;
	  used_regs_mask |= 1 << idx;
	}
      else
	{
	  used_regs_mask |= 1 << rm;
	}
    }

  /* Only the low 8 register bits can be set, and at least one of the
     eight registers must still be free.  */
  gdb_assert (used_regs_mask < 256);
  gdb_assert (used_regs_mask != 255);

  /* Finally, find a free reg.  */
  {
    int i;

    for (i = 0; i < 8; ++i)
      {
	if (! (used_regs_mask & (1 << i)))
	  return i;
      }

    /* We shouldn't get here.  */
    internal_error (__FILE__, __LINE__, _("unable to find free reg"));
  }
}
1193
/* Extract the details of INSN that we need.  Fills in DETAILS with the
   opcode length, and the offsets (relative to INSN) of the REX prefix,
   the first opcode byte, and the ModRM byte, using -1 for "not
   present".  */

static void
amd64_get_insn_details (gdb_byte *insn, struct amd64_insn *details)
{
  gdb_byte *start = insn;
  int need_modrm;

  details->raw_insn = insn;

  details->opcode_len = -1;
  details->rex_offset = -1;
  details->opcode_offset = -1;
  details->modrm_offset = -1;

  /* Skip legacy instruction prefixes.  */
  insn = amd64_skip_prefixes (insn);

  /* Skip REX instruction prefix.  */
  if (rex_prefix_p (*insn))
    {
      details->rex_offset = insn - start;
      ++insn;
    }

  details->opcode_offset = insn - start;

  if (*insn == TWO_BYTE_OPCODE_ESCAPE)
    {
      /* Two or three-byte opcode.  */
      ++insn;
      need_modrm = twobyte_has_modrm[*insn];

      /* Check for three-byte opcode.  */
      switch (*insn)
	{
	case 0x24:
	case 0x25:
	case 0x38:
	case 0x3a:
	case 0x7a:
	case 0x7b:
	  ++insn;
	  details->opcode_len = 3;
	  break;
	default:
	  details->opcode_len = 2;
	  break;
	}
    }
  else
    {
      /* One-byte opcode.  */
      need_modrm = onebyte_has_modrm[*insn];
      details->opcode_len = 1;
    }

  if (need_modrm)
    {
      /* The ModRM byte immediately follows the last opcode byte.  */
      ++insn;
      details->modrm_offset = insn - start;
    }
}
1257
/* Update %rip-relative addressing in INSN.

   %rip-relative addressing only uses a 32-bit displacement.
   32 bits is not enough to be guaranteed to cover the distance between where
   the real instruction is and where its copy is.
   Convert the insn to use base+disp addressing.
   We set base = pc + insn_length so we can leave disp unchanged.  */

static void
fixup_riprel (struct gdbarch *gdbarch, struct displaced_step_closure *dsc,
	      CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  const struct amd64_insn *insn_details = &dsc->insn_details;
  int modrm_offset = insn_details->modrm_offset;
  gdb_byte *insn = insn_details->raw_insn + modrm_offset;
  CORE_ADDR rip_base;
  int32_t disp;
  int insn_length;
  int arch_tmp_regno, tmp_regno;
  ULONGEST orig_value;

  /* %rip+disp32 addressing mode, displacement follows ModRM byte.  */
  ++insn;

  /* Compute the rip-relative address.  Note that DISP itself is left
     unchanged in the copied insn; only the base register changes.  */
  disp = extract_signed_integer (insn, sizeof (int32_t), byte_order);
  insn_length = gdb_buffered_insn_length (gdbarch, dsc->insn_buf,
					  dsc->max_len, from);
  rip_base = from + insn_length;

  /* We need a register to hold the address.
     Pick one not used in the insn.
     NOTE: arch_tmp_regno uses architecture ordering, e.g. RDI = 7.  */
  arch_tmp_regno = amd64_get_unused_input_int_reg (insn_details);
  tmp_regno = amd64_arch_reg_to_regnum (arch_tmp_regno);

  /* REX.B should be unset as we were using rip-relative addressing,
     but ensure it's unset anyway, tmp_regno is not r8-r15.  */
  if (insn_details->rex_offset != -1)
    dsc->insn_buf[insn_details->rex_offset] &= ~REX_B;

  /* Save the temp register's original value so the fixup phase can
     restore it after the step.  */
  regcache_cooked_read_unsigned (regs, tmp_regno, &orig_value);
  dsc->tmp_regno = tmp_regno;
  dsc->tmp_save = orig_value;
  dsc->tmp_used = 1;

  /* Convert the ModRM field to be base+disp: mod = 10 (disp32),
     r/m = the chosen temp register.  */
  dsc->insn_buf[modrm_offset] &= ~0xc7;
  dsc->insn_buf[modrm_offset] |= 0x80 + arch_tmp_regno;

  regcache_cooked_write_unsigned (regs, tmp_regno, rip_base);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: %%rip-relative addressing used.\n"
			"displaced: using temp reg %d, old value %s, new value %s\n",
			dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save),
			paddress (gdbarch, rip_base));
}
1317
1318static void
1319fixup_displaced_copy (struct gdbarch *gdbarch,
1320 struct displaced_step_closure *dsc,
1321 CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
1322{
1323 const struct amd64_insn *details = &dsc->insn_details;
1324
1325 if (details->modrm_offset != -1)
1326 {
1327 gdb_byte modrm = details->raw_insn[details->modrm_offset];
1328
1329 if ((modrm & 0xc7) == 0x05)
1330 {
1331 /* The insn uses rip-relative addressing.
1332 Deal with it. */
1333 fixup_riprel (gdbarch, dsc, from, to, regs);
1334 }
1335 }
1336}
1337
/* Implement the "displaced_step_copy_insn" gdbarch method: copy the
   instruction at FROM to the scratch area at TO, patching it as needed
   so it can execute there.  Returns a closure describing the copy for
   use by amd64_displaced_step_fixup; ownership passes to the caller
   (freed with xfree).  */

struct displaced_step_closure *
amd64_displaced_step_copy_insn (struct gdbarch *gdbarch,
				CORE_ADDR from, CORE_ADDR to,
				struct regcache *regs)
{
  int len = gdbarch_max_insn_length (gdbarch);
  /* Extra space for sentinels so fixup_{riprel,displaced_copy} don't have to
     continually watch for running off the end of the buffer.  */
  int fixup_sentinel_space = len;
  struct displaced_step_closure *dsc =
    xmalloc (sizeof (*dsc) + len + fixup_sentinel_space);
  gdb_byte *buf = &dsc->insn_buf[0];
  struct amd64_insn *details = &dsc->insn_details;

  dsc->tmp_used = 0;
  dsc->max_len = len + fixup_sentinel_space;

  read_memory (from, buf, len);

  /* Set up the sentinel space so we don't have to worry about running
     off the end of the buffer.  An excessive number of leading prefixes
     could otherwise cause this.  */
  memset (buf + len, 0, fixup_sentinel_space);

  amd64_get_insn_details (buf, details);

  /* GDB may get control back after the insn after the syscall.
     Presumably this is a kernel bug.
     If this is a syscall, make sure there's a nop afterwards.  */
  {
    int syscall_length;

    if (amd64_syscall_p (details, &syscall_length))
      buf[details->opcode_offset + syscall_length] = NOP_OPCODE;
  }

  /* Modify the insn to cope with the address where it will be executed from.
     In particular, handle any rip-relative addressing.  */
  fixup_displaced_copy (gdbarch, dsc, from, to, regs);

  write_memory (to, buf, len);

  if (debug_displaced)
    {
      fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
			  paddress (gdbarch, from), paddress (gdbarch, to));
      displaced_step_dump_bytes (gdb_stdlog, buf, len);
    }

  return dsc;
}
1389
1390static int
1391amd64_absolute_jmp_p (const struct amd64_insn *details)
1392{
1393 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1394
1395 if (insn[0] == 0xff)
1396 {
1397 /* jump near, absolute indirect (/4) */
1398 if ((insn[1] & 0x38) == 0x20)
1399 return 1;
1400
1401 /* jump far, absolute indirect (/5) */
1402 if ((insn[1] & 0x38) == 0x28)
1403 return 1;
1404 }
1405
1406 return 0;
1407}
1408
c2170eef
MM
1409/* Return non-zero if the instruction DETAILS is a jump, zero otherwise. */
1410
1411static int
1412amd64_jmp_p (const struct amd64_insn *details)
1413{
1414 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1415
1416 /* jump short, relative. */
1417 if (insn[0] == 0xeb)
1418 return 1;
1419
1420 /* jump near, relative. */
1421 if (insn[0] == 0xe9)
1422 return 1;
1423
1424 return amd64_absolute_jmp_p (details);
1425}
1426
35669430
DE
1427static int
1428amd64_absolute_call_p (const struct amd64_insn *details)
1429{
1430 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1431
1432 if (insn[0] == 0xff)
1433 {
1434 /* Call near, absolute indirect (/2) */
1435 if ((insn[1] & 0x38) == 0x10)
1436 return 1;
1437
1438 /* Call far, absolute indirect (/3) */
1439 if ((insn[1] & 0x38) == 0x18)
1440 return 1;
1441 }
1442
1443 return 0;
1444}
1445
1446static int
1447amd64_ret_p (const struct amd64_insn *details)
1448{
1449 /* NOTE: gcc can emit "repz ; ret". */
1450 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1451
1452 switch (insn[0])
1453 {
1454 case 0xc2: /* ret near, pop N bytes */
1455 case 0xc3: /* ret near */
1456 case 0xca: /* ret far, pop N bytes */
1457 case 0xcb: /* ret far */
1458 case 0xcf: /* iret */
1459 return 1;
1460
1461 default:
1462 return 0;
1463 }
1464}
1465
1466static int
1467amd64_call_p (const struct amd64_insn *details)
1468{
1469 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1470
1471 if (amd64_absolute_call_p (details))
1472 return 1;
1473
1474 /* call near, relative */
1475 if (insn[0] == 0xe8)
1476 return 1;
1477
1478 return 0;
1479}
1480
35669430
DE
1481/* Return non-zero if INSN is a system call, and set *LENGTHP to its
1482 length in bytes. Otherwise, return zero. */
1483
1484static int
1485amd64_syscall_p (const struct amd64_insn *details, int *lengthp)
1486{
1487 const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1488
1489 if (insn[0] == 0x0f && insn[1] == 0x05)
1490 {
1491 *lengthp = 2;
1492 return 1;
1493 }
1494
1495 return 0;
1496}
1497
c2170eef
MM
1498/* Classify the instruction at ADDR using PRED.
1499 Throw an error if the memory can't be read. */
1500
1501static int
1502amd64_classify_insn_at (struct gdbarch *gdbarch, CORE_ADDR addr,
1503 int (*pred) (const struct amd64_insn *))
1504{
1505 struct amd64_insn details;
1506 gdb_byte *buf;
1507 int len, classification;
1508
1509 len = gdbarch_max_insn_length (gdbarch);
1510 buf = alloca (len);
1511
1512 read_code (addr, buf, len);
1513 amd64_get_insn_details (buf, &details);
1514
1515 classification = pred (&details);
1516
1517 return classification;
1518}
1519
/* The gdbarch insn_is_call method.  Return non-zero if the instruction
   at ADDR is a call; throws if the memory can't be read.  */

static int
amd64_insn_is_call (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_call_p);
}
1527
/* The gdbarch insn_is_ret method.  Return non-zero if the instruction
   at ADDR is a return; throws if the memory can't be read.  */

static int
amd64_insn_is_ret (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_ret_p);
}
1535
/* The gdbarch insn_is_jump method.  Return non-zero if the instruction
   at ADDR is a jump; throws if the memory can't be read.  */

static int
amd64_insn_is_jump (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  return amd64_classify_insn_at (gdbarch, addr, amd64_jmp_p);
}
1543
35669430
DE
1544/* Fix up the state of registers and memory after having single-stepped
1545 a displaced instruction. */
1546
1547void
1548amd64_displaced_step_fixup (struct gdbarch *gdbarch,
1549 struct displaced_step_closure *dsc,
1550 CORE_ADDR from, CORE_ADDR to,
1551 struct regcache *regs)
1552{
e17a4113 1553 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
35669430
DE
1554 /* The offset we applied to the instruction's address. */
1555 ULONGEST insn_offset = to - from;
1556 gdb_byte *insn = dsc->insn_buf;
1557 const struct amd64_insn *insn_details = &dsc->insn_details;
1558
1559 if (debug_displaced)
1560 fprintf_unfiltered (gdb_stdlog,
5af949e3 1561 "displaced: fixup (%s, %s), "
35669430 1562 "insn = 0x%02x 0x%02x ...\n",
5af949e3
UW
1563 paddress (gdbarch, from), paddress (gdbarch, to),
1564 insn[0], insn[1]);
35669430
DE
1565
1566 /* If we used a tmp reg, restore it. */
1567
1568 if (dsc->tmp_used)
1569 {
1570 if (debug_displaced)
5af949e3
UW
1571 fprintf_unfiltered (gdb_stdlog, "displaced: restoring reg %d to %s\n",
1572 dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save));
35669430
DE
1573 regcache_cooked_write_unsigned (regs, dsc->tmp_regno, dsc->tmp_save);
1574 }
1575
1576 /* The list of issues to contend with here is taken from
1577 resume_execution in arch/x86/kernel/kprobes.c, Linux 2.6.28.
1578 Yay for Free Software! */
1579
1580 /* Relocate the %rip back to the program's instruction stream,
1581 if necessary. */
1582
1583 /* Except in the case of absolute or indirect jump or call
1584 instructions, or a return instruction, the new rip is relative to
1585 the displaced instruction; make it relative to the original insn.
1586 Well, signal handler returns don't need relocation either, but we use the
1587 value of %rip to recognize those; see below. */
1588 if (! amd64_absolute_jmp_p (insn_details)
1589 && ! amd64_absolute_call_p (insn_details)
1590 && ! amd64_ret_p (insn_details))
1591 {
1592 ULONGEST orig_rip;
1593 int insn_len;
1594
1595 regcache_cooked_read_unsigned (regs, AMD64_RIP_REGNUM, &orig_rip);
1596
1597 /* A signal trampoline system call changes the %rip, resuming
1598 execution of the main program after the signal handler has
1599 returned. That makes them like 'return' instructions; we
1600 shouldn't relocate %rip.
1601
1602 But most system calls don't, and we do need to relocate %rip.
1603
1604 Our heuristic for distinguishing these cases: if stepping
1605 over the system call instruction left control directly after
1606 the instruction, the we relocate --- control almost certainly
1607 doesn't belong in the displaced copy. Otherwise, we assume
1608 the instruction has put control where it belongs, and leave
1609 it unrelocated. Goodness help us if there are PC-relative
1610 system calls. */
1611 if (amd64_syscall_p (insn_details, &insn_len)
1612 && orig_rip != to + insn_len
1613 /* GDB can get control back after the insn after the syscall.
1614 Presumably this is a kernel bug.
1615 Fixup ensures its a nop, we add one to the length for it. */
1616 && orig_rip != to + insn_len + 1)
1617 {
1618 if (debug_displaced)
1619 fprintf_unfiltered (gdb_stdlog,
1620 "displaced: syscall changed %%rip; "
1621 "not relocating\n");
1622 }
1623 else
1624 {
1625 ULONGEST rip = orig_rip - insn_offset;
1626
1903f0e6
DE
1627 /* If we just stepped over a breakpoint insn, we don't backup
1628 the pc on purpose; this is to match behaviour without
1629 stepping. */
35669430
DE
1630
1631 regcache_cooked_write_unsigned (regs, AMD64_RIP_REGNUM, rip);
1632
1633 if (debug_displaced)
1634 fprintf_unfiltered (gdb_stdlog,
1635 "displaced: "
5af949e3
UW
1636 "relocated %%rip from %s to %s\n",
1637 paddress (gdbarch, orig_rip),
1638 paddress (gdbarch, rip));
35669430
DE
1639 }
1640 }
1641
1642 /* If the instruction was PUSHFL, then the TF bit will be set in the
1643 pushed value, and should be cleared. We'll leave this for later,
1644 since GDB already messes up the TF flag when stepping over a
1645 pushfl. */
1646
1647 /* If the instruction was a call, the return address now atop the
1648 stack is the address following the copied instruction. We need
1649 to make it the address following the original instruction. */
1650 if (amd64_call_p (insn_details))
1651 {
1652 ULONGEST rsp;
1653 ULONGEST retaddr;
1654 const ULONGEST retaddr_len = 8;
1655
1656 regcache_cooked_read_unsigned (regs, AMD64_RSP_REGNUM, &rsp);
e17a4113 1657 retaddr = read_memory_unsigned_integer (rsp, retaddr_len, byte_order);
35669430 1658 retaddr = (retaddr - insn_offset) & 0xffffffffUL;
e17a4113 1659 write_memory_unsigned_integer (rsp, retaddr_len, byte_order, retaddr);
35669430
DE
1660
1661 if (debug_displaced)
1662 fprintf_unfiltered (gdb_stdlog,
5af949e3
UW
1663 "displaced: relocated return addr at %s "
1664 "to %s\n",
1665 paddress (gdbarch, rsp),
1666 paddress (gdbarch, retaddr));
35669430
DE
1667 }
1668}
dde08ee1
PA
1669
1670/* If the instruction INSN uses RIP-relative addressing, return the
1671 offset into the raw INSN where the displacement to be adjusted is
1672 found. Returns 0 if the instruction doesn't use RIP-relative
1673 addressing. */
1674
1675static int
1676rip_relative_offset (struct amd64_insn *insn)
1677{
1678 if (insn->modrm_offset != -1)
1679 {
1680 gdb_byte modrm = insn->raw_insn[insn->modrm_offset];
1681
1682 if ((modrm & 0xc7) == 0x05)
1683 {
1684 /* The displacement is found right after the ModRM byte. */
1685 return insn->modrm_offset + 1;
1686 }
1687 }
1688
1689 return 0;
1690}
1691
/* Write the LEN bytes at BUF to the target at *TO, then advance *TO
   past them.  NOTE(review): the return value of target_write_memory is
   ignored here, so a failed write goes unreported -- confirm callers
   tolerate that.  */

static void
append_insns (CORE_ADDR *to, ULONGEST len, const gdb_byte *buf)
{
  target_write_memory (*to, buf, len);
  *to += len;
}
1698
60965737 1699static void
dde08ee1
PA
1700amd64_relocate_instruction (struct gdbarch *gdbarch,
1701 CORE_ADDR *to, CORE_ADDR oldloc)
1702{
1703 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1704 int len = gdbarch_max_insn_length (gdbarch);
1705 /* Extra space for sentinels. */
1706 int fixup_sentinel_space = len;
1707 gdb_byte *buf = xmalloc (len + fixup_sentinel_space);
1708 struct amd64_insn insn_details;
1709 int offset = 0;
1710 LONGEST rel32, newrel;
1711 gdb_byte *insn;
1712 int insn_length;
1713
1714 read_memory (oldloc, buf, len);
1715
1716 /* Set up the sentinel space so we don't have to worry about running
1717 off the end of the buffer. An excessive number of leading prefixes
1718 could otherwise cause this. */
1719 memset (buf + len, 0, fixup_sentinel_space);
1720
1721 insn = buf;
1722 amd64_get_insn_details (insn, &insn_details);
1723
1724 insn_length = gdb_buffered_insn_length (gdbarch, insn, len, oldloc);
1725
1726 /* Skip legacy instruction prefixes. */
1727 insn = amd64_skip_prefixes (insn);
1728
1729 /* Adjust calls with 32-bit relative addresses as push/jump, with
1730 the address pushed being the location where the original call in
1731 the user program would return to. */
1732 if (insn[0] == 0xe8)
1733 {
1734 gdb_byte push_buf[16];
1735 unsigned int ret_addr;
1736
1737 /* Where "ret" in the original code will return to. */
1738 ret_addr = oldloc + insn_length;
0963b4bd 1739 push_buf[0] = 0x68; /* pushq $... */
144db827 1740 store_unsigned_integer (&push_buf[1], 4, byte_order, ret_addr);
dde08ee1
PA
1741 /* Push the push. */
1742 append_insns (to, 5, push_buf);
1743
1744 /* Convert the relative call to a relative jump. */
1745 insn[0] = 0xe9;
1746
1747 /* Adjust the destination offset. */
1748 rel32 = extract_signed_integer (insn + 1, 4, byte_order);
1749 newrel = (oldloc - *to) + rel32;
f4a1794a
KY
1750 store_signed_integer (insn + 1, 4, byte_order, newrel);
1751
1752 if (debug_displaced)
1753 fprintf_unfiltered (gdb_stdlog,
1754 "Adjusted insn rel32=%s at %s to"
1755 " rel32=%s at %s\n",
1756 hex_string (rel32), paddress (gdbarch, oldloc),
1757 hex_string (newrel), paddress (gdbarch, *to));
dde08ee1
PA
1758
1759 /* Write the adjusted jump into its displaced location. */
1760 append_insns (to, 5, insn);
1761 return;
1762 }
1763
1764 offset = rip_relative_offset (&insn_details);
1765 if (!offset)
1766 {
1767 /* Adjust jumps with 32-bit relative addresses. Calls are
1768 already handled above. */
1769 if (insn[0] == 0xe9)
1770 offset = 1;
1771 /* Adjust conditional jumps. */
1772 else if (insn[0] == 0x0f && (insn[1] & 0xf0) == 0x80)
1773 offset = 2;
1774 }
1775
1776 if (offset)
1777 {
1778 rel32 = extract_signed_integer (insn + offset, 4, byte_order);
1779 newrel = (oldloc - *to) + rel32;
f4a1794a 1780 store_signed_integer (insn + offset, 4, byte_order, newrel);
dde08ee1
PA
1781 if (debug_displaced)
1782 fprintf_unfiltered (gdb_stdlog,
f4a1794a
KY
1783 "Adjusted insn rel32=%s at %s to"
1784 " rel32=%s at %s\n",
dde08ee1
PA
1785 hex_string (rel32), paddress (gdbarch, oldloc),
1786 hex_string (newrel), paddress (gdbarch, *to));
1787 }
1788
1789 /* Write the adjusted instruction into its displaced location. */
1790 append_insns (to, insn_length, buf);
1791}
1792
35669430 1793\f
/* The maximum number of saved registers.  This should include %rip.  */
#define AMD64_NUM_SAVED_REGS	AMD64_NUM_GREGS

/* Cache of per-frame analysis results for the amd64 prologue-based
   unwinder.  */

struct amd64_frame_cache
{
  /* Base address.  */
  CORE_ADDR base;
  /* Non-zero once BASE has been computed.  */
  int base_p;
  CORE_ADDR sp_offset;
  CORE_ADDR pc;

  /* Saved registers.  Entries start out as offsets and are later
     converted to addresses; -1 means "not saved"/invalid.  */
  CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
  CORE_ADDR saved_sp;
  /* Register holding the pre-realignment stack pointer, or -1
     (see amd64_analyze_stack_align).  */
  int saved_sp_reg;

  /* Do we have a frame?  */
  int frameless_p;
};
8dda9770 1813
d2449ee8 1814/* Initialize a frame cache. */
c4f35dd8 1815
d2449ee8
DJ
1816static void
1817amd64_init_frame_cache (struct amd64_frame_cache *cache)
8dda9770 1818{
c4f35dd8
MK
1819 int i;
1820
c4f35dd8
MK
1821 /* Base address. */
1822 cache->base = 0;
8fbca658 1823 cache->base_p = 0;
c4f35dd8
MK
1824 cache->sp_offset = -8;
1825 cache->pc = 0;
1826
1827 /* Saved registers. We initialize these to -1 since zero is a valid
bba66b87
DE
1828 offset (that's where %rbp is supposed to be stored).
1829 The values start out as being offsets, and are later converted to
1830 addresses (at which point -1 is interpreted as an address, still meaning
1831 "invalid"). */
e53bef9f 1832 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
c4f35dd8
MK
1833 cache->saved_regs[i] = -1;
1834 cache->saved_sp = 0;
e0c62198 1835 cache->saved_sp_reg = -1;
c4f35dd8
MK
1836
1837 /* Frameless until proven otherwise. */
1838 cache->frameless_p = 1;
d2449ee8 1839}
c4f35dd8 1840
d2449ee8
DJ
1841/* Allocate and initialize a frame cache. */
1842
1843static struct amd64_frame_cache *
1844amd64_alloc_frame_cache (void)
1845{
1846 struct amd64_frame_cache *cache;
1847
1848 cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);
1849 amd64_init_frame_cache (cache);
c4f35dd8 1850 return cache;
8dda9770 1851}
53e95fcf 1852
/* GCC 4.4 and later, can put code in the prologue to realign the
   stack pointer.  Check whether PC points to such code, and update
   CACHE accordingly.  Return the first instruction after the code
   sequence or CURRENT_PC, whichever is smaller.  If we don't
   recognize the code, return PC.  */

static CORE_ADDR
amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
			   struct amd64_frame_cache *cache)
{
  /* There are 2 code sequences to re-align stack before the frame
     gets set up:

     1. Use a caller-saved saved register:

	leaq  8(%rsp), %reg
	andq  $-XXX, %rsp
	pushq -8(%reg)

     2. Use a callee-saved saved register:

	pushq %reg
	leaq  16(%rsp), %reg
	andq  $-XXX, %rsp
	pushq -8(%reg)

     "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:

	0x48 0x83 0xe4 0xf0			andq $-16, %rsp
	0x48 0x81 0xe4 0x00 0xff 0xff 0xff	andq $-256, %rsp
   */

  gdb_byte buf[18];
  int reg, r;
  int offset, offset_and;

  if (target_read_code (pc, buf, sizeof buf))
    return pc;

  /* Check caller-saved saved register.  The first instruction has
     to be "leaq 8(%rsp), %reg".  */
  if ((buf[0] & 0xfb) == 0x48
      && buf[1] == 0x8d
      && buf[3] == 0x24
      && buf[4] == 0x8)
    {
      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[2] & 0xc7) != 0x44)
	return pc;

      /* REG has register number.  */
      reg = (buf[2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if (buf[0] == 0x4c)
	reg += 8;

      offset = 5;
    }
  else
    {
      /* Check callee-saved saved register.  The first instruction
	 has to be "pushq %reg".  */
      reg = 0;
      if ((buf[0] & 0xf8) == 0x50)
	offset = 0;
      else if ((buf[0] & 0xf6) == 0x40
	       && (buf[1] & 0xf8) == 0x50)
	{
	  /* Check the REX.B bit.  */
	  if ((buf[0] & 1) != 0)
	    reg = 8;

	  offset = 1;
	}
      else
	return pc;

      /* Get register.  */
      reg += buf[offset] & 0x7;

      offset++;

      /* The next instruction has to be "leaq 16(%rsp), %reg".  */
      if ((buf[offset] & 0xfb) != 0x48
	  || buf[offset + 1] != 0x8d
	  || buf[offset + 3] != 0x24
	  || buf[offset + 4] != 0x10)
	return pc;

      /* MOD must be binary 10 and R/M must be binary 100.  */
      if ((buf[offset + 2] & 0xc7) != 0x44)
	return pc;

      /* REG has register number.  */
      r = (buf[offset + 2] >> 3) & 7;

      /* Check the REX.R bit.  */
      if (buf[offset] == 0x4c)
	r += 8;

      /* Registers in pushq and leaq have to be the same.  */
      if (reg != r)
	return pc;

      offset += 5;
    }

  /* Register can't be %rsp nor %rbp.  */
  if (reg == 4 || reg == 5)
    return pc;

  /* The next instruction has to be "andq $-XXX, %rsp".  */
  if (buf[offset] != 0x48
      || buf[offset + 2] != 0xe4
      || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
    return pc;

  /* Remember where the "andq" is; that's the point after which the
     realigned stack pointer is in effect.  */
  offset_and = offset;
  offset += buf[offset + 1] == 0x81 ? 7 : 4;

  /* The next instruction has to be "pushq -8(%reg)".  */
  r = 0;
  if (buf[offset] == 0xff)
    offset++;
  else if ((buf[offset] & 0xf6) == 0x40
	   && buf[offset + 1] == 0xff)
    {
      /* Check the REX.B bit.  */
      if ((buf[offset] & 0x1) != 0)
	r = 8;
      offset += 2;
    }
  else
    return pc;

  /* 8bit -8 is 0xf8.  REG must be binary 110 and MOD must be binary
     01.  */
  if (buf[offset + 1] != 0xf8
      || (buf[offset] & 0xf8) != 0x70)
    return pc;

  /* R/M has register.  */
  r += buf[offset] & 7;

  /* Registers in leaq and pushq have to be the same.  */
  if (reg != r)
    return pc;

  /* Record which register holds the pre-realignment stack pointer,
     but only once execution has actually passed the "andq".  */
  if (current_pc > pc + offset_and)
    cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);

  return min (pc + offset + 2, current_pc);
}
2007
ac142d96
L
2008/* Similar to amd64_analyze_stack_align for x32. */
2009
2010static CORE_ADDR
2011amd64_x32_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
2012 struct amd64_frame_cache *cache)
2013{
2014 /* There are 2 code sequences to re-align stack before the frame
2015 gets set up:
2016
2017 1. Use a caller-saved saved register:
2018
2019 leaq 8(%rsp), %reg
2020 andq $-XXX, %rsp
2021 pushq -8(%reg)
2022
2023 or
2024
2025 [addr32] leal 8(%rsp), %reg
2026 andl $-XXX, %esp
2027 [addr32] pushq -8(%reg)
2028
2029 2. Use a callee-saved saved register:
2030
2031 pushq %reg
2032 leaq 16(%rsp), %reg
2033 andq $-XXX, %rsp
2034 pushq -8(%reg)
2035
2036 or
2037
2038 pushq %reg
2039 [addr32] leal 16(%rsp), %reg
2040 andl $-XXX, %esp
2041 [addr32] pushq -8(%reg)
2042
2043 "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:
2044
2045 0x48 0x83 0xe4 0xf0 andq $-16, %rsp
2046 0x48 0x81 0xe4 0x00 0xff 0xff 0xff andq $-256, %rsp
2047
2048 "andl $-XXX, %esp" can be either 3 bytes or 6 bytes:
2049
2050 0x83 0xe4 0xf0 andl $-16, %esp
2051 0x81 0xe4 0x00 0xff 0xff 0xff andl $-256, %esp
2052 */
2053
2054 gdb_byte buf[19];
2055 int reg, r;
2056 int offset, offset_and;
2057
2058 if (target_read_memory (pc, buf, sizeof buf))
2059 return pc;
2060
2061 /* Skip optional addr32 prefix. */
2062 offset = buf[0] == 0x67 ? 1 : 0;
2063
2064 /* Check caller-saved saved register. The first instruction has
2065 to be "leaq 8(%rsp), %reg" or "leal 8(%rsp), %reg". */
2066 if (((buf[offset] & 0xfb) == 0x48 || (buf[offset] & 0xfb) == 0x40)
2067 && buf[offset + 1] == 0x8d
2068 && buf[offset + 3] == 0x24
2069 && buf[offset + 4] == 0x8)
2070 {
2071 /* MOD must be binary 10 and R/M must be binary 100. */
2072 if ((buf[offset + 2] & 0xc7) != 0x44)
2073 return pc;
2074
2075 /* REG has register number. */
2076 reg = (buf[offset + 2] >> 3) & 7;
2077
2078 /* Check the REX.R bit. */
2079 if ((buf[offset] & 0x4) != 0)
2080 reg += 8;
2081
2082 offset += 5;
2083 }
2084 else
2085 {
2086 /* Check callee-saved saved register. The first instruction
2087 has to be "pushq %reg". */
2088 reg = 0;
2089 if ((buf[offset] & 0xf6) == 0x40
2090 && (buf[offset + 1] & 0xf8) == 0x50)
2091 {
2092 /* Check the REX.B bit. */
2093 if ((buf[offset] & 1) != 0)
2094 reg = 8;
2095
2096 offset += 1;
2097 }
2098 else if ((buf[offset] & 0xf8) != 0x50)
2099 return pc;
2100
2101 /* Get register. */
2102 reg += buf[offset] & 0x7;
2103
2104 offset++;
2105
2106 /* Skip optional addr32 prefix. */
2107 if (buf[offset] == 0x67)
2108 offset++;
2109
2110 /* The next instruction has to be "leaq 16(%rsp), %reg" or
2111 "leal 16(%rsp), %reg". */
2112 if (((buf[offset] & 0xfb) != 0x48 && (buf[offset] & 0xfb) != 0x40)
2113 || buf[offset + 1] != 0x8d
2114 || buf[offset + 3] != 0x24
2115 || buf[offset + 4] != 0x10)
2116 return pc;
2117
2118 /* MOD must be binary 10 and R/M must be binary 100. */
2119 if ((buf[offset + 2] & 0xc7) != 0x44)
2120 return pc;
2121
2122 /* REG has register number. */
2123 r = (buf[offset + 2] >> 3) & 7;
2124
2125 /* Check the REX.R bit. */
2126 if ((buf[offset] & 0x4) != 0)
2127 r += 8;
2128
2129 /* Registers in pushq and leaq have to be the same. */
2130 if (reg != r)
2131 return pc;
2132
2133 offset += 5;
2134 }
2135
2136 /* Rigister can't be %rsp nor %rbp. */
2137 if (reg == 4 || reg == 5)
2138 return pc;
2139
2140 /* The next instruction may be "andq $-XXX, %rsp" or
2141 "andl $-XXX, %esp". */
2142 if (buf[offset] != 0x48)
2143 offset--;
2144
2145 if (buf[offset + 2] != 0xe4
2146 || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
2147 return pc;
2148
2149 offset_and = offset;
2150 offset += buf[offset + 1] == 0x81 ? 7 : 4;
2151
2152 /* Skip optional addr32 prefix. */
2153 if (buf[offset] == 0x67)
2154 offset++;
2155
2156 /* The next instruction has to be "pushq -8(%reg)". */
2157 r = 0;
2158 if (buf[offset] == 0xff)
2159 offset++;
2160 else if ((buf[offset] & 0xf6) == 0x40
2161 && buf[offset + 1] == 0xff)
2162 {
2163 /* Check the REX.B bit. */
2164 if ((buf[offset] & 0x1) != 0)
2165 r = 8;
2166 offset += 2;
2167 }
2168 else
2169 return pc;
2170
2171 /* 8bit -8 is 0xf8. REG must be binary 110 and MOD must be binary
2172 01. */
2173 if (buf[offset + 1] != 0xf8
2174 || (buf[offset] & 0xf8) != 0x70)
2175 return pc;
2176
2177 /* R/M has register. */
2178 r += buf[offset] & 7;
2179
2180 /* Registers in leaq and pushq have to be the same. */
2181 if (reg != r)
2182 return pc;
2183
2184 if (current_pc > pc + offset_and)
2185 cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);
2186
2187 return min (pc + offset + 2, current_pc);
2188}
2189
c4f35dd8
MK
2190/* Do a limited analysis of the prologue at PC and update CACHE
2191 accordingly. Bail out early if CURRENT_PC is reached. Return the
2192 address where the analysis stopped.
2193
2194 We will handle only functions beginning with:
2195
2196 pushq %rbp 0x55
50f1ae7b 2197 movq %rsp, %rbp 0x48 0x89 0xe5 (or 0x48 0x8b 0xec)
c4f35dd8 2198
649e6d92
MK
2199 or (for the X32 ABI):
2200
2201 pushq %rbp 0x55
2202 movl %esp, %ebp 0x89 0xe5 (or 0x8b 0xec)
2203
2204 Any function that doesn't start with one of these sequences will be
2205 assumed to have no prologue and thus no valid frame pointer in
2206 %rbp. */
c4f35dd8
MK
2207
2208static CORE_ADDR
e17a4113
UW
2209amd64_analyze_prologue (struct gdbarch *gdbarch,
2210 CORE_ADDR pc, CORE_ADDR current_pc,
e53bef9f 2211 struct amd64_frame_cache *cache)
53e95fcf 2212{
e17a4113 2213 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
50f1ae7b
DE
2214 /* There are two variations of movq %rsp, %rbp. */
2215 static const gdb_byte mov_rsp_rbp_1[3] = { 0x48, 0x89, 0xe5 };
2216 static const gdb_byte mov_rsp_rbp_2[3] = { 0x48, 0x8b, 0xec };
649e6d92
MK
2217 /* Ditto for movl %esp, %ebp. */
2218 static const gdb_byte mov_esp_ebp_1[2] = { 0x89, 0xe5 };
2219 static const gdb_byte mov_esp_ebp_2[2] = { 0x8b, 0xec };
2220
d8de1ef7
MK
2221 gdb_byte buf[3];
2222 gdb_byte op;
c4f35dd8
MK
2223
2224 if (current_pc <= pc)
2225 return current_pc;
2226
ac142d96
L
2227 if (gdbarch_ptr_bit (gdbarch) == 32)
2228 pc = amd64_x32_analyze_stack_align (pc, current_pc, cache);
2229 else
2230 pc = amd64_analyze_stack_align (pc, current_pc, cache);
e0c62198 2231
bae8a07a 2232 op = read_code_unsigned_integer (pc, 1, byte_order);
c4f35dd8
MK
2233
2234 if (op == 0x55) /* pushq %rbp */
2235 {
2236 /* Take into account that we've executed the `pushq %rbp' that
2237 starts this instruction sequence. */
90f90721 2238 cache->saved_regs[AMD64_RBP_REGNUM] = 0;
c4f35dd8
MK
2239 cache->sp_offset += 8;
2240
2241 /* If that's all, return now. */
2242 if (current_pc <= pc + 1)
2243 return current_pc;
2244
bae8a07a 2245 read_code (pc + 1, buf, 3);
c4f35dd8 2246
649e6d92
MK
2247 /* Check for `movq %rsp, %rbp'. */
2248 if (memcmp (buf, mov_rsp_rbp_1, 3) == 0
2249 || memcmp (buf, mov_rsp_rbp_2, 3) == 0)
2250 {
2251 /* OK, we actually have a frame. */
2252 cache->frameless_p = 0;
2253 return pc + 4;
2254 }
2255
2256 /* For X32, also check for `movq %esp, %ebp'. */
2257 if (gdbarch_ptr_bit (gdbarch) == 32)
2258 {
2259 if (memcmp (buf, mov_esp_ebp_1, 2) == 0
2260 || memcmp (buf, mov_esp_ebp_2, 2) == 0)
2261 {
2262 /* OK, we actually have a frame. */
2263 cache->frameless_p = 0;
2264 return pc + 3;
2265 }
2266 }
2267
2268 return pc + 1;
c4f35dd8
MK
2269 }
2270
2271 return pc;
53e95fcf
JS
2272}
2273
df15bd07
JK
2274/* Work around false termination of prologue - GCC PR debug/48827.
2275
2276 START_PC is the first instruction of a function, PC is its minimal already
2277 determined advanced address. Function returns PC if it has nothing to do.
2278
2279 84 c0 test %al,%al
2280 74 23 je after
2281 <-- here is 0 lines advance - the false prologue end marker.
2282 0f 29 85 70 ff ff ff movaps %xmm0,-0x90(%rbp)
2283 0f 29 4d 80 movaps %xmm1,-0x80(%rbp)
2284 0f 29 55 90 movaps %xmm2,-0x70(%rbp)
2285 0f 29 5d a0 movaps %xmm3,-0x60(%rbp)
2286 0f 29 65 b0 movaps %xmm4,-0x50(%rbp)
2287 0f 29 6d c0 movaps %xmm5,-0x40(%rbp)
2288 0f 29 75 d0 movaps %xmm6,-0x30(%rbp)
2289 0f 29 7d e0 movaps %xmm7,-0x20(%rbp)
2290 after: */
c4f35dd8
MK
2291
2292static CORE_ADDR
df15bd07 2293amd64_skip_xmm_prologue (CORE_ADDR pc, CORE_ADDR start_pc)
53e95fcf 2294{
08711b9a
JK
2295 struct symtab_and_line start_pc_sal, next_sal;
2296 gdb_byte buf[4 + 8 * 7];
2297 int offset, xmmreg;
c4f35dd8 2298
08711b9a
JK
2299 if (pc == start_pc)
2300 return pc;
2301
2302 start_pc_sal = find_pc_sect_line (start_pc, NULL, 0);
2303 if (start_pc_sal.symtab == NULL
43f3e411
DE
2304 || producer_is_gcc_ge_4 (COMPUNIT_PRODUCER
2305 (SYMTAB_COMPUNIT (start_pc_sal.symtab))) < 6
08711b9a
JK
2306 || start_pc_sal.pc != start_pc || pc >= start_pc_sal.end)
2307 return pc;
2308
2309 next_sal = find_pc_sect_line (start_pc_sal.end, NULL, 0);
2310 if (next_sal.line != start_pc_sal.line)
2311 return pc;
2312
2313 /* START_PC can be from overlayed memory, ignored here. */
bae8a07a 2314 if (target_read_code (next_sal.pc - 4, buf, sizeof (buf)) != 0)
08711b9a
JK
2315 return pc;
2316
2317 /* test %al,%al */
2318 if (buf[0] != 0x84 || buf[1] != 0xc0)
2319 return pc;
2320 /* je AFTER */
2321 if (buf[2] != 0x74)
2322 return pc;
2323
2324 offset = 4;
2325 for (xmmreg = 0; xmmreg < 8; xmmreg++)
2326 {
bede5f5f 2327 /* 0x0f 0x29 0b??000101 movaps %xmmreg?,-0x??(%rbp) */
08711b9a 2328 if (buf[offset] != 0x0f || buf[offset + 1] != 0x29
bede5f5f 2329 || (buf[offset + 2] & 0x3f) != (xmmreg << 3 | 0x5))
08711b9a
JK
2330 return pc;
2331
bede5f5f
JK
2332 /* 0b01?????? */
2333 if ((buf[offset + 2] & 0xc0) == 0x40)
08711b9a
JK
2334 {
2335 /* 8-bit displacement. */
2336 offset += 4;
2337 }
bede5f5f
JK
2338 /* 0b10?????? */
2339 else if ((buf[offset + 2] & 0xc0) == 0x80)
08711b9a
JK
2340 {
2341 /* 32-bit displacement. */
2342 offset += 7;
2343 }
2344 else
2345 return pc;
2346 }
2347
2348 /* je AFTER */
2349 if (offset - 4 != buf[3])
2350 return pc;
2351
2352 return next_sal.end;
53e95fcf 2353}
df15bd07
JK
2354
2355/* Return PC of first real instruction. */
2356
2357static CORE_ADDR
2358amd64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR start_pc)
2359{
2360 struct amd64_frame_cache cache;
2361 CORE_ADDR pc;
56bf0743
KB
2362 CORE_ADDR func_addr;
2363
2364 if (find_pc_partial_function (start_pc, NULL, &func_addr, NULL))
2365 {
2366 CORE_ADDR post_prologue_pc
2367 = skip_prologue_using_sal (gdbarch, func_addr);
43f3e411 2368 struct compunit_symtab *cust = find_pc_compunit_symtab (func_addr);
56bf0743
KB
2369
2370 /* Clang always emits a line note before the prologue and another
2371 one after. We trust clang to emit usable line notes. */
2372 if (post_prologue_pc
43f3e411
DE
2373 && (cust != NULL
2374 && COMPUNIT_PRODUCER (cust) != NULL
2375 && strncmp (COMPUNIT_PRODUCER (cust), "clang ",
2376 sizeof ("clang ") - 1) == 0))
56bf0743
KB
2377 return max (start_pc, post_prologue_pc);
2378 }
df15bd07
JK
2379
2380 amd64_init_frame_cache (&cache);
2381 pc = amd64_analyze_prologue (gdbarch, start_pc, 0xffffffffffffffffLL,
2382 &cache);
2383 if (cache.frameless_p)
2384 return start_pc;
2385
2386 return amd64_skip_xmm_prologue (pc, start_pc);
2387}
c4f35dd8 2388\f
53e95fcf 2389
c4f35dd8
MK
2390/* Normal frames. */
2391
8fbca658
PA
2392static void
2393amd64_frame_cache_1 (struct frame_info *this_frame,
2394 struct amd64_frame_cache *cache)
6d686a84 2395{
e17a4113
UW
2396 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2397 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
d8de1ef7 2398 gdb_byte buf[8];
6d686a84 2399 int i;
6d686a84 2400
10458914 2401 cache->pc = get_frame_func (this_frame);
c4f35dd8 2402 if (cache->pc != 0)
e17a4113
UW
2403 amd64_analyze_prologue (gdbarch, cache->pc, get_frame_pc (this_frame),
2404 cache);
c4f35dd8
MK
2405
2406 if (cache->frameless_p)
2407 {
4a28816e
MK
2408 /* We didn't find a valid frame. If we're at the start of a
2409 function, or somewhere half-way its prologue, the function's
2410 frame probably hasn't been fully setup yet. Try to
2411 reconstruct the base address for the stack frame by looking
2412 at the stack pointer. For truly "frameless" functions this
2413 might work too. */
c4f35dd8 2414
e0c62198
L
2415 if (cache->saved_sp_reg != -1)
2416 {
8fbca658
PA
2417 /* Stack pointer has been saved. */
2418 get_frame_register (this_frame, cache->saved_sp_reg, buf);
2419 cache->saved_sp = extract_unsigned_integer (buf, 8, byte_order);
2420
e0c62198
L
2421 /* We're halfway aligning the stack. */
2422 cache->base = ((cache->saved_sp - 8) & 0xfffffffffffffff0LL) - 8;
2423 cache->saved_regs[AMD64_RIP_REGNUM] = cache->saved_sp - 8;
2424
2425 /* This will be added back below. */
2426 cache->saved_regs[AMD64_RIP_REGNUM] -= cache->base;
2427 }
2428 else
2429 {
2430 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
e17a4113
UW
2431 cache->base = extract_unsigned_integer (buf, 8, byte_order)
2432 + cache->sp_offset;
e0c62198 2433 }
c4f35dd8 2434 }
35883a3f
MK
2435 else
2436 {
10458914 2437 get_frame_register (this_frame, AMD64_RBP_REGNUM, buf);
e17a4113 2438 cache->base = extract_unsigned_integer (buf, 8, byte_order);
35883a3f 2439 }
c4f35dd8
MK
2440
2441 /* Now that we have the base address for the stack frame we can
2442 calculate the value of %rsp in the calling frame. */
2443 cache->saved_sp = cache->base + 16;
2444
35883a3f
MK
2445 /* For normal frames, %rip is stored at 8(%rbp). If we don't have a
2446 frame we find it at the same offset from the reconstructed base
e0c62198
L
2447 address. If we're halfway aligning the stack, %rip is handled
2448 differently (see above). */
2449 if (!cache->frameless_p || cache->saved_sp_reg == -1)
2450 cache->saved_regs[AMD64_RIP_REGNUM] = 8;
35883a3f 2451
c4f35dd8
MK
2452 /* Adjust all the saved registers such that they contain addresses
2453 instead of offsets. */
e53bef9f 2454 for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
c4f35dd8
MK
2455 if (cache->saved_regs[i] != -1)
2456 cache->saved_regs[i] += cache->base;
2457
8fbca658
PA
2458 cache->base_p = 1;
2459}
2460
2461static struct amd64_frame_cache *
2462amd64_frame_cache (struct frame_info *this_frame, void **this_cache)
2463{
2464 volatile struct gdb_exception ex;
2465 struct amd64_frame_cache *cache;
2466
2467 if (*this_cache)
2468 return *this_cache;
2469
2470 cache = amd64_alloc_frame_cache ();
2471 *this_cache = cache;
2472
2473 TRY_CATCH (ex, RETURN_MASK_ERROR)
2474 {
2475 amd64_frame_cache_1 (this_frame, cache);
2476 }
2477 if (ex.reason < 0 && ex.error != NOT_AVAILABLE_ERROR)
2478 throw_exception (ex);
2479
c4f35dd8 2480 return cache;
6d686a84
ML
2481}
2482
8fbca658
PA
2483static enum unwind_stop_reason
2484amd64_frame_unwind_stop_reason (struct frame_info *this_frame,
2485 void **this_cache)
2486{
2487 struct amd64_frame_cache *cache =
2488 amd64_frame_cache (this_frame, this_cache);
2489
2490 if (!cache->base_p)
2491 return UNWIND_UNAVAILABLE;
2492
2493 /* This marks the outermost frame. */
2494 if (cache->base == 0)
2495 return UNWIND_OUTERMOST;
2496
2497 return UNWIND_NO_REASON;
2498}
2499
c4f35dd8 2500static void
10458914 2501amd64_frame_this_id (struct frame_info *this_frame, void **this_cache,
e53bef9f 2502 struct frame_id *this_id)
c4f35dd8 2503{
e53bef9f 2504 struct amd64_frame_cache *cache =
10458914 2505 amd64_frame_cache (this_frame, this_cache);
c4f35dd8 2506
8fbca658 2507 if (!cache->base_p)
5ce0145d
PA
2508 (*this_id) = frame_id_build_unavailable_stack (cache->pc);
2509 else if (cache->base == 0)
2510 {
2511 /* This marks the outermost frame. */
2512 return;
2513 }
2514 else
2515 (*this_id) = frame_id_build (cache->base + 16, cache->pc);
c4f35dd8 2516}
e76e1718 2517
10458914
DJ
2518static struct value *
2519amd64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
2520 int regnum)
53e95fcf 2521{
10458914 2522 struct gdbarch *gdbarch = get_frame_arch (this_frame);
e53bef9f 2523 struct amd64_frame_cache *cache =
10458914 2524 amd64_frame_cache (this_frame, this_cache);
e76e1718 2525
c4f35dd8 2526 gdb_assert (regnum >= 0);
b1ab997b 2527
2ae02b47 2528 if (regnum == gdbarch_sp_regnum (gdbarch) && cache->saved_sp)
10458914 2529 return frame_unwind_got_constant (this_frame, regnum, cache->saved_sp);
e76e1718 2530
e53bef9f 2531 if (regnum < AMD64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
10458914
DJ
2532 return frame_unwind_got_memory (this_frame, regnum,
2533 cache->saved_regs[regnum]);
e76e1718 2534
10458914 2535 return frame_unwind_got_register (this_frame, regnum, regnum);
c4f35dd8 2536}
e76e1718 2537
e53bef9f 2538static const struct frame_unwind amd64_frame_unwind =
c4f35dd8
MK
2539{
2540 NORMAL_FRAME,
8fbca658 2541 amd64_frame_unwind_stop_reason,
e53bef9f 2542 amd64_frame_this_id,
10458914
DJ
2543 amd64_frame_prev_register,
2544 NULL,
2545 default_frame_sniffer
c4f35dd8 2546};
c4f35dd8 2547\f
6710bf39
SS
2548/* Generate a bytecode expression to get the value of the saved PC. */
2549
2550static void
2551amd64_gen_return_address (struct gdbarch *gdbarch,
2552 struct agent_expr *ax, struct axs_value *value,
2553 CORE_ADDR scope)
2554{
2555 /* The following sequence assumes the traditional use of the base
2556 register. */
2557 ax_reg (ax, AMD64_RBP_REGNUM);
2558 ax_const_l (ax, 8);
2559 ax_simple (ax, aop_add);
2560 value->type = register_type (gdbarch, AMD64_RIP_REGNUM);
2561 value->kind = axs_lvalue_memory;
2562}
2563\f
e76e1718 2564
c4f35dd8
MK
2565/* Signal trampolines. */
2566
2567/* FIXME: kettenis/20030419: Perhaps, we can unify the 32-bit and
2568 64-bit variants. This would require using identical frame caches
2569 on both platforms. */
2570
e53bef9f 2571static struct amd64_frame_cache *
10458914 2572amd64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
c4f35dd8 2573{
e17a4113
UW
2574 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2575 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2576 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
8fbca658 2577 volatile struct gdb_exception ex;
e53bef9f 2578 struct amd64_frame_cache *cache;
c4f35dd8 2579 CORE_ADDR addr;
d8de1ef7 2580 gdb_byte buf[8];
2b5e0749 2581 int i;
c4f35dd8
MK
2582
2583 if (*this_cache)
2584 return *this_cache;
2585
e53bef9f 2586 cache = amd64_alloc_frame_cache ();
c4f35dd8 2587
8fbca658
PA
2588 TRY_CATCH (ex, RETURN_MASK_ERROR)
2589 {
2590 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2591 cache->base = extract_unsigned_integer (buf, 8, byte_order) - 8;
2592
2593 addr = tdep->sigcontext_addr (this_frame);
2594 gdb_assert (tdep->sc_reg_offset);
2595 gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
2596 for (i = 0; i < tdep->sc_num_regs; i++)
2597 if (tdep->sc_reg_offset[i] != -1)
2598 cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];
c4f35dd8 2599
8fbca658
PA
2600 cache->base_p = 1;
2601 }
2602 if (ex.reason < 0 && ex.error != NOT_AVAILABLE_ERROR)
2603 throw_exception (ex);
c4f35dd8
MK
2604
2605 *this_cache = cache;
2606 return cache;
53e95fcf
JS
2607}
2608
8fbca658
PA
2609static enum unwind_stop_reason
2610amd64_sigtramp_frame_unwind_stop_reason (struct frame_info *this_frame,
2611 void **this_cache)
2612{
2613 struct amd64_frame_cache *cache =
2614 amd64_sigtramp_frame_cache (this_frame, this_cache);
2615
2616 if (!cache->base_p)
2617 return UNWIND_UNAVAILABLE;
2618
2619 return UNWIND_NO_REASON;
2620}
2621
c4f35dd8 2622static void
10458914 2623amd64_sigtramp_frame_this_id (struct frame_info *this_frame,
e53bef9f 2624 void **this_cache, struct frame_id *this_id)
c4f35dd8 2625{
e53bef9f 2626 struct amd64_frame_cache *cache =
10458914 2627 amd64_sigtramp_frame_cache (this_frame, this_cache);
c4f35dd8 2628
8fbca658 2629 if (!cache->base_p)
5ce0145d
PA
2630 (*this_id) = frame_id_build_unavailable_stack (get_frame_pc (this_frame));
2631 else if (cache->base == 0)
2632 {
2633 /* This marks the outermost frame. */
2634 return;
2635 }
2636 else
2637 (*this_id) = frame_id_build (cache->base + 16, get_frame_pc (this_frame));
c4f35dd8
MK
2638}
2639
10458914
DJ
static struct value *
amd64_sigtramp_frame_prev_register (struct frame_info *this_frame,
				    void **this_cache, int regnum)
{
  /* Make sure we've initialized the cache.  */
  amd64_sigtramp_frame_cache (this_frame, this_cache);

  /* The normal-frame logic works once the cache holds the
     sigcontext register addresses.  */
  return amd64_frame_prev_register (this_frame, this_cache, regnum);
}
2649
10458914
DJ
2650static int
2651amd64_sigtramp_frame_sniffer (const struct frame_unwind *self,
2652 struct frame_info *this_frame,
2653 void **this_cache)
c4f35dd8 2654{
10458914 2655 struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
911bc6ee
MK
2656
2657 /* We shouldn't even bother if we don't have a sigcontext_addr
2658 handler. */
2659 if (tdep->sigcontext_addr == NULL)
10458914 2660 return 0;
911bc6ee
MK
2661
2662 if (tdep->sigtramp_p != NULL)
2663 {
10458914
DJ
2664 if (tdep->sigtramp_p (this_frame))
2665 return 1;
911bc6ee 2666 }
c4f35dd8 2667
911bc6ee 2668 if (tdep->sigtramp_start != 0)
1c3545ae 2669 {
10458914 2670 CORE_ADDR pc = get_frame_pc (this_frame);
1c3545ae 2671
911bc6ee
MK
2672 gdb_assert (tdep->sigtramp_end != 0);
2673 if (pc >= tdep->sigtramp_start && pc < tdep->sigtramp_end)
10458914 2674 return 1;
1c3545ae 2675 }
c4f35dd8 2676
10458914 2677 return 0;
c4f35dd8 2678}
10458914
DJ
2679
2680static const struct frame_unwind amd64_sigtramp_frame_unwind =
2681{
2682 SIGTRAMP_FRAME,
8fbca658 2683 amd64_sigtramp_frame_unwind_stop_reason,
10458914
DJ
2684 amd64_sigtramp_frame_this_id,
2685 amd64_sigtramp_frame_prev_register,
2686 NULL,
2687 amd64_sigtramp_frame_sniffer
2688};
c4f35dd8
MK
2689\f
2690
2691static CORE_ADDR
10458914 2692amd64_frame_base_address (struct frame_info *this_frame, void **this_cache)
c4f35dd8 2693{
e53bef9f 2694 struct amd64_frame_cache *cache =
10458914 2695 amd64_frame_cache (this_frame, this_cache);
c4f35dd8
MK
2696
2697 return cache->base;
2698}
2699
e53bef9f 2700static const struct frame_base amd64_frame_base =
c4f35dd8 2701{
e53bef9f
MK
2702 &amd64_frame_unwind,
2703 amd64_frame_base_address,
2704 amd64_frame_base_address,
2705 amd64_frame_base_address
c4f35dd8
MK
2706};
2707
872761f4
MS
2708/* Normal frames, but in a function epilogue. */
2709
2710/* The epilogue is defined here as the 'ret' instruction, which will
2711 follow any instruction such as 'leave' or 'pop %ebp' that destroys
2712 the function's stack frame. */
2713
2714static int
2715amd64_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
2716{
2717 gdb_byte insn;
43f3e411 2718 struct compunit_symtab *cust;
e0d00bc7 2719
43f3e411
DE
2720 cust = find_pc_compunit_symtab (pc);
2721 if (cust != NULL && COMPUNIT_EPILOGUE_UNWIND_VALID (cust))
e0d00bc7 2722 return 0;
872761f4
MS
2723
2724 if (target_read_memory (pc, &insn, 1))
2725 return 0; /* Can't read memory at pc. */
2726
2727 if (insn != 0xc3) /* 'ret' instruction. */
2728 return 0;
2729
2730 return 1;
2731}
2732
static int
amd64_epilogue_frame_sniffer (const struct frame_unwind *self,
			      struct frame_info *this_frame,
			      void **this_prologue_cache)
{
  /* Only the innermost frame can legitimately be stopped mid-epilogue.  */
  if (frame_relative_level (this_frame) != 0)
    return 0;

  return amd64_in_function_epilogue_p (get_frame_arch (this_frame),
				       get_frame_pc (this_frame));
}
2744
2745static struct amd64_frame_cache *
2746amd64_epilogue_frame_cache (struct frame_info *this_frame, void **this_cache)
2747{
2748 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2749 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
8fbca658 2750 volatile struct gdb_exception ex;
872761f4 2751 struct amd64_frame_cache *cache;
6c10c06b 2752 gdb_byte buf[8];
872761f4
MS
2753
2754 if (*this_cache)
2755 return *this_cache;
2756
2757 cache = amd64_alloc_frame_cache ();
2758 *this_cache = cache;
2759
8fbca658
PA
2760 TRY_CATCH (ex, RETURN_MASK_ERROR)
2761 {
2762 /* Cache base will be %esp plus cache->sp_offset (-8). */
2763 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2764 cache->base = extract_unsigned_integer (buf, 8,
2765 byte_order) + cache->sp_offset;
2766
2767 /* Cache pc will be the frame func. */
2768 cache->pc = get_frame_pc (this_frame);
872761f4 2769
8fbca658
PA
2770 /* The saved %esp will be at cache->base plus 16. */
2771 cache->saved_sp = cache->base + 16;
872761f4 2772
8fbca658
PA
2773 /* The saved %eip will be at cache->base plus 8. */
2774 cache->saved_regs[AMD64_RIP_REGNUM] = cache->base + 8;
872761f4 2775
8fbca658
PA
2776 cache->base_p = 1;
2777 }
2778 if (ex.reason < 0 && ex.error != NOT_AVAILABLE_ERROR)
2779 throw_exception (ex);
872761f4
MS
2780
2781 return cache;
2782}
2783
8fbca658
PA
2784static enum unwind_stop_reason
2785amd64_epilogue_frame_unwind_stop_reason (struct frame_info *this_frame,
2786 void **this_cache)
2787{
2788 struct amd64_frame_cache *cache
2789 = amd64_epilogue_frame_cache (this_frame, this_cache);
2790
2791 if (!cache->base_p)
2792 return UNWIND_UNAVAILABLE;
2793
2794 return UNWIND_NO_REASON;
2795}
2796
872761f4
MS
2797static void
2798amd64_epilogue_frame_this_id (struct frame_info *this_frame,
2799 void **this_cache,
2800 struct frame_id *this_id)
2801{
2802 struct amd64_frame_cache *cache = amd64_epilogue_frame_cache (this_frame,
2803 this_cache);
2804
8fbca658 2805 if (!cache->base_p)
5ce0145d
PA
2806 (*this_id) = frame_id_build_unavailable_stack (cache->pc);
2807 else
2808 (*this_id) = frame_id_build (cache->base + 8, cache->pc);
872761f4
MS
2809}
2810
2811static const struct frame_unwind amd64_epilogue_frame_unwind =
2812{
2813 NORMAL_FRAME,
8fbca658 2814 amd64_epilogue_frame_unwind_stop_reason,
872761f4
MS
2815 amd64_epilogue_frame_this_id,
2816 amd64_frame_prev_register,
2817 NULL,
2818 amd64_epilogue_frame_sniffer
2819};
2820
166f4c7b 2821static struct frame_id
10458914 2822amd64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
166f4c7b 2823{
c4f35dd8
MK
2824 CORE_ADDR fp;
2825
10458914 2826 fp = get_frame_register_unsigned (this_frame, AMD64_RBP_REGNUM);
c4f35dd8 2827
10458914 2828 return frame_id_build (fp + 16, get_frame_pc (this_frame));
166f4c7b
ML
2829}
2830
8b148df9
AC
2831/* 16 byte align the SP per frame requirements. */
2832
2833static CORE_ADDR
e53bef9f 2834amd64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
8b148df9
AC
2835{
2836 return sp & -(CORE_ADDR)16;
2837}
473f17b0
MK
2838\f
2839
593adc23
MK
2840/* Supply register REGNUM from the buffer specified by FPREGS and LEN
2841 in the floating-point register set REGSET to register cache
2842 REGCACHE. If REGNUM is -1, do this for all registers in REGSET. */
473f17b0
MK
2843
2844static void
e53bef9f
MK
2845amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
2846 int regnum, const void *fpregs, size_t len)
473f17b0 2847{
09424cff
AA
2848 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2849 const struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
473f17b0
MK
2850
2851 gdb_assert (len == tdep->sizeof_fpregset);
90f90721 2852 amd64_supply_fxsave (regcache, regnum, fpregs);
473f17b0 2853}
8b148df9 2854
593adc23
MK
2855/* Collect register REGNUM from the register cache REGCACHE and store
2856 it in the buffer specified by FPREGS and LEN as described by the
2857 floating-point register set REGSET. If REGNUM is -1, do this for
2858 all registers in REGSET. */
2859
2860static void
2861amd64_collect_fpregset (const struct regset *regset,
2862 const struct regcache *regcache,
2863 int regnum, void *fpregs, size_t len)
2864{
09424cff
AA
2865 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2866 const struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
593adc23
MK
2867
2868 gdb_assert (len == tdep->sizeof_fpregset);
2869 amd64_collect_fxsave (regcache, regnum, fpregs);
2870}
2871
8f0435f7 2872const struct regset amd64_fpregset =
ecc37a5a
AA
2873 {
2874 NULL, amd64_supply_fpregset, amd64_collect_fpregset
2875 };
c6b33596
MK
2876\f
2877
436675d3
PA
2878/* Figure out where the longjmp will land. Slurp the jmp_buf out of
2879 %rdi. We expect its value to be a pointer to the jmp_buf structure
2880 from which we extract the address that we will land at. This
2881 address is copied into PC. This routine returns non-zero on
2882 success. */
2883
2884static int
2885amd64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2886{
2887 gdb_byte buf[8];
2888 CORE_ADDR jb_addr;
2889 struct gdbarch *gdbarch = get_frame_arch (frame);
2890 int jb_pc_offset = gdbarch_tdep (gdbarch)->jb_pc_offset;
0dfff4cb 2891 int len = TYPE_LENGTH (builtin_type (gdbarch)->builtin_func_ptr);
436675d3
PA
2892
2893 /* If JB_PC_OFFSET is -1, we have no way to find out where the
2894 longjmp will land. */
2895 if (jb_pc_offset == -1)
2896 return 0;
2897
2898 get_frame_register (frame, AMD64_RDI_REGNUM, buf);
0dfff4cb
UW
2899 jb_addr= extract_typed_address
2900 (buf, builtin_type (gdbarch)->builtin_data_ptr);
436675d3
PA
2901 if (target_read_memory (jb_addr + jb_pc_offset, buf, len))
2902 return 0;
2903
0dfff4cb 2904 *pc = extract_typed_address (buf, builtin_type (gdbarch)->builtin_func_ptr);
436675d3
PA
2905
2906 return 1;
2907}
2908
cf648174
HZ
2909static const int amd64_record_regmap[] =
2910{
2911 AMD64_RAX_REGNUM, AMD64_RCX_REGNUM, AMD64_RDX_REGNUM, AMD64_RBX_REGNUM,
2912 AMD64_RSP_REGNUM, AMD64_RBP_REGNUM, AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
2913 AMD64_R8_REGNUM, AMD64_R9_REGNUM, AMD64_R10_REGNUM, AMD64_R11_REGNUM,
2914 AMD64_R12_REGNUM, AMD64_R13_REGNUM, AMD64_R14_REGNUM, AMD64_R15_REGNUM,
2915 AMD64_RIP_REGNUM, AMD64_EFLAGS_REGNUM, AMD64_CS_REGNUM, AMD64_SS_REGNUM,
2916 AMD64_DS_REGNUM, AMD64_ES_REGNUM, AMD64_FS_REGNUM, AMD64_GS_REGNUM
2917};
2918
/* Install the common AMD64 (x86-64) target-dependent bits into
   GDBARCH.  This is the shared core used by the 64-bit OS ABI
   handlers; amd64_x32_init_abi below calls it first and then
   overrides the x32-specific pieces.  */

void
amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  const struct target_desc *tdesc = info.target_desc;
  static const char *const stap_integer_prefixes[] = { "$", NULL };
  static const char *const stap_register_prefixes[] = { "%", NULL };
  static const char *const stap_register_indirection_prefixes[] = { "(",
								    NULL };
  static const char *const stap_register_indirection_suffixes[] = { ")",
								    NULL };

  /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
     floating-point registers.  */
  tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;
  tdep->fpregset = &amd64_fpregset;

  /* Fall back to the built-in amd64 description when the target does
     not supply one with registers.  */
  if (! tdesc_has_registers (tdesc))
    tdesc = tdesc_amd64;
  tdep->tdesc = tdesc;

  tdep->num_core_regs = AMD64_NUM_GREGS + I387_NUM_REGS;
  tdep->register_names = amd64_register_names;

  /* Wire up optional register sets advertised by the target
     description: AVX-512, AVX, and MPX features each contribute extra
     raw registers and their naming/numbering.  */
  if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx512") != NULL)
    {
      tdep->zmmh_register_names = amd64_zmmh_names;
      tdep->k_register_names = amd64_k_names;
      tdep->xmm_avx512_register_names = amd64_xmm_avx512_names;
      tdep->ymm16h_register_names = amd64_ymmh_avx512_names;

      tdep->num_zmm_regs = 32;
      tdep->num_xmm_avx512_regs = 16;
      tdep->num_ymm_avx512_regs = 16;

      tdep->zmm0h_regnum = AMD64_ZMM0H_REGNUM;
      tdep->k0_regnum = AMD64_K0_REGNUM;
      tdep->xmm16_regnum = AMD64_XMM16_REGNUM;
      tdep->ymm16h_regnum = AMD64_YMM16H_REGNUM;
    }

  if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx") != NULL)
    {
      tdep->ymmh_register_names = amd64_ymmh_names;
      tdep->num_ymm_regs = 16;
      tdep->ymm0h_regnum = AMD64_YMM0H_REGNUM;
    }

  if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.mpx") != NULL)
    {
      tdep->mpx_register_names = amd64_mpx_names;
      tdep->bndcfgu_regnum = AMD64_BNDCFGU_REGNUM;
      tdep->bnd0r_regnum = AMD64_BND0R_REGNUM;
    }

  /* Pseudo-register counts: 20 byte regs (low and high 8-bit views),
     16 word and 16 dword views of the general-purpose registers.  */
  tdep->num_byte_regs = 20;
  tdep->num_word_regs = 16;
  tdep->num_dword_regs = 16;
  /* Avoid wiring in the MMX registers for now.  */
  tdep->num_mmx_regs = 0;

  set_gdbarch_pseudo_register_read_value (gdbarch,
					  amd64_pseudo_register_read_value);
  set_gdbarch_pseudo_register_write (gdbarch,
				     amd64_pseudo_register_write);

  set_tdesc_pseudo_register_name (gdbarch, amd64_pseudo_register_name);

  /* AMD64 has an FPU and 16 SSE registers.  */
  tdep->st0_regnum = AMD64_ST0_REGNUM;
  tdep->num_xmm_regs = 16;

  /* This is what all the fuss is about.  */
  set_gdbarch_long_bit (gdbarch, 64);
  set_gdbarch_long_long_bit (gdbarch, 64);
  set_gdbarch_ptr_bit (gdbarch, 64);

  /* In contrast to the i386, on AMD64 a `long double' actually takes
     up 128 bits, even though it's still based on the i387 extended
     floating-point format which has only 80 significant bits.  */
  set_gdbarch_long_double_bit (gdbarch, 128);

  set_gdbarch_num_regs (gdbarch, AMD64_NUM_REGS);

  /* Register numbers of various important registers.  */
  set_gdbarch_sp_regnum (gdbarch, AMD64_RSP_REGNUM); /* %rsp */
  set_gdbarch_pc_regnum (gdbarch, AMD64_RIP_REGNUM); /* %rip */
  set_gdbarch_ps_regnum (gdbarch, AMD64_EFLAGS_REGNUM); /* %eflags */
  set_gdbarch_fp0_regnum (gdbarch, AMD64_ST0_REGNUM); /* %st(0) */

  /* The "default" register numbering scheme for AMD64 is referred to
     as the "DWARF Register Number Mapping" in the System V psABI.
     The preferred debugging format for all known AMD64 targets is
     actually DWARF2, and GCC doesn't seem to support DWARF (that is
     DWARF-1), but we provide the same mapping just in case.  This
     mapping is also used for stabs, which GCC does support.  */
  set_gdbarch_stab_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
  set_gdbarch_dwarf2_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);

  /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
     be in use on any of the supported AMD64 targets.  */

  /* Call dummy code.  */
  set_gdbarch_push_dummy_call (gdbarch, amd64_push_dummy_call);
  set_gdbarch_frame_align (gdbarch, amd64_frame_align);
  /* The System V psABI reserves a 128-byte red zone below %rsp.  */
  set_gdbarch_frame_red_zone_size (gdbarch, 128);

  /* Use the i387 helpers for converting between raw FP registers and
     GDB values.  */
  set_gdbarch_convert_register_p (gdbarch, i387_convert_register_p);
  set_gdbarch_register_to_value (gdbarch, i387_register_to_value);
  set_gdbarch_value_to_register (gdbarch, i387_value_to_register);

  set_gdbarch_return_value (gdbarch, amd64_return_value);

  set_gdbarch_skip_prologue (gdbarch, amd64_skip_prologue);

  /* Process record/replay register map (see amd64_record_regmap).  */
  tdep->record_regmap = amd64_record_regmap;

  set_gdbarch_dummy_id (gdbarch, amd64_dummy_id);

  /* Hook the function epilogue frame unwinder.  This unwinder is
     appended to the list first, so that it supercedes the other
     unwinders in function epilogues.  */
  frame_unwind_prepend_unwinder (gdbarch, &amd64_epilogue_frame_unwind);

  /* Hook the prologue-based frame unwinders.  */
  frame_unwind_append_unwinder (gdbarch, &amd64_sigtramp_frame_unwind);
  frame_unwind_append_unwinder (gdbarch, &amd64_frame_unwind);
  frame_base_set_default (gdbarch, &amd64_frame_base);

  set_gdbarch_get_longjmp_target (gdbarch, amd64_get_longjmp_target);

  /* Used by displaced stepping to fix up PC-relative instructions.  */
  set_gdbarch_relocate_instruction (gdbarch, amd64_relocate_instruction);

  set_gdbarch_gen_return_address (gdbarch, amd64_gen_return_address);

  /* SystemTap variables and functions.  */
  set_gdbarch_stap_integer_prefixes (gdbarch, stap_integer_prefixes);
  set_gdbarch_stap_register_prefixes (gdbarch, stap_register_prefixes);
  set_gdbarch_stap_register_indirection_prefixes (gdbarch,
					  stap_register_indirection_prefixes);
  set_gdbarch_stap_register_indirection_suffixes (gdbarch,
					  stap_register_indirection_suffixes);
  set_gdbarch_stap_is_single_operand (gdbarch,
				      i386_stap_is_single_operand);
  set_gdbarch_stap_parse_special_token (gdbarch,
					i386_stap_parse_special_token);
  set_gdbarch_insn_is_call (gdbarch, amd64_insn_is_call);
  set_gdbarch_insn_is_ret (gdbarch, amd64_insn_is_ret);
  set_gdbarch_insn_is_jump (gdbarch, amd64_insn_is_jump);
}
fff4548b
MK
3069\f
3070
3071static struct type *
3072amd64_x32_pseudo_register_type (struct gdbarch *gdbarch, int regnum)
3073{
3074 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3075
3076 switch (regnum - tdep->eax_regnum)
3077 {
3078 case AMD64_RBP_REGNUM: /* %ebp */
3079 case AMD64_RSP_REGNUM: /* %esp */
3080 return builtin_type (gdbarch)->builtin_data_ptr;
3081 case AMD64_RIP_REGNUM: /* %eip */
3082 return builtin_type (gdbarch)->builtin_func_ptr;
3083 }
3084
3085 return i386_pseudo_register_type (gdbarch, regnum);
3086}
3087
/* Install the x32 (ILP32 on x86-64) target-dependent bits into
   GDBARCH.  Builds on amd64_init_abi and then narrows the data model
   and adjusts the pseudo registers.  */

void
amd64_x32_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  const struct target_desc *tdesc = info.target_desc;

  /* Start from the common AMD64 setup; everything below overrides it.  */
  amd64_init_abi (info, gdbarch);

  /* Replace the amd64 default description chosen by amd64_init_abi
     with the x32 one when the target supplied none.  */
  if (! tdesc_has_registers (tdesc))
    tdesc = tdesc_x32;
  tdep->tdesc = tdesc;

  /* 17 rather than 16: x32 exposes %eip as an extra dword pseudo
     register (see amd64_x32_pseudo_register_type).  */
  tdep->num_dword_regs = 17;
  set_tdesc_pseudo_register_type (gdbarch, amd64_x32_pseudo_register_type);

  /* x32 uses 32-bit longs and pointers on 64-bit hardware.  */
  set_gdbarch_long_bit (gdbarch, 32);
  set_gdbarch_ptr_bit (gdbarch, 32);
}
90884b2b
L
3106
/* Provide a prototype to silence -Wmissing-prototypes.  */
void _initialize_amd64_tdep (void);

/* Module initializer: register the built-in target descriptions
   (generated from the features/i386/*.xml files) for the 64-bit and
   x32 ABIs and their AVX/MPX/AVX-512 variants.  */

void
_initialize_amd64_tdep (void)
{
  initialize_tdesc_amd64 ();
  initialize_tdesc_amd64_avx ();
  initialize_tdesc_amd64_mpx ();
  initialize_tdesc_amd64_avx512 ();

  initialize_tdesc_x32 ();
  initialize_tdesc_x32_avx ();
  initialize_tdesc_x32_avx512 ();
}
c4f35dd8
MK
3122\f
3123
/* The 64-bit FXSAVE format differs from the 32-bit format in the
   sense that the instruction pointer and data pointer are simply
   64-bit offsets into the code segment and the data segment instead
   of a selector/offset pair.  The functions below store the upper 32
   bits of these pointers (instead of just the 16 bits of the segment
   selector).  */
3130
3131/* Fill register REGNUM in REGCACHE with the appropriate
0485f6ad
MK
3132 floating-point or SSE register value from *FXSAVE. If REGNUM is
3133 -1, do this for all registers. This function masks off any of the
3134 reserved bits in *FXSAVE. */
c4f35dd8
MK
3135
3136void
90f90721 3137amd64_supply_fxsave (struct regcache *regcache, int regnum,
20a6ec49 3138 const void *fxsave)
c4f35dd8 3139{
20a6ec49
MD
3140 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3141 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3142
41d041d6 3143 i387_supply_fxsave (regcache, regnum, fxsave);
c4f35dd8 3144
233dfcf0
L
3145 if (fxsave
3146 && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
c4f35dd8 3147 {
d8de1ef7 3148 const gdb_byte *regs = fxsave;
41d041d6 3149
20a6ec49
MD
3150 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3151 regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
3152 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3153 regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
c4f35dd8 3154 }
0c1a73d6
MK
3155}
3156
a055a187
L
3157/* Similar to amd64_supply_fxsave, but use XSAVE extended state. */
3158
3159void
3160amd64_supply_xsave (struct regcache *regcache, int regnum,
3161 const void *xsave)
3162{
3163 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3164 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3165
3166 i387_supply_xsave (regcache, regnum, xsave);
3167
233dfcf0
L
3168 if (xsave
3169 && gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
a055a187
L
3170 {
3171 const gdb_byte *regs = xsave;
3172
3173 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3174 regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep),
3175 regs + 12);
3176 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3177 regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep),
3178 regs + 20);
3179 }
3180}
3181
3c017e40
MK
3182/* Fill register REGNUM (if it is a floating-point or SSE register) in
3183 *FXSAVE with the value from REGCACHE. If REGNUM is -1, do this for
3184 all registers. This function doesn't touch any of the reserved
3185 bits in *FXSAVE. */
3186
3187void
3188amd64_collect_fxsave (const struct regcache *regcache, int regnum,
3189 void *fxsave)
3190{
20a6ec49
MD
3191 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3192 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
d8de1ef7 3193 gdb_byte *regs = fxsave;
3c017e40
MK
3194
3195 i387_collect_fxsave (regcache, regnum, fxsave);
3196
233dfcf0 3197 if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
f0ef85a5 3198 {
20a6ec49
MD
3199 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3200 regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
3201 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3202 regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
f0ef85a5 3203 }
3c017e40 3204}
a055a187 3205
7a9dd1b2 3206/* Similar to amd64_collect_fxsave, but use XSAVE extended state. */
a055a187
L
3207
3208void
3209amd64_collect_xsave (const struct regcache *regcache, int regnum,
3210 void *xsave, int gcore)
3211{
3212 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3213 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3214 gdb_byte *regs = xsave;
3215
3216 i387_collect_xsave (regcache, regnum, xsave, gcore);
3217
233dfcf0 3218 if (gdbarch_bfd_arch_info (gdbarch)->bits_per_word == 64)
a055a187
L
3219 {
3220 if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
3221 regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep),
3222 regs + 12);
3223 if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
3224 regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep),
3225 regs + 20);
3226 }
3227}