gdb/amd64-windows-tdep.c
1 /* Copyright (C) 2009-2024 Free Software Foundation, Inc.
2
3 This file is part of GDB.
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 3 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program. If not, see <http://www.gnu.org/licenses/>. */
17
18 #include "event-top.h"
19 #include "extract-store-integer.h"
20 #include "osabi.h"
21 #include "amd64-tdep.h"
22 #include "gdbsupport/x86-xstate.h"
23 #include "gdbtypes.h"
24 #include "gdbcore.h"
25 #include "regcache.h"
26 #include "windows-tdep.h"
27 #include "frame.h"
28 #include "objfiles.h"
29 #include "frame-unwind.h"
30 #include "coff/internal.h"
31 #include "coff/i386.h"
32 #include "coff/pe.h"
33 #include "libcoff.h"
34 #include "value.h"
35 #include <algorithm>
36
37 /* The registers used to pass integer arguments during a function call. */
38 static int amd64_windows_dummy_call_integer_regs[] =
39 {
40 AMD64_RCX_REGNUM, /* %rcx */
41 AMD64_RDX_REGNUM, /* %rdx */
42 AMD64_R8_REGNUM, /* %r8 */
43 AMD64_R9_REGNUM /* %r9 */
44 };
45
46 /* This vector maps GDB register numbers to offsets into
47 the Windows API CONTEXT structure. */
48 static int amd64_windows_gregset_reg_offset[] =
49 {
50 120, /* Rax */
51 144, /* Rbx */
52 128, /* Rcx */
53 136, /* Rdx */
54 168, /* Rsi */
55 176, /* Rdi */
56 160, /* Rbp */
57 152, /* Rsp */
58 184, /* R8 */
59 192, /* R9 */
60 200, /* R10 */
61 208, /* R11 */
62 216, /* R12 */
63 224, /* R13 */
64 232, /* R14 */
65 240, /* R15 */
66 248, /* Rip */
67 68, /* EFlags */
68 56, /* SegCs */
69 66, /* SegSs */
70 58, /* SegDs */
71 60, /* SegEs */
72 62, /* SegFs */
73 64, /* SegGs */
74 288, /* FloatSave.FloatRegisters[0] */
75 304, /* FloatSave.FloatRegisters[1] */
76 320, /* FloatSave.FloatRegisters[2] */
77 336, /* FloatSave.FloatRegisters[3] */
78 352, /* FloatSave.FloatRegisters[4] */
79 368, /* FloatSave.FloatRegisters[5] */
80 384, /* FloatSave.FloatRegisters[6] */
81 400, /* FloatSave.FloatRegisters[7] */
82 256, /* FloatSave.ControlWord */
83 258, /* FloatSave.StatusWord */
84 260, /* FloatSave.TagWord */
85 268, /* FloatSave.ErrorSelector */
86 264, /* FloatSave.ErrorOffset */
87 276, /* FloatSave.DataSelector */
88 272, /* FloatSave.DataOffset */
89 268, /* FloatSave.ErrorSelector */
90 416, /* Xmm0 */
91 432, /* Xmm1 */
92 448, /* Xmm2 */
93 464, /* Xmm3 */
94 480, /* Xmm4 */
95 496, /* Xmm5 */
96 512, /* Xmm6 */
97 528, /* Xmm7 */
98 544, /* Xmm8 */
99 560, /* Xmm9 */
100 576, /* Xmm10 */
101 592, /* Xmm11 */
102 608, /* Xmm12 */
103 624, /* Xmm13 */
104 640, /* Xmm14 */
105 656, /* Xmm15 */
106 280, /* FloatSave.MxCsr */
107 };
108
109 #define AMD64_WINDOWS_SIZEOF_GREGSET 1232
110
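/* For illustration only: the offsets above follow the Win64 CONTEXT
   structure as laid out in winnt.h (a sketch of that layout, not a
   definition used by this file):

     0x78 (120)   Rax .. R15, then Rip at 0xf8 (248)
     0x100 (256)  FltSave, an XMM_SAVE_AREA32 (ControlWord at +0,
                  MxCsr at +24, FloatRegisters at +32, XmmRegisters at +160)
     0x1a0 (416)  Xmm0 .. Xmm15
     0x4d0 (1232) total size, i.e. AMD64_WINDOWS_SIZEOF_GREGSET.  */
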
111 /* Return nonzero if an argument of type TYPE should be passed
112 via one of the integer registers. */
113
114 static int
115 amd64_windows_passed_by_integer_register (struct type *type)
116 {
117 switch (type->code ())
118 {
119 case TYPE_CODE_INT:
120 case TYPE_CODE_ENUM:
121 case TYPE_CODE_BOOL:
122 case TYPE_CODE_RANGE:
123 case TYPE_CODE_CHAR:
124 case TYPE_CODE_PTR:
125 case TYPE_CODE_REF:
126 case TYPE_CODE_RVALUE_REF:
127 case TYPE_CODE_STRUCT:
128 case TYPE_CODE_UNION:
129 case TYPE_CODE_COMPLEX:
130 return (type->length () == 1
131 || type->length () == 2
132 || type->length () == 4
133 || type->length () == 8);
134
135 default:
136 return 0;
137 }
138 }
139
140 /* Return nonzero if an argument of type TYPE should be passed
141 via one of the XMM registers. */
142
143 static int
144 amd64_windows_passed_by_xmm_register (struct type *type)
145 {
146 return ((type->code () == TYPE_CODE_FLT
147 || type->code () == TYPE_CODE_DECFLOAT)
148 && (type->length () == 4 || type->length () == 8));
149 }
150
151 /* Return non-zero iff an argument of the given TYPE should be passed
152 by pointer. */
153
154 static int
155 amd64_windows_passed_by_pointer (struct type *type)
156 {
157 if (amd64_windows_passed_by_integer_register (type))
158 return 0;
159
160 if (amd64_windows_passed_by_xmm_register (type))
161 return 0;
162
163 return 1;
164 }
165
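/* A worked example of the classification above (a sketch of the MS x64
   convention as implemented here, using hypothetical types):

     struct big { char buf[24]; };
     void f (int a, double b, struct big c, long long d);

   A (4-byte integer) is passed in RCX, B (8-byte float) in XMM1, C
   (24 bytes, neither 1/2/4/8 bytes nor a float) is copied to the stack
   and passed by pointer in R8, and D is passed in R9.  */
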
166 /* For each argument that should be passed by pointer, reserve some
167 stack space, store a copy of the argument on the stack, and replace
168 the argument by its address. Return the new Stack Pointer value.
169
170 NARGS is the number of arguments. ARGS is the array containing
171 the value of each argument. SP is the value of the Stack Pointer. */
172
173 static CORE_ADDR
174 amd64_windows_adjust_args_passed_by_pointer (struct value **args,
175 int nargs, CORE_ADDR sp)
176 {
177 int i;
178
179 for (i = 0; i < nargs; i++)
180 if (amd64_windows_passed_by_pointer (args[i]->type ()))
181 {
182 struct type *type = args[i]->type ();
183 const gdb_byte *valbuf = args[i]->contents ().data ();
184 const int len = type->length ();
185
186 /* Store a copy of that argument on the stack, aligned to
187 a 16-byte boundary, and then use the copy's address as
188 the argument. */
189
190 sp -= len;
191 sp &= ~0xf;
192 write_memory (sp, valbuf, len);
193
194 args[i]
195 = value_addr (value_from_contents_and_address (type, valbuf, sp));
196 }
197
198 return sp;
199 }
200
201 /* Store the value of ARG in register REGNO (right-justified).
202 REGCACHE is the register cache. */
203
204 static void
205 amd64_windows_store_arg_in_reg (struct regcache *regcache,
206 struct value *arg, int regno)
207 {
208 struct type *type = arg->type ();
209 const gdb_byte *valbuf = arg->contents ().data ();
210 gdb_byte buf[8];
211
212 gdb_assert (type->length () <= 8);
213 memset (buf, 0, sizeof buf);
214 memcpy (buf, valbuf, std::min (type->length (), (ULONGEST) 8));
215 regcache->cooked_write (regno, buf);
216 }
217
218 /* Push the arguments for an inferior function call, and return
219 the updated value of the SP (Stack Pointer).
220
221 All arguments are identical to the arguments used in
222 amd64_windows_push_dummy_call. */
223
224 static CORE_ADDR
225 amd64_windows_push_arguments (struct regcache *regcache, int nargs,
226 struct value **args, CORE_ADDR sp,
227 function_call_return_method return_method)
228 {
229 int reg_idx = 0;
230 int i;
231 struct value **stack_args = XALLOCAVEC (struct value *, nargs);
232 int num_stack_args = 0;
233 int num_elements = 0;
234 int element = 0;
235
236 /* First, handle the arguments passed by pointer.
237
238 These arguments are replaced by pointers to a copy we are making
239 in inferior memory. So use a copy of the ARGS table, to avoid
240 modifying the original one. */
241 {
242 struct value **args1 = XALLOCAVEC (struct value *, nargs);
243
244 memcpy (args1, args, nargs * sizeof (struct value *));
245 sp = amd64_windows_adjust_args_passed_by_pointer (args1, nargs, sp);
246 args = args1;
247 }
248
249 /* Reserve a register for the "hidden" argument. */
250 if (return_method == return_method_struct)
251 reg_idx++;
252
253 for (i = 0; i < nargs; i++)
254 {
255 struct type *type = args[i]->type ();
256 int len = type->length ();
257 int on_stack_p = 1;
258
259 if (reg_idx < ARRAY_SIZE (amd64_windows_dummy_call_integer_regs))
260 {
261 if (amd64_windows_passed_by_integer_register (type))
262 {
263 amd64_windows_store_arg_in_reg
264 (regcache, args[i],
265 amd64_windows_dummy_call_integer_regs[reg_idx]);
266 on_stack_p = 0;
267 reg_idx++;
268 }
269 else if (amd64_windows_passed_by_xmm_register (type))
270 {
271 amd64_windows_store_arg_in_reg
272 (regcache, args[i], AMD64_XMM0_REGNUM + reg_idx);
273 /* In case of varargs, these parameters must also be
274 passed via the integer registers. */
275 amd64_windows_store_arg_in_reg
276 (regcache, args[i],
277 amd64_windows_dummy_call_integer_regs[reg_idx]);
278 on_stack_p = 0;
279 reg_idx++;
280 }
281 }
282
283 if (on_stack_p)
284 {
285 num_elements += ((len + 7) / 8);
286 stack_args[num_stack_args++] = args[i];
287 }
288 }
289
290 /* Allocate space for the arguments on the stack, keeping it
291 aligned on a 16-byte boundary. */
292 sp -= num_elements * 8;
293 sp &= ~0xf;
294
295 /* Write out the arguments to the stack. */
296 for (i = 0; i < num_stack_args; i++)
297 {
298 struct type *type = stack_args[i]->type ();
299 const gdb_byte *valbuf = stack_args[i]->contents ().data ();
300
301 write_memory (sp + element * 8, valbuf, type->length ());
302 element += ((type->length () + 7) / 8);
303 }
304
305 return sp;
306 }
307
308 /* Implement the "push_dummy_call" gdbarch method. */
309
310 static CORE_ADDR
311 amd64_windows_push_dummy_call
312 (struct gdbarch *gdbarch, struct value *function,
313 struct regcache *regcache, CORE_ADDR bp_addr,
314 int nargs, struct value **args, CORE_ADDR sp,
315 function_call_return_method return_method, CORE_ADDR struct_addr)
316 {
317 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
318 gdb_byte buf[8];
319
320 /* Pass arguments. */
321 sp = amd64_windows_push_arguments (regcache, nargs, args, sp,
322 return_method);
323
324 /* Pass "hidden" argument". */
325 if (return_method == return_method_struct)
326 {
327 /* The "hidden" argument is passed throught the first argument
328 register. */
329 const int arg_regnum = amd64_windows_dummy_call_integer_regs[0];
330
331 store_unsigned_integer (buf, 8, byte_order, struct_addr);
332 regcache->cooked_write (arg_regnum, buf);
333 }
334
335 /* Reserve some memory on the stack for the integer-parameter
336 registers, as required by the ABI. */
337 sp -= ARRAY_SIZE (amd64_windows_dummy_call_integer_regs) * 8;
338
339 /* Store return address. */
340 sp -= 8;
341 store_unsigned_integer (buf, 8, byte_order, bp_addr);
342 write_memory (sp, buf, 8);
343
344 /* Update the stack pointer... */
345 store_unsigned_integer (buf, 8, byte_order, sp);
346 regcache->cooked_write (AMD64_RSP_REGNUM, buf);
347
348 /* ...and fake a frame pointer. */
349 regcache->cooked_write (AMD64_RBP_REGNUM, buf);
350
351 return sp + 16;
352 }
353
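/* For reference, a sketch of the frame amd64_windows_push_dummy_call
   builds (stack grows down; the final SP is written to both RSP and RBP):

     SP + 40 ...  stack arguments, one 8-byte slot per element
     SP + 8       32-byte register "home" (shadow) area for RCX/RDX/R8/R9
     SP + 0       return address (BP_ADDR)

   Because the argument area is kept 16-byte aligned, SP itself ends up
   congruent to 8 modulo 16, which is what the ABI expects at function
   entry.  */
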
354 /* Implement the "return_value" gdbarch method for amd64-windows. */
355
356 static enum return_value_convention
357 amd64_windows_return_value (struct gdbarch *gdbarch, struct value *function,
358 struct type *type, struct regcache *regcache,
359 struct value **read_value, const gdb_byte *writebuf)
360 {
361 int len = type->length ();
362 int regnum = -1;
363
364 /* See if our value is returned through a register. If it is, then
365 store the associated register number in REGNUM. */
366 switch (type->code ())
367 {
368 case TYPE_CODE_FLT:
369 /* Floats and doubles are returned via XMM0. */
370 if (len == 4 || len == 8)
371 regnum = AMD64_XMM0_REGNUM;
372 break;
373 case TYPE_CODE_ARRAY:
374 /* __m128, __m128i and __m128d are returned via XMM0. */
375 if (type->is_vector () && len == 16)
376 {
377 enum type_code code = type->target_type ()->code ();
378 if (code == TYPE_CODE_INT || code == TYPE_CODE_FLT)
379 {
380 regnum = AMD64_XMM0_REGNUM;
381 break;
382 }
383 }
384 [[fallthrough]];
385 default:
386 /* All other values that are 1, 2, 4 or 8 bytes long are returned
387 via RAX. */
388 if (len == 1 || len == 2 || len == 4 || len == 8)
389 regnum = AMD64_RAX_REGNUM;
390 else if (len == 16 && type->code () == TYPE_CODE_INT)
391 regnum = AMD64_XMM0_REGNUM;
392 break;
393 }
394
395 if (regnum < 0)
396 {
397 /* RAX contains the address where the return value has been stored. */
398 if (read_value != nullptr)
399 {
400 ULONGEST addr;
401
402 regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
403 *read_value = value_at_non_lval (type, addr);
404 }
405 return RETURN_VALUE_ABI_RETURNS_ADDRESS;
406 }
407 else
408 {
409 /* Extract the return value from the register where it was stored. */
410 if (read_value != nullptr)
411 {
412 *read_value = value::allocate (type);
413 regcache->raw_read_part (regnum, 0, len,
414 (*read_value)->contents_raw ().data ());
415 }
416 if (writebuf)
417 regcache->raw_write_part (regnum, 0, len, writebuf);
418 return RETURN_VALUE_REGISTER_CONVENTION;
419 }
420 }
421
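/* Some concrete cases of the return convention above, for illustration:
   integers, pointers and 1/2/4/8-byte aggregates come back in RAX;
   float, double, __int128 and the __m128 family come back in XMM0;
   a larger aggregate (say 24 bytes) is returned through a hidden
   pointer, passed in RCX by push_dummy_call above and echoed back in
   RAX.  */
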
422 /* Check whether the code pointed to by PC corresponds to a call to
423 __main, and skip it if so. Return the resulting PC. */
424
425 static CORE_ADDR
426 amd64_skip_main_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
427 {
428 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
429 gdb_byte op;
430
431 target_read_memory (pc, &op, 1);
432 if (op == 0xe8)
433 {
434 gdb_byte buf[4];
435
436 if (target_read_memory (pc + 1, buf, sizeof buf) == 0)
437 {
438 struct bound_minimal_symbol s;
439 CORE_ADDR call_dest;
440
441 call_dest = pc + 5 + extract_signed_integer (buf, 4, byte_order);
442 s = lookup_minimal_symbol_by_pc (call_dest);
443 if (s.minsym != NULL
444 && s.minsym->linkage_name () != NULL
445 && strcmp (s.minsym->linkage_name (), "__main") == 0)
446 pc += 5;
447 }
448 }
449
450 return pc;
451 }
452
453 struct amd64_windows_frame_cache
454 {
455 /* ImageBase for the module. */
456 CORE_ADDR image_base;
457
458 /* Function start and end rva. */
459 CORE_ADDR start_rva;
460 CORE_ADDR end_rva;
461
462 /* Next instruction to be executed. */
463 CORE_ADDR pc;
464
465 /* Current sp. */
466 CORE_ADDR sp;
467
468 /* Address of saved integer and xmm registers. */
469 CORE_ADDR prev_reg_addr[16];
470 CORE_ADDR prev_xmm_addr[16];
471
472 /* The next two fields are set only for machine info frames. */
473
474 /* Likewise for RIP. */
475 CORE_ADDR prev_rip_addr;
476
477 /* Likewise for RSP. */
478 CORE_ADDR prev_rsp_addr;
479
480 /* Address of the previous frame. */
481 CORE_ADDR prev_sp;
482 };
483
484 /* Map a Windows register number to a GDB register number. */
485 static const enum amd64_regnum amd64_windows_w2gdb_regnum[] =
486 {
487 AMD64_RAX_REGNUM,
488 AMD64_RCX_REGNUM,
489 AMD64_RDX_REGNUM,
490 AMD64_RBX_REGNUM,
491 AMD64_RSP_REGNUM,
492 AMD64_RBP_REGNUM,
493 AMD64_RSI_REGNUM,
494 AMD64_RDI_REGNUM,
495 AMD64_R8_REGNUM,
496 AMD64_R9_REGNUM,
497 AMD64_R10_REGNUM,
498 AMD64_R11_REGNUM,
499 AMD64_R12_REGNUM,
500 AMD64_R13_REGNUM,
501 AMD64_R14_REGNUM,
502 AMD64_R15_REGNUM
503 };
504
505 /* Return TRUE iff PC is in the range of the function corresponding to
506 CACHE. */
507
508 static int
509 pc_in_range (CORE_ADDR pc, const struct amd64_windows_frame_cache *cache)
510 {
511 return (pc >= cache->image_base + cache->start_rva
512 && pc < cache->image_base + cache->end_rva);
513 }
514
515 /* Try to recognize and decode an epilogue sequence.
516
517 Return -1 if we fail to read the instructions for any reason.
518 Return 1 if an epilogue sequence was recognized, 0 otherwise. */
519
520 static int
521 amd64_windows_frame_decode_epilogue (const frame_info_ptr &this_frame,
522 struct amd64_windows_frame_cache *cache)
523 {
524 /* According to MSDN an epilogue "must consist of either an add RSP,constant
525 or lea RSP,constant[FPReg], followed by a series of zero or more 8-byte
526 register pops and a return or a jmp".
527
528 Furthermore, according to RtlVirtualUnwind, the complete list of
529 epilogue markers is:
530 - ret [c3]
531 - ret n [c2 imm16]
532 - rep ret [f3 c3]
533 - jmp imm8 | imm32 [eb rel8] or [e9 rel32]
534 - jmp qword ptr imm32 - not handled
535 - rex.w jmp reg [4X ff eY]
536 */
537
538 CORE_ADDR pc = cache->pc;
539 CORE_ADDR cur_sp = cache->sp;
540 struct gdbarch *gdbarch = get_frame_arch (this_frame);
541 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
542 gdb_byte op;
543 gdb_byte rex;
544
545 /* We don't care about the instruction deallocating the frame:
546 if it hasn't been executed, the pc is still in the body;
547 if it has been executed, the following epilogue decoding will work. */
548
549 /* First decode:
550 - pop reg [41 58-5f] or [58-5f]. */
551
552 while (1)
553 {
554 /* Read opcode. */
555 if (target_read_memory (pc, &op, 1) != 0)
556 return -1;
557
558 if (op >= 0x40 && op <= 0x4f)
559 {
560 /* REX prefix. */
561 rex = op;
562
563 /* Read opcode. */
564 if (target_read_memory (pc + 1, &op, 1) != 0)
565 return -1;
566 }
567 else
568 rex = 0;
569
570 if (op >= 0x58 && op <= 0x5f)
571 {
572 /* pop reg */
573 gdb_byte reg = (op & 0x0f) | ((rex & 1) << 3);
574
575 cache->prev_reg_addr[amd64_windows_w2gdb_regnum[reg]] = cur_sp;
576 cur_sp += 8;
577 pc += rex ? 2 : 1;
578 }
579 else
580 break;
581
582 /* Allow the user to break this loop. This shouldn't happen as the
583 number of consecutive pops should be small. */
584 QUIT;
585 }
586
587 /* Then decode the marker. */
588
589 /* Read opcode. */
590 if (target_read_memory (pc, &op, 1) != 0)
591 return -1;
592
593 switch (op)
594 {
595 case 0xc3:
596 /* Ret. */
597 cache->prev_rip_addr = cur_sp;
598 cache->prev_sp = cur_sp + 8;
599 return 1;
600
601 case 0xeb:
602 {
603 /* jmp rel8 */
604 gdb_byte rel8;
605 CORE_ADDR npc;
606
607 if (target_read_memory (pc + 1, &rel8, 1) != 0)
608 return -1;
609 npc = pc + 2 + (signed char) rel8;
610
611 /* If the jump is within the function, then this is not a marker,
612 otherwise this is a tail-call. */
613 return !pc_in_range (npc, cache);
614 }
615
616 case 0xe9:
617 {
618 /* jmp rel32 */
619 gdb_byte rel32[4];
620 CORE_ADDR npc;
621
622 if (target_read_memory (pc + 1, rel32, 4) != 0)
623 return -1;
624 npc = pc + 5 + extract_signed_integer (rel32, 4, byte_order);
625
626 /* If the jump is within the function, then this is not a marker,
627 otherwise this is a tail-call. */
628 return !pc_in_range (npc, cache);
629 }
630
631 case 0xc2:
632 {
633 /* ret n */
634 gdb_byte imm16[2];
635
636 if (target_read_memory (pc + 1, imm16, 2) != 0)
637 return -1;
638 cache->prev_rip_addr = cur_sp;
639 cache->prev_sp = cur_sp + 8
640 + extract_unsigned_integer (imm16, 2, byte_order);
641 return 1;
642 }
643
644 case 0xf3:
645 {
646 /* rep; ret */
647 gdb_byte op1;
648
649 if (target_read_memory (pc + 1, &op1, 1) != 0)
650 return -1;
651 if (op1 != 0xc3)
652 return 0;
653
654 cache->prev_rip_addr = cur_sp;
655 cache->prev_sp = cur_sp + 8;
656 return 1;
657 }
658
659 case 0x40:
660 case 0x41:
661 case 0x42:
662 case 0x43:
663 case 0x44:
664 case 0x45:
665 case 0x46:
666 case 0x47:
667 case 0x48:
668 case 0x49:
669 case 0x4a:
670 case 0x4b:
671 case 0x4c:
672 case 0x4d:
673 case 0x4e:
674 case 0x4f:
675 /* Got a REX prefix, read next byte. */
676 rex = op;
677 if (target_read_memory (pc + 1, &op, 1) != 0)
678 return -1;
679
680 if (op == 0xff)
681 {
682 /* rex jmp reg */
683 gdb_byte op1;
684
685 if (target_read_memory (pc + 2, &op1, 1) != 0)
686 return -1;
687 return (op1 & 0xf8) == 0xe0;
688 }
689 else
690 return 0;
691
692 default:
693 /* Not REX, so unknown. */
694 return 0;
695 }
696 }
697
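/* For illustration, a typical epilogue accepted by the decoder above
   (hypothetical bytes):

     41 5f    pop %r15    -> prev R15 saved at cur_sp, cur_sp += 8
     41 5e    pop %r14    -> likewise for R14
     5b       pop %rbx    -> likewise for RBX
     c3       ret         -> prev RIP at cur_sp, prev SP = cur_sp + 8

   A jmp marker instead of the ret is treated as a tail call when its
   target falls outside the function's [start_rva, end_rva) range.  */
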
698 /* Decode and execute unwind insns at UNWIND_INFO. */
699
700 static void
701 amd64_windows_frame_decode_insns (const frame_info_ptr &this_frame,
702 struct amd64_windows_frame_cache *cache,
703 CORE_ADDR unwind_info)
704 {
705 CORE_ADDR save_addr = 0;
706 CORE_ADDR cur_sp = cache->sp;
707 struct gdbarch *gdbarch = get_frame_arch (this_frame);
708 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
709 int first = 1;
710
711 /* There are at least three ways an unwind info entry can be shared:
712 1. Two different runtime_function entries (in .pdata) can point to the
713 same unwind info entry. There is no such indication while unwinding,
714 so we don't really care about that case. We suppose this scheme is
715 used to save memory when the unwind entries are exactly the same.
716 2. Chained unwind_info entries, with no unwind codes (no prologue).
717 There is a major difference with the previous case: the pc range for
718 the function is different (in case 1, the pc range comes from the
719 runtime_function entry; in case 2, the pc range for the chained entry
720 comes from the first unwind entry). Case 1 cannot be used instead as
721 the pc is not in the prologue. This case is officially documented.
722 (There might be unwind code in the first unwind entry to handle
723 additional unwinding). GCC (at least until gcc 5.0) doesn't chain
724 entries.
725 3. Undocumented unwind info redirection. Hard to know the exact purpose,
726 so it is considered as a memory optimization of case 2.
727 */
728
729 if (unwind_info & 1)
730 {
731 /* Unofficially documented unwind info redirection, when UNWIND_INFO
732 address is odd (http://www.codemachine.com/article_x64deepdive.html).
733 */
734 struct external_pex64_runtime_function d;
735
736 if (target_read_memory (cache->image_base + (unwind_info & ~1),
737 (gdb_byte *) &d, sizeof (d)) != 0)
738 return;
739
740 cache->start_rva
741 = extract_unsigned_integer (d.rva_BeginAddress, 4, byte_order);
742 cache->end_rva
743 = extract_unsigned_integer (d.rva_EndAddress, 4, byte_order);
744 unwind_info
745 = extract_unsigned_integer (d.rva_UnwindData, 4, byte_order);
746 }
747
748 while (1)
749 {
750 struct external_pex64_unwind_info ex_ui;
751 /* There are at most 256 16-bit unwind insns. */
752 gdb_byte insns[2 * 256];
753 gdb_byte *p;
754 gdb_byte *end_insns;
755 unsigned char codes_count;
756 unsigned char frame_reg;
757 CORE_ADDR start;
758
759 /* Read and decode header. */
760 if (target_read_memory (cache->image_base + unwind_info,
761 (gdb_byte *) &ex_ui, sizeof (ex_ui)) != 0)
762 return;
763
764 frame_debug_printf ("%s: ver: %02x, plgsz: %02x, cnt: %02x, frame: %02x",
765 paddress (gdbarch, unwind_info),
766 ex_ui.Version_Flags, ex_ui.SizeOfPrologue,
767 ex_ui.CountOfCodes, ex_ui.FrameRegisterOffset);
768
769 /* Check version. */
770 if (PEX64_UWI_VERSION (ex_ui.Version_Flags) != 1
771 && PEX64_UWI_VERSION (ex_ui.Version_Flags) != 2)
772 return;
773
774 start = cache->image_base + cache->start_rva;
775 if (first
776 && !(cache->pc >= start && cache->pc < start + ex_ui.SizeOfPrologue))
777 {
778 /* We want to detect if the PC points to an epilogue. This needs
779 to be checked only once, and an epilogue can be anywhere but in
780 the prologue. If so, the epilogue detection+decoding function is
781 sufficient. Otherwise, the unwinder will consider that the PC
782 is in the body of the function and will need to decode unwind
783 info. */
784 if (amd64_windows_frame_decode_epilogue (this_frame, cache) == 1)
785 return;
786
787 /* Not in an epilogue. Clear possible side effects. */
788 memset (cache->prev_reg_addr, 0, sizeof (cache->prev_reg_addr));
789 }
790
791 codes_count = ex_ui.CountOfCodes;
792 frame_reg = PEX64_UWI_FRAMEREG (ex_ui.FrameRegisterOffset);
793
794 if (frame_reg != 0)
795 {
796 /* According to MSDN:
797 If an FP reg is used, then any unwind code taking an offset must
798 only be used after the FP reg is established in the prologue. */
799 gdb_byte buf[8];
800 int frreg = amd64_windows_w2gdb_regnum[frame_reg];
801
802 get_frame_register (this_frame, frreg, buf);
803 save_addr = extract_unsigned_integer (buf, 8, byte_order);
804
805 frame_debug_printf (" frame_reg=%s, val=%s",
806 gdbarch_register_name (gdbarch, frreg),
807 paddress (gdbarch, save_addr));
808 }
809
810 /* Read opcodes. */
811 if (codes_count != 0
812 && target_read_memory (cache->image_base + unwind_info
813 + sizeof (ex_ui),
814 insns, codes_count * 2) != 0)
815 return;
816
817 end_insns = &insns[codes_count * 2];
818 p = insns;
819
820 /* Skip opcode 6 of version 2. This opcode is not documented. */
821 if (PEX64_UWI_VERSION (ex_ui.Version_Flags) == 2)
822 {
823 for (; p < end_insns; p += 2)
824 if (PEX64_UNWCODE_CODE (p[1]) != 6)
825 break;
826 }
827
828 for (; p < end_insns; p += 2)
829 {
830 int reg;
831
832 /* Virtually execute the operation if the pc is after the
833 corresponding instruction (that does matter in case of break
834 within the prologue). Note that for chained info (!first), the
835 prologue has been fully executed. */
836 if (cache->pc >= start + p[0] || cache->pc < start)
837 {
838 frame_debug_printf (" op #%u: off=0x%02x, insn=0x%02x",
839 (unsigned) (p - insns), p[0], p[1]);
840
841 /* If no frame register is defined, the current value of
842 rsp is used instead. */
843 if (frame_reg == 0)
844 save_addr = cur_sp;
845
846 reg = -1;
847
848 switch (PEX64_UNWCODE_CODE (p[1]))
849 {
850 case UWOP_PUSH_NONVOL:
851 /* Push pre-decrements RSP. */
852 reg = amd64_windows_w2gdb_regnum[PEX64_UNWCODE_INFO (p[1])];
853 cache->prev_reg_addr[reg] = cur_sp;
854 cur_sp += 8;
855 break;
856 case UWOP_ALLOC_LARGE:
857 if (PEX64_UNWCODE_INFO (p[1]) == 0)
858 cur_sp +=
859 8 * extract_unsigned_integer (p + 2, 2, byte_order);
860 else if (PEX64_UNWCODE_INFO (p[1]) == 1)
861 cur_sp += extract_unsigned_integer (p + 2, 4, byte_order);
862 else
863 return;
864 break;
865 case UWOP_ALLOC_SMALL:
866 cur_sp += 8 + 8 * PEX64_UNWCODE_INFO (p[1]);
867 break;
868 case UWOP_SET_FPREG:
869 cur_sp = save_addr
870 - PEX64_UWI_FRAMEOFF (ex_ui.FrameRegisterOffset) * 16;
871 break;
872 case UWOP_SAVE_NONVOL:
873 reg = amd64_windows_w2gdb_regnum[PEX64_UNWCODE_INFO (p[1])];
874 cache->prev_reg_addr[reg] = save_addr
875 + 8 * extract_unsigned_integer (p + 2, 2, byte_order);
876 break;
877 case UWOP_SAVE_NONVOL_FAR:
878 reg = amd64_windows_w2gdb_regnum[PEX64_UNWCODE_INFO (p[1])];
879 cache->prev_reg_addr[reg] = save_addr
880 + 8 * extract_unsigned_integer (p + 2, 4, byte_order);
881 break;
882 case UWOP_SAVE_XMM128:
883 cache->prev_xmm_addr[PEX64_UNWCODE_INFO (p[1])] =
884 save_addr
885 - 16 * extract_unsigned_integer (p + 2, 2, byte_order);
886 break;
887 case UWOP_SAVE_XMM128_FAR:
888 cache->prev_xmm_addr[PEX64_UNWCODE_INFO (p[1])] =
889 save_addr
890 - 16 * extract_unsigned_integer (p + 2, 4, byte_order);
891 break;
892 case UWOP_PUSH_MACHFRAME:
893 if (PEX64_UNWCODE_INFO (p[1]) == 0)
894 {
895 cache->prev_rip_addr = cur_sp + 0;
896 cache->prev_rsp_addr = cur_sp + 24;
897 cur_sp += 40;
898 }
899 else if (PEX64_UNWCODE_INFO (p[1]) == 1)
900 {
901 cache->prev_rip_addr = cur_sp + 8;
902 cache->prev_rsp_addr = cur_sp + 32;
903 cur_sp += 48;
904 }
905 else
906 return;
907 break;
908 default:
909 return;
910 }
911
912 /* Display address where the register was saved. */
913 if (reg >= 0)
914 frame_debug_printf (" [reg %s at %s]",
915 gdbarch_register_name (gdbarch, reg),
916 paddress (gdbarch,
917 cache->prev_reg_addr[reg]));
918 }
919
920 /* Adjust with the length of the opcode. */
921 switch (PEX64_UNWCODE_CODE (p[1]))
922 {
923 case UWOP_PUSH_NONVOL:
924 case UWOP_ALLOC_SMALL:
925 case UWOP_SET_FPREG:
926 case UWOP_PUSH_MACHFRAME:
927 break;
928 case UWOP_ALLOC_LARGE:
929 if (PEX64_UNWCODE_INFO (p[1]) == 0)
930 p += 2;
931 else if (PEX64_UNWCODE_INFO (p[1]) == 1)
932 p += 4;
933 else
934 return;
935 break;
936 case UWOP_SAVE_NONVOL:
937 case UWOP_SAVE_XMM128:
938 p += 2;
939 break;
940 case UWOP_SAVE_NONVOL_FAR:
941 case UWOP_SAVE_XMM128_FAR:
942 p += 4;
943 break;
944 default:
945 return;
946 }
947 }
948 if (PEX64_UWI_FLAGS (ex_ui.Version_Flags) != UNW_FLAG_CHAININFO)
949 {
950 /* End of unwind info. */
951 break;
952 }
953 else
954 {
955 /* Read the chained unwind info. */
956 struct external_pex64_runtime_function d;
957 CORE_ADDR chain_vma;
958
959 /* No longer the first entry. */
960 first = 0;
961
962 /* Stay aligned on word boundary. */
963 chain_vma = cache->image_base + unwind_info
964 + sizeof (ex_ui) + ((codes_count + 1) & ~1) * 2;
965
966 if (target_read_memory (chain_vma, (gdb_byte *) &d, sizeof (d)) != 0)
967 return;
968
969 /* Decode begin/end. This may be different from .pdata index, as
970 an unwind info may be shared by several functions (in particular
971 if many functions have the same prologue and handler). */
972 cache->start_rva =
973 extract_unsigned_integer (d.rva_BeginAddress, 4, byte_order);
974 cache->end_rva =
975 extract_unsigned_integer (d.rva_EndAddress, 4, byte_order);
976 unwind_info =
977 extract_unsigned_integer (d.rva_UnwindData, 4, byte_order);
978
979 frame_debug_printf ("next in chain: unwind_data=%s, start_rva=%s, "
980 "end_rva=%s",
981 paddress (gdbarch, unwind_info),
982 paddress (gdbarch, cache->start_rva),
983 paddress (gdbarch, cache->end_rva));
984 }
985
986 /* Allow the user to break this loop. */
987 QUIT;
988 }
989 /* PC is saved by the call. */
990 if (cache->prev_rip_addr == 0)
991 cache->prev_rip_addr = cur_sp;
992 cache->prev_sp = cur_sp + 8;
993
994 frame_debug_printf (" prev_sp: %s, prev_pc @%s",
995 paddress (gdbarch, cache->prev_sp),
996 paddress (gdbarch, cache->prev_rip_addr));
997 }
998
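/* For illustration, a small UNWIND_INFO as consumed by the loop above
   (hypothetical values):

     Version_Flags = 1, SizeOfPrologue = 6, CountOfCodes = 3,
     FrameRegisterOffset = 0, followed by the 16-bit unwind codes in
     reverse prologue order:

       off 6: UWOP_ALLOC_SMALL, info 2   (sub $0x18,%rsp -> cur_sp += 8 + 8*2)
       off 2: UWOP_PUSH_NONVOL, RSI      (RSI saved at cur_sp, cur_sp += 8)
       off 1: UWOP_PUSH_NONVOL, RBX      (RBX saved at cur_sp, cur_sp += 8)

   When the PC lies inside the prologue, only the codes whose offset is
   at or below PC - function start are replayed.  */
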
999 /* Find SEH unwind info for PC, returning 0 on success.
1000
1001 UNWIND_INFO is set to the RVA of the unwind info, IMAGE_BASE to
1002 the base address of the corresponding image, and START_RVA/END_RVA
1003 to the RVAs of the start and end of the function containing PC. */
1004
1005 static int
1006 amd64_windows_find_unwind_info (struct gdbarch *gdbarch, CORE_ADDR pc,
1007 CORE_ADDR *unwind_info,
1008 CORE_ADDR *image_base,
1009 CORE_ADDR *start_rva,
1010 CORE_ADDR *end_rva)
1011 {
1012 struct obj_section *sec;
1013 pe_data_type *pe;
1014 IMAGE_DATA_DIRECTORY *dir;
1015 struct objfile *objfile;
1016 unsigned long lo, hi;
1017 CORE_ADDR base;
1018 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1019
1020 /* Get the corresponding exception directory. */
1021 sec = find_pc_section (pc);
1022 if (sec == NULL)
1023 return -1;
1024 objfile = sec->objfile;
1025 pe = pe_data (sec->objfile->obfd);
1026 dir = &pe->pe_opthdr.DataDirectory[PE_EXCEPTION_TABLE];
1027
1028 base = pe->pe_opthdr.ImageBase + objfile->text_section_offset ();
1029 *image_base = base;
1030
1031 /* Find the entry.
1032
1033 Note: This does not handle dynamically added entries (for JIT
1034 engines). For this, we would need to ask the kernel directly,
1035 which means getting some info from the native layer. For the
1036 rest of the code, however, it's probably faster to search
1037 the entry ourselves. */
1038 lo = 0;
1039 hi = dir->Size / sizeof (struct external_pex64_runtime_function);
1040 *unwind_info = 0;
1041 while (lo <= hi)
1042 {
1043 unsigned long mid = lo + (hi - lo) / 2;
1044 struct external_pex64_runtime_function d;
1045 CORE_ADDR sa, ea;
1046
1047 if (target_read_memory (base + dir->VirtualAddress + mid * sizeof (d),
1048 (gdb_byte *) &d, sizeof (d)) != 0)
1049 return -1;
1050
1051 sa = extract_unsigned_integer (d.rva_BeginAddress, 4, byte_order);
1052 ea = extract_unsigned_integer (d.rva_EndAddress, 4, byte_order);
1053 if (pc < base + sa)
1054 hi = mid - 1;
1055 else if (pc >= base + ea)
1056 lo = mid + 1;
1057 else if (pc >= base + sa && pc < base + ea)
1058 {
1059 /* Got it. */
1060 *start_rva = sa;
1061 *end_rva = ea;
1062 *unwind_info =
1063 extract_unsigned_integer (d.rva_UnwindData, 4, byte_order);
1064 break;
1065 }
1066 else
1067 break;
1068 }
1069
1070 frame_debug_printf ("image_base=%s, unwind_data=%s",
1071 paddress (gdbarch, base),
1072 paddress (gdbarch, *unwind_info));
1073
1074 return 0;
1075 }
1076
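/* Note on the search above: the .pdata exception directory is an array
   of 12-byte RUNTIME_FUNCTION entries, each holding three 32-bit RVAs
   { BeginAddress, EndAddress, UnwindData }, sorted by BeginAddress,
   which is what makes the binary search valid.  */
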
1077 /* Fill THIS_CACHE using the native amd64-windows unwinding data
1078 for THIS_FRAME. */
1079
1080 static struct amd64_windows_frame_cache *
1081 amd64_windows_frame_cache (const frame_info_ptr &this_frame, void **this_cache)
1082 {
1083 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1084 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1085 struct amd64_windows_frame_cache *cache;
1086 gdb_byte buf[8];
1087 CORE_ADDR pc;
1088 CORE_ADDR unwind_info = 0;
1089
1090 if (*this_cache)
1091 return (struct amd64_windows_frame_cache *) *this_cache;
1092
1093 cache = FRAME_OBSTACK_ZALLOC (struct amd64_windows_frame_cache);
1094 *this_cache = cache;
1095
1096 /* Get current PC and SP. */
1097 pc = get_frame_pc (this_frame);
1098 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
1099 cache->sp = extract_unsigned_integer (buf, 8, byte_order);
1100 cache->pc = pc;
1101
1102 /* If we can't find the unwind info, carry on as though this is a
1103 leaf function. This situation can happen when PC==0, see
1104 https://sourceware.org/bugzilla/show_bug.cgi?id=30255. */
1105 if (amd64_windows_find_unwind_info (gdbarch, pc, &unwind_info,
1106 &cache->image_base,
1107 &cache->start_rva,
1108 &cache->end_rva)
1109 || unwind_info == 0)
1110 {
1111 /* Assume a leaf function. */
1112 cache->prev_sp = cache->sp + 8;
1113 cache->prev_rip_addr = cache->sp;
1114 }
1115 else
1116 {
1117 /* Decode unwind insns to compute saved addresses. */
1118 amd64_windows_frame_decode_insns (this_frame, cache, unwind_info);
1119 }
1120 return cache;
1121 }
1122
1123 /* Implement the "prev_register" method of struct frame_unwind
1124 using the standard Windows x64 SEH info. */
1125
1126 static struct value *
1127 amd64_windows_frame_prev_register (const frame_info_ptr &this_frame,
1128 void **this_cache, int regnum)
1129 {
1130 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1131 struct amd64_windows_frame_cache *cache =
1132 amd64_windows_frame_cache (this_frame, this_cache);
1133 CORE_ADDR prev;
1134
1135 frame_debug_printf ("%s for sp=%s",
1136 gdbarch_register_name (gdbarch, regnum),
1137 paddress (gdbarch, cache->prev_sp));
1138
1139 if (regnum >= AMD64_XMM0_REGNUM && regnum <= AMD64_XMM0_REGNUM + 15)
1140 prev = cache->prev_xmm_addr[regnum - AMD64_XMM0_REGNUM];
1141 else if (regnum == AMD64_RSP_REGNUM)
1142 {
1143 prev = cache->prev_rsp_addr;
1144 if (prev == 0)
1145 return frame_unwind_got_constant (this_frame, regnum, cache->prev_sp);
1146 }
1147 else if (regnum >= AMD64_RAX_REGNUM && regnum <= AMD64_R15_REGNUM)
1148 prev = cache->prev_reg_addr[regnum - AMD64_RAX_REGNUM];
1149 else if (regnum == AMD64_RIP_REGNUM)
1150 prev = cache->prev_rip_addr;
1151 else
1152 prev = 0;
1153
1154 if (prev != 0)
1155 frame_debug_printf (" -> at %s", paddress (gdbarch, prev));
1156
1157 if (prev)
1158 {
1159 /* Register was saved. */
1160 return frame_unwind_got_memory (this_frame, regnum, prev);
1161 }
1162 else
1163 {
1164 /* Register is either volatile or not modified. */
1165 return frame_unwind_got_register (this_frame, regnum, regnum);
1166 }
1167 }
1168
1169 /* Implement the "this_id" method of struct frame_unwind using
1170 the standard Windows x64 SEH info. */
1171
1172 static void
1173 amd64_windows_frame_this_id (const frame_info_ptr &this_frame, void **this_cache,
1174 struct frame_id *this_id)
1175 {
1176 struct amd64_windows_frame_cache *cache =
1177 amd64_windows_frame_cache (this_frame, this_cache);
1178
1179 *this_id = frame_id_build (cache->prev_sp,
1180 cache->image_base + cache->start_rva);
1181 }
1182
1183 /* Windows x64 SEH unwinder. */
1184
1185 static const struct frame_unwind amd64_windows_frame_unwind =
1186 {
1187 "amd64 windows",
1188 NORMAL_FRAME,
1189 default_frame_unwind_stop_reason,
1190 &amd64_windows_frame_this_id,
1191 &amd64_windows_frame_prev_register,
1192 NULL,
1193 default_frame_sniffer
1194 };
1195
1196 /* Implement the "skip_prologue" gdbarch method. */
1197
1198 static CORE_ADDR
1199 amd64_windows_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
1200 {
1201 CORE_ADDR func_addr;
1202 CORE_ADDR unwind_info = 0;
1203 CORE_ADDR image_base, start_rva, end_rva;
1204 struct external_pex64_unwind_info ex_ui;
1205
1206 /* Use prologue size from unwind info. */
1207 if (amd64_windows_find_unwind_info (gdbarch, pc, &unwind_info,
1208 &image_base, &start_rva, &end_rva) == 0)
1209 {
1210 if (unwind_info == 0)
1211 {
1212 /* Leaf function. */
1213 return pc;
1214 }
1215 else if (target_read_memory (image_base + unwind_info,
1216 (gdb_byte *) &ex_ui, sizeof (ex_ui)) == 0
1217 && PEX64_UWI_VERSION (ex_ui.Version_Flags) == 1)
1218 return std::max (pc, image_base + start_rva + ex_ui.SizeOfPrologue);
1219 }
1220
1221 /* See if we can determine the end of the prologue via the symbol
1222 table. If so, then return either the PC, or the PC after
1223 the prologue, whichever is greater. */
1224 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
1225 {
1226 CORE_ADDR post_prologue_pc
1227 = skip_prologue_using_sal (gdbarch, func_addr);
1228
1229 if (post_prologue_pc != 0)
1230 return std::max (pc, post_prologue_pc);
1231 }
1232
1233 return pc;
1234 }
1235
1236 /* Check for Win64 DLL jmp trampolines and find the jump destination. */
1237
1238 static CORE_ADDR
1239 amd64_windows_skip_trampoline_code (const frame_info_ptr &frame, CORE_ADDR pc)
1240 {
1241 CORE_ADDR destination = 0;
1242 struct gdbarch *gdbarch = get_frame_arch (frame);
1243 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1244
1245 /* Check for jmp *<offset>(%rip) (jump near, absolute indirect (/4)). */
1246 if (pc && read_memory_unsigned_integer (pc, 2, byte_order) == 0x25ff)
1247 {
1248 /* Get opcode offset and see if we can find a reference in our data. */
1249 ULONGEST offset
1250 = read_memory_unsigned_integer (pc + 2, 4, byte_order);
1251
1252 /* Compute the address the instruction dereferences (rip-relative). */
1253 CORE_ADDR indirect_addr = pc + offset + 6;
1254
1255 struct minimal_symbol *indsym
1256 = (indirect_addr
1257 ? lookup_minimal_symbol_by_pc (indirect_addr).minsym
1258 : NULL);
1259 const char *symname = indsym ? indsym->linkage_name () : NULL;
1260
1261 if (symname)
1262 {
1263 if (startswith (symname, "__imp_")
1264 || startswith (symname, "_imp_"))
1265 destination
1266 = read_memory_unsigned_integer (indirect_addr, 8, byte_order);
1267 }
1268 }
1269
1270 return destination;
1271 }
1272
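/* For illustration, the import trampoline shape recognized above
   (hypothetical symbol name):

     ff 25 xx xx xx xx    jmp *__imp_CreateFileW(%rip)

   The 32-bit displacement is relative to the end of the 6-byte
   instruction; the 8-byte IAT slot it designates holds the real
   destination, which is what is returned.  */
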
1273 /* Implement the "auto_wide_charset" gdbarch method. */
1274
1275 static const char *
1276 amd64_windows_auto_wide_charset (void)
1277 {
1278 return "UTF-16";
1279 }
1280
1281 /* Common parts for gdbarch initialization for Windows and Cygwin on AMD64. */
1282
1283 static void
1284 amd64_windows_init_abi_common (gdbarch_info info, struct gdbarch *gdbarch)
1285 {
1286 i386_gdbarch_tdep *tdep = gdbarch_tdep<i386_gdbarch_tdep> (gdbarch);
1287
1288 /* The dwarf2 unwinder (appended very early by i386_gdbarch_init) is
1289 preferred over the SEH one. The reasons are:
1290 - binaries without SEH but with dwarf2 debug info are correctly handled
1291 (although they aren't ABI compliant, gcc before 4.7 didn't emit SEH
1292 info).
1293 - dwarf3 DW_OP_call_frame_cfa is correctly handled (it can only be
1294 handled if the dwarf2 unwinder is used).
1295
1296 The call to amd64_init_abi appends default unwinders that aren't
1297 compatible with the SEH one.
1298 */
1299 frame_unwind_append_unwinder (gdbarch, &amd64_windows_frame_unwind);
1300
1301 amd64_init_abi (info, gdbarch,
1302 amd64_target_description (X86_XSTATE_SSE_MASK, false));
1303
1304 /* Function calls. */
1305 set_gdbarch_push_dummy_call (gdbarch, amd64_windows_push_dummy_call);
1306 set_gdbarch_return_value_as_value (gdbarch, amd64_windows_return_value);
1307 set_gdbarch_skip_main_prologue (gdbarch, amd64_skip_main_prologue);
1308 set_gdbarch_skip_trampoline_code (gdbarch,
1309 amd64_windows_skip_trampoline_code);
1310
1311 set_gdbarch_skip_prologue (gdbarch, amd64_windows_skip_prologue);
1312
1313 tdep->gregset_reg_offset = amd64_windows_gregset_reg_offset;
1314 tdep->gregset_num_regs = ARRAY_SIZE (amd64_windows_gregset_reg_offset);
1315 tdep->sizeof_gregset = AMD64_WINDOWS_SIZEOF_GREGSET;
1316 tdep->sizeof_fpregset = 0;
1317
1318 /* Core file support. */
1319 set_gdbarch_core_xfer_shared_libraries
1320 (gdbarch, windows_core_xfer_shared_libraries);
1321 set_gdbarch_core_pid_to_str (gdbarch, windows_core_pid_to_str);
1322
1323 set_gdbarch_auto_wide_charset (gdbarch, amd64_windows_auto_wide_charset);
1324 }
1325
1326 /* gdbarch initialization for Windows on AMD64. */
1327
1328 static void
1329 amd64_windows_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
1330 {
1331 amd64_windows_init_abi_common (info, gdbarch);
1332 windows_init_abi (info, gdbarch);
1333
1334 /* On Windows, "long" is only 32-bit. */
1335 set_gdbarch_long_bit (gdbarch, 32);
1336 }
1337
1338 /* Sigwrapper unwinder instruction patterns for AMD64. */
1339
1340 static const gdb_byte amd64_sigbe_bytes[] = {
1341 0x49, 0xc7, 0xc3, 0xf8, 0xff, 0xff, 0xff, /* movq $-8,%r11 */
1342 0x4d, 0x0f, 0xc1, 0x9a, /* xaddq %r11,$tls::stackptr(%r10) */
1343 /* 4 bytes for tls::stackptr operand. */
1344 };
1345
1346 static const gdb_byte amd64_sigdelayed_bytes[] = {
1347 0x49, 0xc7, 0xc3, 0xf8, 0xff, 0xff, 0xff, /* movq $-8,%r11 */
1348 0x4d, 0x0f, 0xc1, 0x9c, 0x24, /* xaddq %r11,$tls::stackptr(%r12) */
1349 /* 4 bytes for tls::stackptr operand. */
1350 };
1351
1352 static const gdb::array_view<const gdb_byte> amd64_sig_patterns[] {
1353 { amd64_sigbe_bytes },
1354 { amd64_sigdelayed_bytes },
1355 };
1356
1357 /* The sigwrapper unwinder on AMD64. */
1358
1359 static const cygwin_sigwrapper_frame_unwind
1360 amd64_cygwin_sigwrapper_frame_unwind (amd64_sig_patterns);
1361
1362 /* gdbarch initialization for Cygwin on AMD64. */
1363
1364 static void
1365 amd64_cygwin_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
1366 {
1367 frame_unwind_append_unwinder (gdbarch, &amd64_cygwin_sigwrapper_frame_unwind);
1368
1369 amd64_windows_init_abi_common (info, gdbarch);
1370 cygwin_init_abi (info, gdbarch);
1371 }
1372
1373 static gdb_osabi
1374 amd64_windows_osabi_sniffer (bfd *abfd)
1375 {
1376 const char *target_name = bfd_get_target (abfd);
1377
1378 if (!streq (target_name, "pei-x86-64"))
1379 return GDB_OSABI_UNKNOWN;
1380
1381 if (is_linked_with_cygwin_dll (abfd))
1382 return GDB_OSABI_CYGWIN;
1383
1384 return GDB_OSABI_WINDOWS;
1385 }
1386
1387 static enum gdb_osabi
1388 amd64_cygwin_core_osabi_sniffer (bfd *abfd)
1389 {
1390 const char *target_name = bfd_get_target (abfd);
1391
1392 /* Cygwin uses ELF core dumps. Do not claim all ELF executables;
1393 check whether there is a .reg section of the proper size. */
1394 if (strcmp (target_name, "elf64-x86-64") == 0)
1395 {
1396 asection *section = bfd_get_section_by_name (abfd, ".reg");
1397 if (section != nullptr
1398 && bfd_section_size (section) == AMD64_WINDOWS_SIZEOF_GREGSET)
1399 return GDB_OSABI_CYGWIN;
1400 }
1401
1402 return GDB_OSABI_UNKNOWN;
1403 }
1404
1405 void _initialize_amd64_windows_tdep ();
1406 void
1407 _initialize_amd64_windows_tdep ()
1408 {
1409 gdbarch_register_osabi (bfd_arch_i386, bfd_mach_x86_64, GDB_OSABI_WINDOWS,
1410 amd64_windows_init_abi);
1411 gdbarch_register_osabi (bfd_arch_i386, bfd_mach_x86_64, GDB_OSABI_CYGWIN,
1412 amd64_cygwin_init_abi);
1413
1414 gdbarch_register_osabi_sniffer (bfd_arch_i386, bfd_target_coff_flavour,
1415 amd64_windows_osabi_sniffer);
1416
1417 /* Cygwin uses elf core dumps. */
1418 gdbarch_register_osabi_sniffer (bfd_arch_i386, bfd_target_elf_flavour,
1419 amd64_cygwin_core_osabi_sniffer);
1420
1421 }