1 /* Copyright (C) 2009-2020 Free Software Foundation, Inc.
2
3 This file is part of GDB.
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 3 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program. If not, see <http://www.gnu.org/licenses/>. */
17
18 #include "defs.h"
19 #include "osabi.h"
20 #include "amd64-tdep.h"
21 #include "gdbsupport/x86-xstate.h"
22 #include "gdbtypes.h"
23 #include "gdbcore.h"
24 #include "regcache.h"
25 #include "windows-tdep.h"
26 #include "frame.h"
27 #include "objfiles.h"
28 #include "frame-unwind.h"
29 #include "coff/internal.h"
30 #include "coff/i386.h"
31 #include "coff/pe.h"
32 #include "libcoff.h"
33 #include "value.h"
34 #include <algorithm>
35
36 /* The registers used to pass integer arguments during a function call. */
37 static int amd64_windows_dummy_call_integer_regs[] =
38 {
39 AMD64_RCX_REGNUM, /* %rcx */
40 AMD64_RDX_REGNUM, /* %rdx */
41 AMD64_R8_REGNUM, /* %r8 */
42 AMD64_R9_REGNUM /* %r9 */
43 };
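/* For reference: under the Microsoft x64 calling convention the first four
   integer or pointer arguments are passed in RCX, RDX, R8 and R9 (in that
   order), floating-point arguments in XMM0-XMM3, and the caller always
   reserves a 32-byte "shadow" area on the stack for those four register
   slots.  The helpers below implement this argument-passing scheme for
   GDB's inferior function calls.  */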
44
45 /* This vector maps GDB's idea of a register's number into an offset into
46 the Windows API CONTEXT structure. */
47 static int amd64_windows_gregset_reg_offset[] =
48 {
49 120, /* Rax */
50 144, /* Rbx */
51 128, /* Rcx */
52 136, /* Rdx */
53 168, /* Rsi */
54 176, /* Rdi */
55 160, /* Rbp */
56 152, /* Rsp */
57 184, /* R8 */
58 192, /* R9 */
59 200, /* R10 */
60 208, /* R11 */
61 216, /* R12 */
62 224, /* R13 */
63 232, /* R14 */
64 240, /* R15 */
65 248, /* Rip */
66 68, /* EFlags */
67 56, /* SegCs */
68 66, /* SegSs */
69 58, /* SegDs */
70 60, /* SegEs */
71 62, /* SegFs */
72 64, /* SegGs */
73 288, /* FloatSave.FloatRegisters[0] */
74 304, /* FloatSave.FloatRegisters[1] */
75 320, /* FloatSave.FloatRegisters[2] */
76 336, /* FloatSave.FloatRegisters[3] */
77 352, /* FloatSave.FloatRegisters[4] */
78 368, /* FloatSave.FloatRegisters[5] */
79 384, /* FloatSave.FloatRegisters[6] */
80 400, /* FloatSave.FloatRegisters[7] */
81 256, /* FloatSave.ControlWord */
82 258, /* FloatSave.StatusWord */
83 260, /* FloatSave.TagWord */
84 268, /* FloatSave.ErrorSelector */
85 264, /* FloatSave.ErrorOffset */
86 276, /* FloatSave.DataSelector */
87 272, /* FloatSave.DataOffset */
88 268, /* FloatSave.ErrorSelector */
89 416, /* Xmm0 */
90 432, /* Xmm1 */
91 448, /* Xmm2 */
92 464, /* Xmm3 */
93 480, /* Xmm4 */
94 496, /* Xmm5 */
95 512, /* Xmm6 */
96 528, /* Xmm7 */
97 544, /* Xmm8 */
98 560, /* Xmm9 */
99 576, /* Xmm10 */
100 592, /* Xmm11 */
101 608, /* Xmm12 */
102 624, /* Xmm13 */
103 640, /* Xmm14 */
104 656, /* Xmm15 */
105 280, /* FloatSave.MxCsr */
106 };
107
108 #define AMD64_WINDOWS_SIZEOF_GREGSET 1232
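/* Note: 1232 (0x4d0) matches the size of the 64-bit Windows CONTEXT
   structure, into which the offsets above point.  */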
109
110 /* Return nonzero if an argument of type TYPE should be passed
111 via one of the integer registers. */
112
113 static int
114 amd64_windows_passed_by_integer_register (struct type *type)
115 {
116 switch (type->code ())
117 {
118 case TYPE_CODE_INT:
119 case TYPE_CODE_ENUM:
120 case TYPE_CODE_BOOL:
121 case TYPE_CODE_RANGE:
122 case TYPE_CODE_CHAR:
123 case TYPE_CODE_PTR:
124 case TYPE_CODE_REF:
125 case TYPE_CODE_RVALUE_REF:
126 case TYPE_CODE_STRUCT:
127 case TYPE_CODE_UNION:
128 return (TYPE_LENGTH (type) == 1
129 || TYPE_LENGTH (type) == 2
130 || TYPE_LENGTH (type) == 4
131 || TYPE_LENGTH (type) == 8);
132
133 default:
134 return 0;
135 }
136 }
137
138 /* Return nonzero if an argument of type TYPE should be passed
139 via one of the XMM registers. */
140
141 static int
142 amd64_windows_passed_by_xmm_register (struct type *type)
143 {
144 return ((type->code () == TYPE_CODE_FLT
145 || type->code () == TYPE_CODE_DECFLOAT)
146 && (TYPE_LENGTH (type) == 4 || TYPE_LENGTH (type) == 8));
147 }
148
149 /* Return non-zero iff an argument of the given TYPE should be passed
150 by pointer. */
151
152 static int
153 amd64_windows_passed_by_pointer (struct type *type)
154 {
155 if (amd64_windows_passed_by_integer_register (type))
156 return 0;
157
158 if (amd64_windows_passed_by_xmm_register (type))
159 return 0;
160
161 return 1;
162 }
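/* Illustrative examples (not exhaustive) of how the predicates above
   classify arguments:

     int, long long, void *, struct { int a; int b; }    -> integer register
     float, double                                       -> XMM register
     struct { int a; int b; int c; }, __int128, __m128   -> by pointer

   Aggregates qualify for a register only when their size is exactly 1, 2,
   4 or 8 bytes; anything else is copied to the stack and its address is
   passed instead.  */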
163
164 /* For each argument that should be passed by pointer, reserve some
165 stack space, store a copy of the argument on the stack, and replace
166 the argument by its address. Return the new Stack Pointer value.
167
168 NARGS is the number of arguments. ARGS is the array containing
169 the value of each argument. SP is value of the Stack Pointer. */
170
171 static CORE_ADDR
172 amd64_windows_adjust_args_passed_by_pointer (struct value **args,
173 int nargs, CORE_ADDR sp)
174 {
175 int i;
176
177 for (i = 0; i < nargs; i++)
178 if (amd64_windows_passed_by_pointer (value_type (args[i])))
179 {
180 struct type *type = value_type (args[i]);
181 const gdb_byte *valbuf = value_contents (args[i]);
182 const int len = TYPE_LENGTH (type);
183
184 /* Store a copy of that argument on the stack, aligned to
185 a 16-byte boundary, and then use the copy's address as
186 the argument. */
187
188 sp -= len;
189 sp &= ~0xf;
190 write_memory (sp, valbuf, len);
191
192 args[i]
193 = value_addr (value_from_contents_and_address (type, valbuf, sp));
194 }
195
196 return sp;
197 }
198
199 /* Store the value of ARG in register REGNO (right-justified).
200 REGCACHE is the register cache. */
201
202 static void
203 amd64_windows_store_arg_in_reg (struct regcache *regcache,
204 struct value *arg, int regno)
205 {
206 struct type *type = value_type (arg);
207 const gdb_byte *valbuf = value_contents (arg);
208 gdb_byte buf[8];
209
210 gdb_assert (TYPE_LENGTH (type) <= 8);
211 memset (buf, 0, sizeof buf);
212 memcpy (buf, valbuf, std::min (TYPE_LENGTH (type), (ULONGEST) 8));
213 regcache->cooked_write (regno, buf);
214 }
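/* The argument bytes are copied into a zero-filled 8-byte buffer above, so
   values narrower than 64 bits are zero-extended before being written to
   the register.  */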
215
216 /* Push the arguments for an inferior function call, and return
217 the updated value of the SP (Stack Pointer).
218
219 All arguments are identical to the arguments used in
220 amd64_windows_push_dummy_call. */
221
222 static CORE_ADDR
223 amd64_windows_push_arguments (struct regcache *regcache, int nargs,
224 struct value **args, CORE_ADDR sp,
225 function_call_return_method return_method)
226 {
227 int reg_idx = 0;
228 int i;
229 struct value **stack_args = XALLOCAVEC (struct value *, nargs);
230 int num_stack_args = 0;
231 int num_elements = 0;
232 int element = 0;
233
234 /* First, handle the arguments passed by pointer.
235
236 These arguments are replaced by pointers to a copy we are making
237 in inferior memory. So use a copy of the ARGS table, to avoid
238 modifying the original one. */
239 {
240 struct value **args1 = XALLOCAVEC (struct value *, nargs);
241
242 memcpy (args1, args, nargs * sizeof (struct value *));
243 sp = amd64_windows_adjust_args_passed_by_pointer (args1, nargs, sp);
244 args = args1;
245 }
246
247 /* Reserve a register for the "hidden" argument. */
248 if (return_method == return_method_struct)
249 reg_idx++;
250
251 for (i = 0; i < nargs; i++)
252 {
253 struct type *type = value_type (args[i]);
254 int len = TYPE_LENGTH (type);
255 int on_stack_p = 1;
256
257 if (reg_idx < ARRAY_SIZE (amd64_windows_dummy_call_integer_regs))
258 {
259 if (amd64_windows_passed_by_integer_register (type))
260 {
261 amd64_windows_store_arg_in_reg
262 (regcache, args[i],
263 amd64_windows_dummy_call_integer_regs[reg_idx]);
264 on_stack_p = 0;
265 reg_idx++;
266 }
267 else if (amd64_windows_passed_by_xmm_register (type))
268 {
269 amd64_windows_store_arg_in_reg
270 (regcache, args[i], AMD64_XMM0_REGNUM + reg_idx);
271 /* In case of varargs, these parameters must also be
272 passed via the integer registers. */
273 amd64_windows_store_arg_in_reg
274 (regcache, args[i],
275 amd64_windows_dummy_call_integer_regs[reg_idx]);
276 on_stack_p = 0;
277 reg_idx++;
278 }
279 }
280
281 if (on_stack_p)
282 {
283 num_elements += ((len + 7) / 8);
284 stack_args[num_stack_args++] = args[i];
285 }
286 }
287
288 /* Allocate space for the arguments on the stack, keeping it
289 aligned on a 16-byte boundary. */
290 sp -= num_elements * 8;
291 sp &= ~0xf;
292
293 /* Write out the arguments to the stack. */
294 for (i = 0; i < num_stack_args; i++)
295 {
296 struct type *type = value_type (stack_args[i]);
297 const gdb_byte *valbuf = value_contents (stack_args[i]);
298
299 write_memory (sp + element * 8, valbuf, TYPE_LENGTH (type));
300 element += ((TYPE_LENGTH (type) + 7) / 8);
301 }
302
303 return sp;
304 }
305
306 /* Implement the "push_dummy_call" gdbarch method. */
307
308 static CORE_ADDR
309 amd64_windows_push_dummy_call
310 (struct gdbarch *gdbarch, struct value *function,
311 struct regcache *regcache, CORE_ADDR bp_addr,
312 int nargs, struct value **args, CORE_ADDR sp,
313 function_call_return_method return_method, CORE_ADDR struct_addr)
314 {
315 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
316 gdb_byte buf[8];
317
318 /* Pass arguments. */
319 sp = amd64_windows_push_arguments (regcache, nargs, args, sp,
320 return_method);
321
322 /* Pass "hidden" argument". */
323 if (return_method == return_method_struct)
324 {
325 /* The "hidden" argument is passed throught the first argument
326 register. */
327 const int arg_regnum = amd64_windows_dummy_call_integer_regs[0];
328
329 store_unsigned_integer (buf, 8, byte_order, struct_addr);
330 regcache->cooked_write (arg_regnum, buf);
331 }
332
333 /* Reserve some memory on the stack for the integer-parameter
334 registers, as required by the ABI. */
335 sp -= ARRAY_SIZE (amd64_windows_dummy_call_integer_regs) * 8;
336
337 /* Store return address. */
338 sp -= 8;
339 store_unsigned_integer (buf, 8, byte_order, bp_addr);
340 write_memory (sp, buf, 8);
341
342 /* Update the stack pointer... */
343 store_unsigned_integer (buf, 8, byte_order, sp);
344 regcache->cooked_write (AMD64_RSP_REGNUM, buf);
345
346 /* ...and fake a frame pointer. */
347 regcache->cooked_write (AMD64_RBP_REGNUM, buf);
348
349 return sp + 16;
350 }
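/* After amd64_windows_push_dummy_call the inferior stack looks roughly
   like this (listed from the new SP upward):

        sp       -> return address (BP_ADDR)
        sp + 8   -> 32-byte register "shadow" area for RCX/RDX/R8/R9
        sp + 40  -> stack-passed arguments, one 8-byte slot each
        higher   -> 16-byte aligned copies of by-pointer arguments

   which mirrors what a compiler-generated call site produces under the
   Microsoft x64 ABI.  */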
351
352 /* Implement the "return_value" gdbarch method for amd64-windows. */
353
354 static enum return_value_convention
355 amd64_windows_return_value (struct gdbarch *gdbarch, struct value *function,
356 struct type *type, struct regcache *regcache,
357 gdb_byte *readbuf, const gdb_byte *writebuf)
358 {
359 int len = TYPE_LENGTH (type);
360 int regnum = -1;
361
362 /* See if our value is returned through a register. If it is, then
363 store the associated register number in REGNUM. */
364 switch (type->code ())
365 {
366 case TYPE_CODE_FLT:
367 case TYPE_CODE_DECFLOAT:
368 /* __m128, __m128i, __m128d, floats, and doubles are returned
369 via XMM0. */
370 if (len == 4 || len == 8 || len == 16)
371 regnum = AMD64_XMM0_REGNUM;
372 break;
373 default:
374 /* All other values that are 1, 2, 4 or 8 bytes long are returned
375 via RAX. */
376 if (len == 1 || len == 2 || len == 4 || len == 8)
377 regnum = AMD64_RAX_REGNUM;
378 break;
379 }
380
381 if (regnum < 0)
382 {
383 /* RAX contains the address where the return value has been stored. */
384 if (readbuf)
385 {
386 ULONGEST addr;
387
388 regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
389 read_memory (addr, readbuf, TYPE_LENGTH (type));
390 }
391 return RETURN_VALUE_ABI_RETURNS_ADDRESS;
392 }
393 else
394 {
395 /* Extract the return value from the register where it was stored. */
396 if (readbuf)
397 regcache->raw_read_part (regnum, 0, len, readbuf);
398 if (writebuf)
399 regcache->raw_write_part (regnum, 0, len, writebuf);
400 return RETURN_VALUE_REGISTER_CONVENTION;
401 }
402 }
403
404 /* Check that the code pointed to by PC corresponds to a call to
405 __main, and skip it if so. Return PC otherwise. */
406
407 static CORE_ADDR
408 amd64_skip_main_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
409 {
410 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
411 gdb_byte op;
412
413 target_read_memory (pc, &op, 1);
414 if (op == 0xe8)
415 {
416 gdb_byte buf[4];
417
418 if (target_read_memory (pc + 1, buf, sizeof buf) == 0)
419 {
420 struct bound_minimal_symbol s;
421 CORE_ADDR call_dest;
422
423 call_dest = pc + 5 + extract_signed_integer (buf, 4, byte_order);
424 s = lookup_minimal_symbol_by_pc (call_dest);
425 if (s.minsym != NULL
426 && s.minsym->linkage_name () != NULL
427 && strcmp (s.minsym->linkage_name (), "__main") == 0)
428 pc += 5;
429 }
430 }
431
432 return pc;
433 }
434
435 struct amd64_windows_frame_cache
436 {
437 /* ImageBase for the module. */
438 CORE_ADDR image_base;
439
440 /* Function start and end rva. */
441 CORE_ADDR start_rva;
442 CORE_ADDR end_rva;
443
444 /* Next instruction to be executed. */
445 CORE_ADDR pc;
446
447 /* Current sp. */
448 CORE_ADDR sp;
449
450 /* Address of saved integer and xmm registers. */
451 CORE_ADDR prev_reg_addr[16];
452 CORE_ADDR prev_xmm_addr[16];
453
454 /* The next two fields are set only for machine info frames. */
455
456 /* Likewise for RIP. */
457 CORE_ADDR prev_rip_addr;
458
459 /* Likewise for RSP. */
460 CORE_ADDR prev_rsp_addr;
461
462 /* Address of the previous frame. */
463 CORE_ADDR prev_sp;
464 };
465
466 /* Convert a Windows register number to gdb. */
467 static const enum amd64_regnum amd64_windows_w2gdb_regnum[] =
468 {
469 AMD64_RAX_REGNUM,
470 AMD64_RCX_REGNUM,
471 AMD64_RDX_REGNUM,
472 AMD64_RBX_REGNUM,
473 AMD64_RSP_REGNUM,
474 AMD64_RBP_REGNUM,
475 AMD64_RSI_REGNUM,
476 AMD64_RDI_REGNUM,
477 AMD64_R8_REGNUM,
478 AMD64_R9_REGNUM,
479 AMD64_R10_REGNUM,
480 AMD64_R11_REGNUM,
481 AMD64_R12_REGNUM,
482 AMD64_R13_REGNUM,
483 AMD64_R14_REGNUM,
484 AMD64_R15_REGNUM
485 };
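/* The table index is the register number as used by the Windows unwind
   data, i.e. the standard x64 instruction-encoding order (RAX, RCX, RDX,
   RBX, RSP, RBP, RSI, RDI, then R8-R15), which differs from GDB's
   AMD64_* numbering.  */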
486
487 /* Return TRUE iff PC is in the range of the function corresponding to
488 CACHE. */
489
490 static int
491 pc_in_range (CORE_ADDR pc, const struct amd64_windows_frame_cache *cache)
492 {
493 return (pc >= cache->image_base + cache->start_rva
494 && pc < cache->image_base + cache->end_rva);
495 }
496
497 /* Try to recognize and decode an epilogue sequence.
498
499 Return -1 if we fail to read the instructions for any reason.
500 Return 1 if an epilogue sequence was recognized, 0 otherwise. */
501
502 static int
503 amd64_windows_frame_decode_epilogue (struct frame_info *this_frame,
504 struct amd64_windows_frame_cache *cache)
505 {
506 /* According to MSDN an epilogue "must consist of either an add RSP,constant
507 or lea RSP,constant[FPReg], followed by a series of zero or more 8-byte
508 register pops and a return or a jmp".
509
510 Furthermore, according to RtlVirtualUnwind, the complete list of
511 epilogue markers is:
512 - ret [c3]
513 - ret n [c2 imm16]
514 - rep ret [f3 c3]
515 - jmp imm8 | imm32 [eb rel8] or [e9 rel32]
516 - jmp qword ptr imm32 - not handled
517 - rex.w jmp reg [4X ff eY]
518 */
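/* For example, once the PC is past the stack-deallocating "add $0x28, %rsp"
   (or "lea -0x20(%rbp), %rsp") instruction of an epilogue such as

        pop   %rbx
        pop   %rsi
        ret

   the loop below recognizes the remaining pops and the final marker.  */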
519
520 CORE_ADDR pc = cache->pc;
521 CORE_ADDR cur_sp = cache->sp;
522 struct gdbarch *gdbarch = get_frame_arch (this_frame);
523 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
524 gdb_byte op;
525 gdb_byte rex;
526
527 /* We don't care about the instruction deallocating the frame:
528 if it hasn't been executed, the pc is still in the body,
529 if it has been executed, the following epilog decoding will work. */
530
531 /* First decode:
532 - pop reg [41 58-5f] or [58-5f]. */
533
534 while (1)
535 {
536 /* Read opcode. */
537 if (target_read_memory (pc, &op, 1) != 0)
538 return -1;
539
540 if (op >= 0x40 && op <= 0x4f)
541 {
542 /* REX prefix. */
543 rex = op;
544
545 /* Read opcode. */
546 if (target_read_memory (pc + 1, &op, 1) != 0)
547 return -1;
548 }
549 else
550 rex = 0;
551
552 if (op >= 0x58 && op <= 0x5f)
553 {
554 /* pop reg */
555 gdb_byte reg = (op & 0x07) | ((rex & 1) << 3);
556
557 cache->prev_reg_addr[amd64_windows_w2gdb_regnum[reg]] = cur_sp;
558 cur_sp += 8;
559 pc += rex ? 2 : 1;
560 }
561 else
562 break;
563
564 /* Allow the user to break this loop. This shouldn't happen as the
565 number of consecutive pops should be small. */
566 QUIT;
567 }
568
569 /* Then decode the marker. */
570
571 /* Read opcode. */
572 if (target_read_memory (pc, &op, 1) != 0)
573 return -1;
574
575 switch (op)
576 {
577 case 0xc3:
578 /* Ret. */
579 cache->prev_rip_addr = cur_sp;
580 cache->prev_sp = cur_sp + 8;
581 return 1;
582
583 case 0xeb:
584 {
585 /* jmp rel8 */
586 gdb_byte rel8;
587 CORE_ADDR npc;
588
589 if (target_read_memory (pc + 1, &rel8, 1) != 0)
590 return -1;
591 npc = pc + 2 + (signed char) rel8;
592
593 /* If the jump is within the function, then this is not a marker,
594 otherwise this is a tail-call. */
595 return !pc_in_range (npc, cache);
596 }
597
598 case 0xe9:
599 {
600 /* jmp rel32 */
601 gdb_byte rel32[4];
602 CORE_ADDR npc;
603
604 if (target_read_memory (pc + 1, rel32, 4) != 0)
605 return -1;
606 npc = pc + 5 + extract_signed_integer (rel32, 4, byte_order);
607
608 /* If the jump is within the function, then this is not a marker,
609 otherwise this is a tail-call. */
610 return !pc_in_range (npc, cache);
611 }
612
613 case 0xc2:
614 {
615 /* ret n */
616 gdb_byte imm16[2];
617
618 if (target_read_memory (pc + 1, imm16, 2) != 0)
619 return -1;
620 cache->prev_rip_addr = cur_sp;
621 cache->prev_sp = cur_sp + 8
622 + extract_unsigned_integer (imm16, 2, byte_order);
623 return 1;
624 }
625
626 case 0xf3:
627 {
628 /* rep; ret */
629 gdb_byte op1;
630
631 if (target_read_memory (pc + 1, &op1, 1) != 0)
632 return -1;
633 if (op1 != 0xc3)
634 return 0;
635
636 cache->prev_rip_addr = cur_sp;
637 cache->prev_sp = cur_sp + 8;
638 return 1;
639 }
640
641 case 0x40:
642 case 0x41:
643 case 0x42:
644 case 0x43:
645 case 0x44:
646 case 0x45:
647 case 0x46:
648 case 0x47:
649 case 0x48:
650 case 0x49:
651 case 0x4a:
652 case 0x4b:
653 case 0x4c:
654 case 0x4d:
655 case 0x4e:
656 case 0x4f:
657 /* Got a REX prefix, read next byte. */
658 rex = op;
659 if (target_read_memory (pc + 1, &op, 1) != 0)
660 return -1;
661
662 if (op == 0xff)
663 {
664 /* rex jmp reg */
665 gdb_byte op1;
666
667 if (target_read_memory (pc + 2, &op1, 1) != 0)
668 return -1;
669 return (op1 & 0xf8) == 0xe0;
670 }
671 else
672 return 0;
673
674 default:
675 /* Not REX, so unknown. */
676 return 0;
677 }
678 }
679
680 /* Decode and execute unwind insns at UNWIND_INFO. */
681
682 static void
683 amd64_windows_frame_decode_insns (struct frame_info *this_frame,
684 struct amd64_windows_frame_cache *cache,
685 CORE_ADDR unwind_info)
686 {
687 CORE_ADDR save_addr = 0;
688 CORE_ADDR cur_sp = cache->sp;
689 struct gdbarch *gdbarch = get_frame_arch (this_frame);
690 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
691 int first = 1;
692
693 /* There are at least three ways an unwind info entry can be shared:
694 1. Two different runtime_function entries (in .pdata) can point to the
695 same unwind info entry. There is no such indication while unwinding,
696 so we don't really care about that case. We suppose this scheme is
697 used to save memory when the unwind entries are exactly the same.
698 2. Chained unwind_info entries, with no unwind codes (no prologue).
699 There is a major difference with the previous case: the pc range for
700 the function is different (in case 1, the pc range comes from the
701 runtime_function entry; in case 2, the pc range for the chained entry
702 comes from the first unwind entry). Case 1 cannot be used instead as
703 the pc is not in the prologue. This case is officially documented.
704 (There might be unwind code in the first unwind entry to handle
705 additional unwinding). GCC (at least until gcc 5.0) doesn't chain
706 entries.
707 3. Undocumented unwind info redirection. Hard to know the exact purpose,
708 so it is considered as a memory optimization of case 2.
709 */
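/* Rough layout of the UNWIND_INFO structure parsed below (field names as
   used by bfd's external_pex64_unwind_info):

        +0  Version_Flags        3-bit version, 5-bit flags
        +1  SizeOfPrologue       prologue length in bytes
        +2  CountOfCodes         number of 16-bit unwind codes
        +3  FrameRegisterOffset  4-bit frame register, 4-bit scaled offset
        +4  array of UNWIND_CODE entries, 2 bytes each
        ... optional chained RUNTIME_FUNCTION or handler data

   Each UNWIND_CODE records the prologue offset of an instruction and the
   operation (UWOP_*) needed to undo it.  */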
710
711 if (unwind_info & 1)
712 {
713 /* Unofficially documented unwind info redirection, when UNWIND_INFO
714 address is odd (http://www.codemachine.com/article_x64deepdive.html).
715 */
716 struct external_pex64_runtime_function d;
717
718 if (target_read_memory (cache->image_base + (unwind_info & ~1),
719 (gdb_byte *) &d, sizeof (d)) != 0)
720 return;
721
722 cache->start_rva
723 = extract_unsigned_integer (d.rva_BeginAddress, 4, byte_order);
724 cache->end_rva
725 = extract_unsigned_integer (d.rva_EndAddress, 4, byte_order);
726 unwind_info
727 = extract_unsigned_integer (d.rva_UnwindData, 4, byte_order);
728 }
729
730 while (1)
731 {
732 struct external_pex64_unwind_info ex_ui;
733 /* There are at most 256 16-bit unwind insns. */
734 gdb_byte insns[2 * 256];
735 gdb_byte *p;
736 gdb_byte *end_insns;
737 unsigned char codes_count;
738 unsigned char frame_reg;
739 CORE_ADDR start;
740
741 /* Read and decode header. */
742 if (target_read_memory (cache->image_base + unwind_info,
743 (gdb_byte *) &ex_ui, sizeof (ex_ui)) != 0)
744 return;
745
746 if (frame_debug)
747 fprintf_unfiltered
748 (gdb_stdlog,
749 "amd64_windows_frame_decodes_insn: "
750 "%s: ver: %02x, plgsz: %02x, cnt: %02x, frame: %02x\n",
751 paddress (gdbarch, unwind_info),
752 ex_ui.Version_Flags, ex_ui.SizeOfPrologue,
753 ex_ui.CountOfCodes, ex_ui.FrameRegisterOffset);
754
755 /* Check version. */
756 if (PEX64_UWI_VERSION (ex_ui.Version_Flags) != 1
757 && PEX64_UWI_VERSION (ex_ui.Version_Flags) != 2)
758 return;
759
760 start = cache->image_base + cache->start_rva;
761 if (first
762 && !(cache->pc >= start && cache->pc < start + ex_ui.SizeOfPrologue))
763 {
764 /* We want to detect if the PC points to an epilogue. This needs
765 to be checked only once, and an epilogue can be anywhere but in
766 the prologue. If so, the epilogue detection+decoding function is
767 sufficient. Otherwise, the unwinder will consider that the PC
768 is in the body of the function and will need to decode unwind
769 info. */
770 if (amd64_windows_frame_decode_epilogue (this_frame, cache) == 1)
771 return;
772
773 /* Not in an epilog. Clear possible side effects. */
774 memset (cache->prev_reg_addr, 0, sizeof (cache->prev_reg_addr));
775 }
776
777 codes_count = ex_ui.CountOfCodes;
778 frame_reg = PEX64_UWI_FRAMEREG (ex_ui.FrameRegisterOffset);
779
780 if (frame_reg != 0)
781 {
782 /* According to msdn:
783 If an FP reg is used, then any unwind code taking an offset must
784 only be used after the FP reg is established in the prolog. */
785 gdb_byte buf[8];
786 int frreg = amd64_windows_w2gdb_regnum[frame_reg];
787
788 get_frame_register (this_frame, frreg, buf);
789 save_addr = extract_unsigned_integer (buf, 8, byte_order);
790
791 if (frame_debug)
792 fprintf_unfiltered (gdb_stdlog, " frame_reg=%s, val=%s\n",
793 gdbarch_register_name (gdbarch, frreg),
794 paddress (gdbarch, save_addr));
795 }
796
797 /* Read opcodes. */
798 if (codes_count != 0
799 && target_read_memory (cache->image_base + unwind_info
800 + sizeof (ex_ui),
801 insns, codes_count * 2) != 0)
802 return;
803
804 end_insns = &insns[codes_count * 2];
805 p = insns;
806
807 /* Skip leading unwind codes of type 6 (version 2 only); this opcode is not documented. */
808 if (PEX64_UWI_VERSION (ex_ui.Version_Flags) == 2)
809 {
810 for (; p < end_insns; p += 2)
811 if (PEX64_UNWCODE_CODE (p[1]) != 6)
812 break;
813 }
814
815 for (; p < end_insns; p += 2)
816 {
817 int reg;
818
819 /* Virtually execute the operation if the pc is after the
820 corresponding instruction (that does matter in case of break
821 within the prologue). Note that for chained info (!first), the
822 prologue has been fully executed. */
823 if (cache->pc >= start + p[0] || cache->pc < start)
824 {
825 if (frame_debug)
826 fprintf_unfiltered
827 (gdb_stdlog, " op #%u: off=0x%02x, insn=0x%02x\n",
828 (unsigned) (p - insns), p[0], p[1]);
829
830 /* If no frame register is defined, the current value of
831 rsp is used instead. */
832 if (frame_reg == 0)
833 save_addr = cur_sp;
834
835 reg = -1;
836
837 switch (PEX64_UNWCODE_CODE (p[1]))
838 {
839 case UWOP_PUSH_NONVOL:
840 /* Push pre-decrements RSP. */
841 reg = amd64_windows_w2gdb_regnum[PEX64_UNWCODE_INFO (p[1])];
842 cache->prev_reg_addr[reg] = cur_sp;
843 cur_sp += 8;
844 break;
845 case UWOP_ALLOC_LARGE:
846 if (PEX64_UNWCODE_INFO (p[1]) == 0)
847 cur_sp +=
848 8 * extract_unsigned_integer (p + 2, 2, byte_order);
849 else if (PEX64_UNWCODE_INFO (p[1]) == 1)
850 cur_sp += extract_unsigned_integer (p + 2, 4, byte_order);
851 else
852 return;
853 break;
854 case UWOP_ALLOC_SMALL:
855 cur_sp += 8 + 8 * PEX64_UNWCODE_INFO (p[1]);
856 break;
857 case UWOP_SET_FPREG:
858 cur_sp = save_addr
859 - PEX64_UWI_FRAMEOFF (ex_ui.FrameRegisterOffset) * 16;
860 break;
861 case UWOP_SAVE_NONVOL:
862 reg = amd64_windows_w2gdb_regnum[PEX64_UNWCODE_INFO (p[1])];
863 cache->prev_reg_addr[reg] = save_addr
864 + 8 * extract_unsigned_integer (p + 2, 2, byte_order);
865 break;
866 case UWOP_SAVE_NONVOL_FAR:
867 reg = amd64_windows_w2gdb_regnum[PEX64_UNWCODE_INFO (p[1])];
868 cache->prev_reg_addr[reg] = save_addr
869 + 8 * extract_unsigned_integer (p + 2, 4, byte_order);
870 break;
871 case UWOP_SAVE_XMM128:
872 cache->prev_xmm_addr[PEX64_UNWCODE_INFO (p[1])] =
873 save_addr
874 - 16 * extract_unsigned_integer (p + 2, 2, byte_order);
875 break;
876 case UWOP_SAVE_XMM128_FAR:
877 cache->prev_xmm_addr[PEX64_UNWCODE_INFO (p[1])] =
878 save_addr
879 - 16 * extract_unsigned_integer (p + 2, 4, byte_order);
880 break;
881 case UWOP_PUSH_MACHFRAME:
882 if (PEX64_UNWCODE_INFO (p[1]) == 0)
883 {
884 cache->prev_rip_addr = cur_sp + 0;
885 cache->prev_rsp_addr = cur_sp + 24;
886 cur_sp += 40;
887 }
888 else if (PEX64_UNWCODE_INFO (p[1]) == 1)
889 {
890 cache->prev_rip_addr = cur_sp + 8;
891 cache->prev_rsp_addr = cur_sp + 32;
892 cur_sp += 48;
893 }
894 else
895 return;
896 break;
897 default:
898 return;
899 }
900
901 /* Display address where the register was saved. */
902 if (frame_debug && reg >= 0)
903 fprintf_unfiltered
904 (gdb_stdlog, " [reg %s at %s]\n",
905 gdbarch_register_name (gdbarch, reg),
906 paddress (gdbarch, cache->prev_reg_addr[reg]));
907 }
908
909 /* Adjust with the length of the opcode. */
910 switch (PEX64_UNWCODE_CODE (p[1]))
911 {
912 case UWOP_PUSH_NONVOL:
913 case UWOP_ALLOC_SMALL:
914 case UWOP_SET_FPREG:
915 case UWOP_PUSH_MACHFRAME:
916 break;
917 case UWOP_ALLOC_LARGE:
918 if (PEX64_UNWCODE_INFO (p[1]) == 0)
919 p += 2;
920 else if (PEX64_UNWCODE_INFO (p[1]) == 1)
921 p += 4;
922 else
923 return;
924 break;
925 case UWOP_SAVE_NONVOL:
926 case UWOP_SAVE_XMM128:
927 p += 2;
928 break;
929 case UWOP_SAVE_NONVOL_FAR:
930 case UWOP_SAVE_XMM128_FAR:
931 p += 4;
932 break;
933 default:
934 return;
935 }
936 }
937 if (PEX64_UWI_FLAGS (ex_ui.Version_Flags) != UNW_FLAG_CHAININFO)
938 {
939 /* End of unwind info. */
940 break;
941 }
942 else
943 {
944 /* Read the chained unwind info. */
945 struct external_pex64_runtime_function d;
946 CORE_ADDR chain_vma;
947
948 /* Not anymore the first entry. */
949 first = 0;
950
951 /* Stay aligned on word boundary. */
952 chain_vma = cache->image_base + unwind_info
953 + sizeof (ex_ui) + ((codes_count + 1) & ~1) * 2;
954
955 if (target_read_memory (chain_vma, (gdb_byte *) &d, sizeof (d)) != 0)
956 return;
957
958 /* Decode begin/end. This may be different from .pdata index, as
959 an unwind info may be shared by several functions (in particular
960 if many functions have the same prolog and handler). */
961 cache->start_rva =
962 extract_unsigned_integer (d.rva_BeginAddress, 4, byte_order);
963 cache->end_rva =
964 extract_unsigned_integer (d.rva_EndAddress, 4, byte_order);
965 unwind_info =
966 extract_unsigned_integer (d.rva_UnwindData, 4, byte_order);
967
968 if (frame_debug)
969 fprintf_unfiltered
970 (gdb_stdlog,
971 "amd64_windows_frame_decodes_insn (next in chain):"
972 " unwind_data=%s, start_rva=%s, end_rva=%s\n",
973 paddress (gdbarch, unwind_info),
974 paddress (gdbarch, cache->start_rva),
975 paddress (gdbarch, cache->end_rva));
976 }
977
978 /* Allow the user to break this loop. */
979 QUIT;
980 }
981 /* PC is saved by the call. */
982 if (cache->prev_rip_addr == 0)
983 cache->prev_rip_addr = cur_sp;
984 cache->prev_sp = cur_sp + 8;
985
986 if (frame_debug)
987 fprintf_unfiltered (gdb_stdlog, " prev_sp: %s, prev_pc @%s\n",
988 paddress (gdbarch, cache->prev_sp),
989 paddress (gdbarch, cache->prev_rip_addr));
990 }
991
992 /* Find SEH unwind info for PC, returning 0 on success.
993
994 UNWIND_INFO is set to the RVA of the unwind info, IMAGE_BASE to the
995 base address of the corresponding image, and START_RVA and END_RVA
996 to the start and end RVAs of the function containing PC. */
997
998 static int
999 amd64_windows_find_unwind_info (struct gdbarch *gdbarch, CORE_ADDR pc,
1000 CORE_ADDR *unwind_info,
1001 CORE_ADDR *image_base,
1002 CORE_ADDR *start_rva,
1003 CORE_ADDR *end_rva)
1004 {
1005 struct obj_section *sec;
1006 pe_data_type *pe;
1007 IMAGE_DATA_DIRECTORY *dir;
1008 struct objfile *objfile;
1009 unsigned long lo, hi;
1010 CORE_ADDR base;
1011 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1012
1013 /* Get the corresponding exception directory. */
1014 sec = find_pc_section (pc);
1015 if (sec == NULL)
1016 return -1;
1017 objfile = sec->objfile;
1018 pe = pe_data (sec->objfile->obfd);
1019 dir = &pe->pe_opthdr.DataDirectory[PE_EXCEPTION_TABLE];
1020
1021 base = pe->pe_opthdr.ImageBase + objfile->text_section_offset ();
1022 *image_base = base;
1023
1024 /* Find the entry.
1025
1026 Note: This does not handle dynamically added entries (for JIT
1027 engines). For this, we would need to ask the kernel directly,
1028 which means getting some info from the native layer. For the
1029 rest of the code, however, it's probably faster to search
1030 the entry ourselves. */
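/* The exception directory (.pdata) holds RUNTIME_FUNCTION records sorted
   by their BeginAddress RVA, which is what makes the binary search below
   valid.  */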
1031 lo = 0;
1032 hi = dir->Size / sizeof (struct external_pex64_runtime_function);
1033 *unwind_info = 0;
1034 while (lo <= hi)
1035 {
1036 unsigned long mid = lo + (hi - lo) / 2;
1037 struct external_pex64_runtime_function d;
1038 CORE_ADDR sa, ea;
1039
1040 if (target_read_memory (base + dir->VirtualAddress + mid * sizeof (d),
1041 (gdb_byte *) &d, sizeof (d)) != 0)
1042 return -1;
1043
1044 sa = extract_unsigned_integer (d.rva_BeginAddress, 4, byte_order);
1045 ea = extract_unsigned_integer (d.rva_EndAddress, 4, byte_order);
1046 if (pc < base + sa)
1047 hi = mid - 1;
1048 else if (pc >= base + ea)
1049 lo = mid + 1;
1050 else if (pc >= base + sa && pc < base + ea)
1051 {
1052 /* Got it. */
1053 *start_rva = sa;
1054 *end_rva = ea;
1055 *unwind_info =
1056 extract_unsigned_integer (d.rva_UnwindData, 4, byte_order);
1057 break;
1058 }
1059 else
1060 break;
1061 }
1062
1063 if (frame_debug)
1064 fprintf_unfiltered
1065 (gdb_stdlog,
1066 "amd64_windows_find_unwind_data: image_base=%s, unwind_data=%s\n",
1067 paddress (gdbarch, base), paddress (gdbarch, *unwind_info));
1068
1069 return 0;
1070 }
1071
1072 /* Fill THIS_CACHE using the native amd64-windows unwinding data
1073 for THIS_FRAME. */
1074
1075 static struct amd64_windows_frame_cache *
1076 amd64_windows_frame_cache (struct frame_info *this_frame, void **this_cache)
1077 {
1078 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1079 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1080 struct amd64_windows_frame_cache *cache;
1081 gdb_byte buf[8];
1082 CORE_ADDR pc;
1083 CORE_ADDR unwind_info = 0;
1084
1085 if (*this_cache)
1086 return (struct amd64_windows_frame_cache *) *this_cache;
1087
1088 cache = FRAME_OBSTACK_ZALLOC (struct amd64_windows_frame_cache);
1089 *this_cache = cache;
1090
1091 /* Get current PC and SP. */
1092 pc = get_frame_pc (this_frame);
1093 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
1094 cache->sp = extract_unsigned_integer (buf, 8, byte_order);
1095 cache->pc = pc;
1096
1097 if (amd64_windows_find_unwind_info (gdbarch, pc, &unwind_info,
1098 &cache->image_base,
1099 &cache->start_rva,
1100 &cache->end_rva))
1101 return cache;
1102
1103 if (unwind_info == 0)
1104 {
1105 /* Assume a leaf function. */
1106 cache->prev_sp = cache->sp + 8;
1107 cache->prev_rip_addr = cache->sp;
1108 }
1109 else
1110 {
1111 /* Decode unwind insns to compute saved addresses. */
1112 amd64_windows_frame_decode_insns (this_frame, cache, unwind_info);
1113 }
1114 return cache;
1115 }
1116
1117 /* Implement the "prev_register" method of struct frame_unwind
1118 using the standard Windows x64 SEH info. */
1119
1120 static struct value *
1121 amd64_windows_frame_prev_register (struct frame_info *this_frame,
1122 void **this_cache, int regnum)
1123 {
1124 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1125 struct amd64_windows_frame_cache *cache =
1126 amd64_windows_frame_cache (this_frame, this_cache);
1127 CORE_ADDR prev;
1128
1129 if (frame_debug)
1130 fprintf_unfiltered (gdb_stdlog,
1131 "amd64_windows_frame_prev_register %s for sp=%s\n",
1132 gdbarch_register_name (gdbarch, regnum),
1133 paddress (gdbarch, cache->prev_sp));
1134
1135 if (regnum >= AMD64_XMM0_REGNUM && regnum <= AMD64_XMM0_REGNUM + 15)
1136 prev = cache->prev_xmm_addr[regnum - AMD64_XMM0_REGNUM];
1137 else if (regnum == AMD64_RSP_REGNUM)
1138 {
1139 prev = cache->prev_rsp_addr;
1140 if (prev == 0)
1141 return frame_unwind_got_constant (this_frame, regnum, cache->prev_sp);
1142 }
1143 else if (regnum >= AMD64_RAX_REGNUM && regnum <= AMD64_R15_REGNUM)
1144 prev = cache->prev_reg_addr[regnum - AMD64_RAX_REGNUM];
1145 else if (regnum == AMD64_RIP_REGNUM)
1146 prev = cache->prev_rip_addr;
1147 else
1148 prev = 0;
1149
1150 if (prev && frame_debug)
1151 fprintf_unfiltered (gdb_stdlog, " -> at %s\n", paddress (gdbarch, prev));
1152
1153 if (prev)
1154 {
1155 /* Register was saved. */
1156 return frame_unwind_got_memory (this_frame, regnum, prev);
1157 }
1158 else
1159 {
1160 /* Register is either volatile or not modified. */
1161 return frame_unwind_got_register (this_frame, regnum, regnum);
1162 }
1163 }
1164
1165 /* Implement the "this_id" method of struct frame_unwind using
1166 the standard Windows x64 SEH info. */
1167
1168 static void
1169 amd64_windows_frame_this_id (struct frame_info *this_frame, void **this_cache,
1170 struct frame_id *this_id)
1171 {
1172 struct amd64_windows_frame_cache *cache =
1173 amd64_windows_frame_cache (this_frame, this_cache);
1174
1175 *this_id = frame_id_build (cache->prev_sp,
1176 cache->image_base + cache->start_rva);
1177 }
1178
1179 /* Windows x64 SEH unwinder. */
1180
1181 static const struct frame_unwind amd64_windows_frame_unwind =
1182 {
1183 NORMAL_FRAME,
1184 default_frame_unwind_stop_reason,
1185 &amd64_windows_frame_this_id,
1186 &amd64_windows_frame_prev_register,
1187 NULL,
1188 default_frame_sniffer
1189 };
1190
1191 /* Implement the "skip_prologue" gdbarch method. */
1192
1193 static CORE_ADDR
1194 amd64_windows_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
1195 {
1196 CORE_ADDR func_addr;
1197 CORE_ADDR unwind_info = 0;
1198 CORE_ADDR image_base, start_rva, end_rva;
1199 struct external_pex64_unwind_info ex_ui;
1200
1201 /* Use prologue size from unwind info. */
1202 if (amd64_windows_find_unwind_info (gdbarch, pc, &unwind_info,
1203 &image_base, &start_rva, &end_rva) == 0)
1204 {
1205 if (unwind_info == 0)
1206 {
1207 /* Leaf function. */
1208 return pc;
1209 }
1210 else if (target_read_memory (image_base + unwind_info,
1211 (gdb_byte *) &ex_ui, sizeof (ex_ui)) == 0
1212 && PEX64_UWI_VERSION (ex_ui.Version_Flags) == 1)
1213 return std::max (pc, image_base + start_rva + ex_ui.SizeOfPrologue);
1214 }
1215
1216 /* See if we can determine the end of the prologue via the symbol
1217 table. If so, then return either the PC, or the PC after
1218 the prologue, whichever is greater. */
1219 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
1220 {
1221 CORE_ADDR post_prologue_pc
1222 = skip_prologue_using_sal (gdbarch, func_addr);
1223
1224 if (post_prologue_pc != 0)
1225 return std::max (pc, post_prologue_pc);
1226 }
1227
1228 return pc;
1229 }
1230
1231 /* Check Win64 DLL jmp trampolines and find jump destination. */
1232
1233 static CORE_ADDR
1234 amd64_windows_skip_trampoline_code (struct frame_info *frame, CORE_ADDR pc)
1235 {
1236 CORE_ADDR destination = 0;
1237 struct gdbarch *gdbarch = get_frame_arch (frame);
1238 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1239
1240 /* Check for jmp *<offset>(%rip) (jump near, absolute indirect (/4)). */
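/* A typical import-table thunk generated by the linker looks like:
        ff 25 xx xx xx xx    jmp *__imp_<function>(%rip)
   where the 32-bit displacement is RIP-relative, hence the
   "pc + offset + 6" computation below (6 being the instruction
   length).  */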
1241 if (pc && read_memory_unsigned_integer (pc, 2, byte_order) == 0x25ff)
1242 {
1243 /* Get opcode offset and see if we can find a reference in our data. */
1244 ULONGEST offset
1245 = read_memory_unsigned_integer (pc + 2, 4, byte_order);
1246
1247 /* Get address of function pointer at end of pc. */
1248 CORE_ADDR indirect_addr = pc + offset + 6;
1249
1250 struct minimal_symbol *indsym
1251 = (indirect_addr
1252 ? lookup_minimal_symbol_by_pc (indirect_addr).minsym
1253 : NULL);
1254 const char *symname = indsym ? indsym->linkage_name () : NULL;
1255
1256 if (symname)
1257 {
1258 if (startswith (symname, "__imp_")
1259 || startswith (symname, "_imp_"))
1260 destination
1261 = read_memory_unsigned_integer (indirect_addr, 8, byte_order);
1262 }
1263 }
1264
1265 return destination;
1266 }
1267
1268 /* Implement the "auto_wide_charset" gdbarch method. */
1269
1270 static const char *
1271 amd64_windows_auto_wide_charset (void)
1272 {
1273 return "UTF-16";
1274 }
1275
1276 /* Common parts for gdbarch initialization for Windows and Cygwin on AMD64. */
1277
1278 static void
1279 amd64_windows_init_abi_common (gdbarch_info info, struct gdbarch *gdbarch)
1280 {
1281 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1282
1283 /* The dwarf2 unwinder (appended very early by i386_gdbarch_init) is
1284 preferred over the SEH one. The reasons are:
1285 - binaries without SEH but with dwarf2 debug info are correctly handled
1286 (although they aren't ABI compliant, gcc before 4.7 didn't emit SEH
1287 info).
1288 - dwarf3 DW_OP_call_frame_cfa is correctly handled (it can only be
1289 handled if the dwarf2 unwinder is used).
1290
1291 The call to amd64_init_abi appends default unwinders, that aren't
1292 compatible with the SEH one.
1293 */
1294 frame_unwind_append_unwinder (gdbarch, &amd64_windows_frame_unwind);
1295
1296 amd64_init_abi (info, gdbarch,
1297 amd64_target_description (X86_XSTATE_SSE_MASK, false));
1298
1299 /* Function calls. */
1300 set_gdbarch_push_dummy_call (gdbarch, amd64_windows_push_dummy_call);
1301 set_gdbarch_return_value (gdbarch, amd64_windows_return_value);
1302 set_gdbarch_skip_main_prologue (gdbarch, amd64_skip_main_prologue);
1303 set_gdbarch_skip_trampoline_code (gdbarch,
1304 amd64_windows_skip_trampoline_code);
1305
1306 set_gdbarch_skip_prologue (gdbarch, amd64_windows_skip_prologue);
1307
1308 tdep->gregset_reg_offset = amd64_windows_gregset_reg_offset;
1309 tdep->gregset_num_regs = ARRAY_SIZE (amd64_windows_gregset_reg_offset);
1310 tdep->sizeof_gregset = AMD64_WINDOWS_SIZEOF_GREGSET;
1311 tdep->sizeof_fpregset = 0;
1312
1313 /* Core file support. */
1314 set_gdbarch_core_xfer_shared_libraries
1315 (gdbarch, windows_core_xfer_shared_libraries);
1316 set_gdbarch_core_pid_to_str (gdbarch, windows_core_pid_to_str);
1317
1318 set_gdbarch_auto_wide_charset (gdbarch, amd64_windows_auto_wide_charset);
1319 }
1320
1321 /* gdbarch initialization for Windows on AMD64. */
1322
1323 static void
1324 amd64_windows_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
1325 {
1326 amd64_windows_init_abi_common (info, gdbarch);
1327 windows_init_abi (info, gdbarch);
1328
1329 /* On Windows, "long" is only 32 bits. */
1330 set_gdbarch_long_bit (gdbarch, 32);
1331 }
1332
1333 /* gdbarch initialization for Cygwin on AMD64. */
1334
1335 static void
1336 amd64_cygwin_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
1337 {
1338 amd64_windows_init_abi_common (info, gdbarch);
1339 cygwin_init_abi (info, gdbarch);
1340 }
1341
1342 static gdb_osabi
1343 amd64_windows_osabi_sniffer (bfd *abfd)
1344 {
1345 const char *target_name = bfd_get_target (abfd);
1346
1347 if (!streq (target_name, "pei-x86-64"))
1348 return GDB_OSABI_UNKNOWN;
1349
1350 if (is_linked_with_cygwin_dll (abfd))
1351 return GDB_OSABI_CYGWIN;
1352
1353 return GDB_OSABI_WINDOWS;
1354 }
1355
1356 static enum gdb_osabi
1357 amd64_cygwin_core_osabi_sniffer (bfd *abfd)
1358 {
1359 const char *target_name = bfd_get_target (abfd);
1360
1361 /* Cygwin uses ELF core dumps. Do not claim all ELF executables;
1362 check whether there is a .reg section of proper size. */
1363 if (strcmp (target_name, "elf64-x86-64") == 0)
1364 {
1365 asection *section = bfd_get_section_by_name (abfd, ".reg");
1366 if (section != nullptr
1367 && bfd_section_size (section) == AMD64_WINDOWS_SIZEOF_GREGSET)
1368 return GDB_OSABI_CYGWIN;
1369 }
1370
1371 return GDB_OSABI_UNKNOWN;
1372 }
1373
1374 void _initialize_amd64_windows_tdep ();
1375 void
1376 _initialize_amd64_windows_tdep ()
1377 {
1378 gdbarch_register_osabi (bfd_arch_i386, bfd_mach_x86_64, GDB_OSABI_WINDOWS,
1379 amd64_windows_init_abi);
1380 gdbarch_register_osabi (bfd_arch_i386, bfd_mach_x86_64, GDB_OSABI_CYGWIN,
1381 amd64_cygwin_init_abi);
1382
1383 gdbarch_register_osabi_sniffer (bfd_arch_i386, bfd_target_coff_flavour,
1384 amd64_windows_osabi_sniffer);
1385
1386 /* Cygwin uses ELF core dumps. */
1387 gdbarch_register_osabi_sniffer (bfd_arch_i386, bfd_target_elf_flavour,
1388 amd64_cygwin_core_osabi_sniffer);
1389
1390 }