1 /* Copyright (C) 2009-2015 Free Software Foundation, Inc.
2
3 This file is part of GDB.
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 3 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program. If not, see <http://www.gnu.org/licenses/>. */
17
18 #include "defs.h"
19 #include "osabi.h"
20 #include "amd64-tdep.h"
21 #include "gdbtypes.h"
22 #include "gdbcore.h"
23 #include "regcache.h"
24 #include "windows-tdep.h"
25 #include "frame.h"
26 #include "objfiles.h"
27 #include "frame-unwind.h"
28 #include "coff/internal.h"
29 #include "coff/i386.h"
30 #include "coff/pe.h"
31 #include "libcoff.h"
32 #include "value.h"
33
34 /* The registers used to pass integer arguments during a function call. */
35 static int amd64_windows_dummy_call_integer_regs[] =
36 {
37 AMD64_RCX_REGNUM, /* %rcx */
38 AMD64_RDX_REGNUM, /* %rdx */
39 AMD64_R8_REGNUM, /* %r8 */
40 AMD64_R9_REGNUM /* %r9 */
41 };
42
43 /* Return nonzero if an argument of type TYPE should be passed
44 via one of the integer registers. */
45
46 static int
47 amd64_windows_passed_by_integer_register (struct type *type)
48 {
49 switch (TYPE_CODE (type))
50 {
51 case TYPE_CODE_INT:
52 case TYPE_CODE_ENUM:
53 case TYPE_CODE_BOOL:
54 case TYPE_CODE_RANGE:
55 case TYPE_CODE_CHAR:
56 case TYPE_CODE_PTR:
57 case TYPE_CODE_REF:
58 case TYPE_CODE_STRUCT:
59 case TYPE_CODE_UNION:
60 return (TYPE_LENGTH (type) == 1
61 || TYPE_LENGTH (type) == 2
62 || TYPE_LENGTH (type) == 4
63 || TYPE_LENGTH (type) == 8);
64
65 default:
66 return 0;
67 }
68 }
69
70 /* Return nonzero if an argument of type TYPE should be passed
71 via one of the XMM registers. */
72
73 static int
74 amd64_windows_passed_by_xmm_register (struct type *type)
75 {
76 return ((TYPE_CODE (type) == TYPE_CODE_FLT
77 || TYPE_CODE (type) == TYPE_CODE_DECFLOAT)
78 && (TYPE_LENGTH (type) == 4 || TYPE_LENGTH (type) == 8));
79 }
80
81 /* Return non-zero iff an argument of the given TYPE should be passed
82 by pointer. */
83
84 static int
85 amd64_windows_passed_by_pointer (struct type *type)
86 {
87 if (amd64_windows_passed_by_integer_register (type))
88 return 0;
89
90 if (amd64_windows_passed_by_xmm_register (type))
91 return 0;
92
93 return 1;
94 }
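
/* For example, under the rules above an "int", a pointer or an 8-byte
   struct is passed in one of the four integer registers listed earlier;
   a "float" or "double" is passed in an XMM register; a 3-byte or
   16-byte struct matches neither rule and is therefore passed by
   pointer to a temporary copy made on the stack.  */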
95
96 /* For each argument that should be passed by pointer, reserve some
97 stack space, store a copy of the argument on the stack, and replace
98 the argument by its address. Return the new Stack Pointer value.
99
100 NARGS is the number of arguments. ARGS is the array containing
101 the value of each argument. SP is the value of the Stack Pointer. */
102
103 static CORE_ADDR
104 amd64_windows_adjust_args_passed_by_pointer (struct value **args,
105 int nargs, CORE_ADDR sp)
106 {
107 int i;
108
109 for (i = 0; i < nargs; i++)
110 if (amd64_windows_passed_by_pointer (value_type (args[i])))
111 {
112 struct type *type = value_type (args[i]);
113 const gdb_byte *valbuf = value_contents (args[i]);
114 const int len = TYPE_LENGTH (type);
115
116 /* Store a copy of that argument on the stack, aligned to
117 a 16-byte boundary, and then use the copy's address as
118 the argument. */
119
120 sp -= len;
121 sp &= ~0xf;
122 write_memory (sp, valbuf, len);
123
124 args[i]
125 = value_addr (value_from_contents_and_address (type, valbuf, sp));
126 }
127
128 return sp;
129 }
130
131 /* Store the value of ARG in register REGNO (right-justified).
132 REGCACHE is the register cache. */
133
134 static void
135 amd64_windows_store_arg_in_reg (struct regcache *regcache,
136 struct value *arg, int regno)
137 {
138 struct type *type = value_type (arg);
139 const gdb_byte *valbuf = value_contents (arg);
140 gdb_byte buf[8];
141
142 gdb_assert (TYPE_LENGTH (type) <= 8);
143 memset (buf, 0, sizeof buf);
144 memcpy (buf, valbuf, min (TYPE_LENGTH (type), 8));
145 regcache_cooked_write (regcache, regno, buf);
146 }
147
148 /* Push the arguments for an inferior function call, and return
149 the updated value of the SP (Stack Pointer).
150
151 All arguments are identical to the arguments used in
152 amd64_windows_push_dummy_call. */
153
154 static CORE_ADDR
155 amd64_windows_push_arguments (struct regcache *regcache, int nargs,
156 struct value **args, CORE_ADDR sp,
157 int struct_return)
158 {
159 int reg_idx = 0;
160 int i;
161 struct value **stack_args = XALLOCAVEC (struct value *, nargs);
162 int num_stack_args = 0;
163 int num_elements = 0;
164 int element = 0;
165
166 /* First, handle the arguments passed by pointer.
167
168 These arguments are replaced by pointers to a copy we are making
169 in inferior memory. So use a copy of the ARGS table, to avoid
170 modifying the original one. */
171 {
172 struct value **args1 = XALLOCAVEC (struct value *, nargs);
173
174 memcpy (args1, args, nargs * sizeof (struct value *));
175 sp = amd64_windows_adjust_args_passed_by_pointer (args1, nargs, sp);
176 args = args1;
177 }
178
179 /* Reserve a register for the "hidden" argument. */
180 if (struct_return)
181 reg_idx++;
182
183 for (i = 0; i < nargs; i++)
184 {
185 struct type *type = value_type (args[i]);
186 int len = TYPE_LENGTH (type);
187 int on_stack_p = 1;
188
189 if (reg_idx < ARRAY_SIZE (amd64_windows_dummy_call_integer_regs))
190 {
191 if (amd64_windows_passed_by_integer_register (type))
192 {
193 amd64_windows_store_arg_in_reg
194 (regcache, args[i],
195 amd64_windows_dummy_call_integer_regs[reg_idx]);
196 on_stack_p = 0;
197 reg_idx++;
198 }
199 else if (amd64_windows_passed_by_xmm_register (type))
200 {
201 amd64_windows_store_arg_in_reg
202 (regcache, args[i], AMD64_XMM0_REGNUM + reg_idx);
203 /* In case of varargs, these parameters must also be
204 passed via the integer registers. */
205 amd64_windows_store_arg_in_reg
206 (regcache, args[i],
207 amd64_windows_dummy_call_integer_regs[reg_idx]);
208 on_stack_p = 0;
209 reg_idx++;
210 }
211 }
212
213 if (on_stack_p)
214 {
215 num_elements += ((len + 7) / 8);
216 stack_args[num_stack_args++] = args[i];
217 }
218 }
219
220 /* Allocate space for the arguments on the stack, keeping it
221 aligned on a 16-byte boundary. */
222 sp -= num_elements * 8;
223 sp &= ~0xf;
224
225 /* Write out the arguments to the stack. */
226 for (i = 0; i < num_stack_args; i++)
227 {
228 struct type *type = value_type (stack_args[i]);
229 const gdb_byte *valbuf = value_contents (stack_args[i]);
230
231 write_memory (sp + element * 8, valbuf, TYPE_LENGTH (type));
232 element += ((TYPE_LENGTH (type) + 7) / 8);
233 }
234
235 return sp;
236 }
237
238 /* Implement the "push_dummy_call" gdbarch method. */
239
240 static CORE_ADDR
241 amd64_windows_push_dummy_call
242 (struct gdbarch *gdbarch, struct value *function,
243 struct regcache *regcache, CORE_ADDR bp_addr,
244 int nargs, struct value **args,
245 CORE_ADDR sp, int struct_return, CORE_ADDR struct_addr)
246 {
247 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
248 gdb_byte buf[8];
249
250 /* Pass arguments. */
251 sp = amd64_windows_push_arguments (regcache, nargs, args, sp,
252 struct_return);
253
254 /* Pass the "hidden" argument. */
255 if (struct_return)
256 {
257 /* The "hidden" argument is passed through the first argument
258 register. */
259 const int arg_regnum = amd64_windows_dummy_call_integer_regs[0];
260
261 store_unsigned_integer (buf, 8, byte_order, struct_addr);
262 regcache_cooked_write (regcache, arg_regnum, buf);
263 }
264
265 /* Reserve some memory on the stack for the integer-parameter
266 registers, as required by the ABI. */
267 sp -= ARRAY_SIZE (amd64_windows_dummy_call_integer_regs) * 8;
268
269 /* Store return address. */
270 sp -= 8;
271 store_unsigned_integer (buf, 8, byte_order, bp_addr);
272 write_memory (sp, buf, 8);
273
274 /* Update the stack pointer... */
275 store_unsigned_integer (buf, 8, byte_order, sp);
276 regcache_cooked_write (regcache, AMD64_RSP_REGNUM, buf);
277
278 /* ...and fake a frame pointer. */
279 regcache_cooked_write (regcache, AMD64_RBP_REGNUM, buf);
280
281 return sp + 16;
282 }
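
/* A rough sketch of the stack built above, from the new RSP upward
   ("home area" is the conventional ABI name for the reserved
   register-parameter space):

       RSP + 0x00    return address (BP_ADDR)
       RSP + 0x08    32-byte home area reserved for RCX/RDX/R8/R9
       RSP + 0x28    fifth and subsequent arguments, one 8-byte slot each
       above that    16-byte aligned copies of the arguments that had to
                     be passed by pointer

   which mirrors what a regular Windows x64 caller leaves on the stack at
   the point of the call.  */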
283
284 /* Implement the "return_value" gdbarch method for amd64-windows. */
285
286 static enum return_value_convention
287 amd64_windows_return_value (struct gdbarch *gdbarch, struct value *function,
288 struct type *type, struct regcache *regcache,
289 gdb_byte *readbuf, const gdb_byte *writebuf)
290 {
291 int len = TYPE_LENGTH (type);
292 int regnum = -1;
293
294 /* See if our value is returned through a register. If it is, then
295 store the associated register number in REGNUM. */
296 switch (TYPE_CODE (type))
297 {
298 case TYPE_CODE_FLT:
299 case TYPE_CODE_DECFLOAT:
300 /* __m128, __m128i, __m128d, floats, and doubles are returned
301 via XMM0. */
302 if (len == 4 || len == 8 || len == 16)
303 regnum = AMD64_XMM0_REGNUM;
304 break;
305 default:
306 /* All other values that are 1, 2, 4 or 8 bytes long are returned
307 via RAX. */
308 if (len == 1 || len == 2 || len == 4 || len == 8)
309 regnum = AMD64_RAX_REGNUM;
310 break;
311 }
312
313 if (regnum < 0)
314 {
315 /* RAX contains the address where the return value has been stored. */
316 if (readbuf)
317 {
318 ULONGEST addr;
319
320 regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
321 read_memory (addr, readbuf, TYPE_LENGTH (type));
322 }
323 return RETURN_VALUE_ABI_RETURNS_ADDRESS;
324 }
325 else
326 {
327 /* Extract the return value from the register where it was stored. */
328 if (readbuf)
329 regcache_raw_read_part (regcache, regnum, 0, len, readbuf);
330 if (writebuf)
331 regcache_raw_write_part (regcache, regnum, 0, len, writebuf);
332 return RETURN_VALUE_REGISTER_CONVENTION;
333 }
334 }
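
/* For example: a function returning a "double" has its result read from
   or written to XMM0; one returning an "int" uses RAX; a function
   returning a 24-byte struct falls into the first branch above, where
   RAX holds the address of the caller-provided buffer containing the
   value.  */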
335
336 /* If the code pointed to by PC corresponds to a call to __main,
337 skip that call and return the address past it. Return PC otherwise. */
338
339 static CORE_ADDR
340 amd64_skip_main_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
341 {
342 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
343 gdb_byte op;
344
345 target_read_memory (pc, &op, 1);
346 if (op == 0xe8)
347 {
348 gdb_byte buf[4];
349
350 if (target_read_memory (pc + 1, buf, sizeof buf) == 0)
351 {
352 struct bound_minimal_symbol s;
353 CORE_ADDR call_dest;
354
355 call_dest = pc + 5 + extract_signed_integer (buf, 4, byte_order);
356 s = lookup_minimal_symbol_by_pc (call_dest);
357 if (s.minsym != NULL
358 && MSYMBOL_LINKAGE_NAME (s.minsym) != NULL
359 && strcmp (MSYMBOL_LINKAGE_NAME (s.minsym), "__main") == 0)
360 pc += 5;
361 }
362 }
363
364 return pc;
365 }
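
/* The sequence recognized above is the 5-byte "call __main" that
   GCC/MinGW emits at the beginning of main to run constructors: opcode
   0xe8 followed by a 32-bit PC-relative displacement, hence the
   "pc + 5" target computation and the 5-byte skip.  */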
366
367 struct amd64_windows_frame_cache
368 {
369 /* ImageBase for the module. */
370 CORE_ADDR image_base;
371
372 /* Function start and end rva. */
373 CORE_ADDR start_rva;
374 CORE_ADDR end_rva;
375
376 /* Next instruction to be executed. */
377 CORE_ADDR pc;
378
379 /* Current sp. */
380 CORE_ADDR sp;
381
382 /* Address of saved integer and xmm registers. */
383 CORE_ADDR prev_reg_addr[16];
384 CORE_ADDR prev_xmm_addr[16];
385
386 /* The next two fields are set only for machine-frame info. */
387
388 /* Likewise for RIP. */
389 CORE_ADDR prev_rip_addr;
390
391 /* Likewise for RSP. */
392 CORE_ADDR prev_rsp_addr;
393
394 /* Address of the previous frame. */
395 CORE_ADDR prev_sp;
396 };
397
398 /* Map a Windows unwind register number to the corresponding GDB register number. */
399 static const enum amd64_regnum amd64_windows_w2gdb_regnum[] =
400 {
401 AMD64_RAX_REGNUM,
402 AMD64_RCX_REGNUM,
403 AMD64_RDX_REGNUM,
404 AMD64_RBX_REGNUM,
405 AMD64_RSP_REGNUM,
406 AMD64_RBP_REGNUM,
407 AMD64_RSI_REGNUM,
408 AMD64_RDI_REGNUM,
409 AMD64_R8_REGNUM,
410 AMD64_R9_REGNUM,
411 AMD64_R10_REGNUM,
412 AMD64_R11_REGNUM,
413 AMD64_R12_REGNUM,
414 AMD64_R13_REGNUM,
415 AMD64_R14_REGNUM,
416 AMD64_R15_REGNUM
417 };
418
419 /* Return TRUE iff PC is in the range of the function corresponding to
420 CACHE. */
421
422 static int
423 pc_in_range (CORE_ADDR pc, const struct amd64_windows_frame_cache *cache)
424 {
425 return (pc >= cache->image_base + cache->start_rva
426 && pc < cache->image_base + cache->end_rva);
427 }
428
429 /* Try to recognize and decode an epilogue sequence.
430
431 Return -1 if we fail to read the instructions for any reason.
432 Return 1 if an epilogue sequence was recognized, 0 otherwise. */
433
434 static int
435 amd64_windows_frame_decode_epilogue (struct frame_info *this_frame,
436 struct amd64_windows_frame_cache *cache)
437 {
438 /* According to MSDN an epilogue "must consist of either an add RSP,constant
439 or lea RSP,constant[FPReg], followed by a series of zero or more 8-byte
440 register pops and a return or a jmp".
441
442 Furthermore, according to RtlVirtualUnwind, the complete list of
443 epilogue markers is:
444 - ret [c3]
445 - ret n [c2 imm16]
446 - rep ret [f3 c3]
447 - jmp imm8 | imm32 [eb rel8] or [e9 rel32]
448 - jmp qword ptr imm32 - not handled
449 - rex.w jmp reg [4X ff eY]
450 */
451
452 CORE_ADDR pc = cache->pc;
453 CORE_ADDR cur_sp = cache->sp;
454 struct gdbarch *gdbarch = get_frame_arch (this_frame);
455 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
456 gdb_byte op;
457 gdb_byte rex;
458
459 /* We don't care about the instruction deallocating the frame:
460 if it hasn't been executed, the pc is still in the body;
461 if it has been executed, the following epilogue decoding will work. */
462
463 /* First decode:
464 - pop reg [41 58-5f] or [58-5f]. */
465
466 while (1)
467 {
468 /* Read opcode. */
469 if (target_read_memory (pc, &op, 1) != 0)
470 return -1;
471
472 if (op >= 0x40 && op <= 0x4f)
473 {
474 /* REX prefix. */
475 rex = op;
476
477 /* Read opcode. */
478 if (target_read_memory (pc + 1, &op, 1) != 0)
479 return -1;
480 }
481 else
482 rex = 0;
483
484 if (op >= 0x58 && op <= 0x5f)
485 {
486 /* pop reg */
487 gdb_byte reg = (op & 0x07) | ((rex & 1) << 3);
488
489 cache->prev_reg_addr[amd64_windows_w2gdb_regnum[reg]] = cur_sp;
490 cur_sp += 8;
pc += rex ? 2 : 1;
491 }
492 else
493 break;
494
495 /* Allow the user to break this loop. This shouldn't happen as the
496 number of consecutive pops should be small. */
497 QUIT;
498 }
499
500 /* Then decode the marker. */
501
502 /* Read opcode. */
503 if (target_read_memory (pc, &op, 1) != 0)
504 return -1;
505
506 switch (op)
507 {
508 case 0xc3:
509 /* Ret. */
510 cache->prev_rip_addr = cur_sp;
511 cache->prev_sp = cur_sp + 8;
512 return 1;
513
514 case 0xeb:
515 {
516 /* jmp rel8 */
517 gdb_byte rel8;
518 CORE_ADDR npc;
519
520 if (target_read_memory (pc + 1, &rel8, 1) != 0)
521 return -1;
522 npc = pc + 2 + (signed char) rel8;
523
524 /* If the jump is within the function, then this is not a marker,
525 otherwise this is a tail-call. */
526 return !pc_in_range (npc, cache);
527 }
528
529 case 0xe9:
530 {
531 /* jmp rel32 */
532 gdb_byte rel32[4];
533 CORE_ADDR npc;
534
535 if (target_read_memory (pc + 1, rel32, 4) != 0)
536 return -1;
537 npc = pc + 5 + extract_signed_integer (rel32, 4, byte_order);
538
539 /* If the jump is within the function, then this is not a marker,
540 otherwise this is a tail-call. */
541 return !pc_in_range (npc, cache);
542 }
543
544 case 0xc2:
545 {
546 /* ret n */
547 gdb_byte imm16[2];
548
549 if (target_read_memory (pc + 1, imm16, 2) != 0)
550 return -1;
551 cache->prev_rip_addr = cur_sp;
552 cache->prev_sp = cur_sp + 8
553 + extract_unsigned_integer (imm16, 2, byte_order);
554 return 1;
555 }
556
557 case 0xf3:
558 {
559 /* rep; ret */
560 gdb_byte op1;
561
562 if (target_read_memory (pc + 1, &op1, 1) != 0)
563 return -1;
564 if (op1 != 0xc3)
565 return 0;
566
567 cache->prev_rip_addr = cur_sp;
568 cache->prev_sp = cur_sp + 8;
569 return 1;
570 }
571
572 case 0x40:
573 case 0x41:
574 case 0x42:
575 case 0x43:
576 case 0x44:
577 case 0x45:
578 case 0x46:
579 case 0x47:
580 case 0x48:
581 case 0x49:
582 case 0x4a:
583 case 0x4b:
584 case 0x4c:
585 case 0x4d:
586 case 0x4e:
587 case 0x4f:
588 /* Got a REX prefix, read next byte. */
589 rex = op;
590 if (target_read_memory (pc + 1, &op, 1) != 0)
591 return -1;
592
593 if (op == 0xff)
594 {
595 /* rex jmp reg */
596 gdb_byte op1;
597 unsigned int reg;
598 gdb_byte buf[8];
599
600 if (target_read_memory (pc + 2, &op1, 1) != 0)
601 return -1;
602 return (op1 & 0xf8) == 0xe0;
603 }
604 else
605 return 0;
606
607 default:
608 /* Not REX, so unknown. */
609 return 0;
610 }
611 }
612
613 /* Decode and execute unwind insns at UNWIND_INFO. */
614
615 static void
616 amd64_windows_frame_decode_insns (struct frame_info *this_frame,
617 struct amd64_windows_frame_cache *cache,
618 CORE_ADDR unwind_info)
619 {
620 CORE_ADDR save_addr = 0;
621 CORE_ADDR cur_sp = cache->sp;
622 struct gdbarch *gdbarch = get_frame_arch (this_frame);
623 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
624 int first = 1;
625
626 /* There are at least 3 possibilities to share an unwind info entry:
627 1. Two different runtime_function entries (in .pdata) can point to the
628 same unwind info entry. There is no such indication while unwinding,
629 so we don't really care about that case. We suppose this scheme is
630 used to save memory when the unwind entries are exactly the same.
631 2. Chained unwind_info entries, with no unwind codes (no prologue).
632 There is a major difference with the previous case: the pc range for
633 the function is different (in case 1, the pc range comes from the
634 runtime_function entry; in case 2, the pc range for the chained entry
635 comes from the first unwind entry). Case 1 cannot be used instead as
636 the pc is not in the prologue. This case is officially documented.
637 (There might be unwind code in the first unwind entry to handle
638 additional unwinding). GCC (at least until gcc 5.0) doesn't chain
639 entries.
640 3. Undocumented unwind info redirection. Hard to know the exact purpose,
641 so it is considered as a memory optimization of case 2.
642 */
643
644 if (unwind_info & 1)
645 {
646 /* Unofficially documented unwind info redirection, when UNWIND_INFO
647 address is odd (http://www.codemachine.com/article_x64deepdive.html).
648 */
649 struct external_pex64_runtime_function d;
650 CORE_ADDR sa, ea;
651
652 if (target_read_memory (cache->image_base + (unwind_info & ~1),
653 (gdb_byte *) &d, sizeof (d)) != 0)
654 return;
655
656 cache->start_rva
657 = extract_unsigned_integer (d.rva_BeginAddress, 4, byte_order);
658 cache->end_rva
659 = extract_unsigned_integer (d.rva_EndAddress, 4, byte_order);
660 unwind_info
661 = extract_unsigned_integer (d.rva_UnwindData, 4, byte_order);
662 }
663
664 while (1)
665 {
666 struct external_pex64_unwind_info ex_ui;
667 /* There are at most 256 16-bit unwind insns. */
668 gdb_byte insns[2 * 256];
669 gdb_byte *p;
670 gdb_byte *end_insns;
671 unsigned char codes_count;
672 unsigned char frame_reg;
673 unsigned char frame_off;
674 CORE_ADDR start;
675
676 /* Read and decode header. */
677 if (target_read_memory (cache->image_base + unwind_info,
678 (gdb_byte *) &ex_ui, sizeof (ex_ui)) != 0)
679 return;
680
681 if (frame_debug)
682 fprintf_unfiltered
683 (gdb_stdlog,
684 "amd64_windows_frame_decodes_insn: "
685 "%s: ver: %02x, plgsz: %02x, cnt: %02x, frame: %02x\n",
686 paddress (gdbarch, unwind_info),
687 ex_ui.Version_Flags, ex_ui.SizeOfPrologue,
688 ex_ui.CountOfCodes, ex_ui.FrameRegisterOffset);
689
690 /* Check version. */
691 if (PEX64_UWI_VERSION (ex_ui.Version_Flags) != 1
692 && PEX64_UWI_VERSION (ex_ui.Version_Flags) != 2)
693 return;
694
695 start = cache->image_base + cache->start_rva;
696 if (first
697 && !(cache->pc >= start && cache->pc < start + ex_ui.SizeOfPrologue))
698 {
699 /* We want to detect if the PC points to an epilogue. This needs
700 to be checked only once, and an epilogue can be anywhere but in
701 the prologue. If so, the epilogue detection+decoding function is
702 sufficient. Otherwise, the unwinder will consider that the PC
703 is in the body of the function and will need to decode unwind
704 info. */
705 if (amd64_windows_frame_decode_epilogue (this_frame, cache) == 1)
706 return;
707
708 /* Not in an epilog. Clear possible side effects. */
709 memset (cache->prev_reg_addr, 0, sizeof (cache->prev_reg_addr));
710 }
711
712 codes_count = ex_ui.CountOfCodes;
713 frame_reg = PEX64_UWI_FRAMEREG (ex_ui.FrameRegisterOffset);
714
715 if (frame_reg != 0)
716 {
717 /* According to MSDN:
718 If an FP reg is used, then any unwind code taking an offset must
719 only be used after the FP reg is established in the prologue. */
720 gdb_byte buf[8];
721 int frreg = amd64_windows_w2gdb_regnum[frame_reg];
722
723 get_frame_register (this_frame, frreg, buf);
724 save_addr = extract_unsigned_integer (buf, 8, byte_order);
725
726 if (frame_debug)
727 fprintf_unfiltered (gdb_stdlog, " frame_reg=%s, val=%s\n",
728 gdbarch_register_name (gdbarch, frreg),
729 paddress (gdbarch, save_addr));
730 }
731
732 /* Read opcodes. */
733 if (codes_count != 0
734 && target_read_memory (cache->image_base + unwind_info
735 + sizeof (ex_ui),
736 insns, codes_count * 2) != 0)
737 return;
738
739 end_insns = &insns[codes_count * 2];
740 p = insns;
741
742 /* Skip opcode 6 entries of version 2. This opcode is not documented. */
743 if (PEX64_UWI_VERSION (ex_ui.Version_Flags) == 2)
744 {
745 for (; p < end_insns; p += 2)
746 if (PEX64_UNWCODE_CODE (p[1]) != 6)
747 break;
748 }
749
750 for (; p < end_insns; p += 2)
751 {
752 int reg;
753
754 /* Virtually execute the operation if the pc is after the
755 corresponding instruction (this matters in case of a breakpoint
756 within the prologue). Note that for chained info (!first), the
757 prologue has been fully executed. */
758 if (cache->pc >= start + p[0] || cache->pc < start)
759 {
760 if (frame_debug)
761 fprintf_unfiltered
762 (gdb_stdlog, " op #%u: off=0x%02x, insn=0x%02x\n",
763 (unsigned) (p - insns), p[0], p[1]);
764
765 /* If no frame register is defined, the current value of
766 rsp is used instead. */
767 if (frame_reg == 0)
768 save_addr = cur_sp;
769
770 reg = -1;
771
772 switch (PEX64_UNWCODE_CODE (p[1]))
773 {
774 case UWOP_PUSH_NONVOL:
775 /* Push pre-decrements RSP. */
776 reg = amd64_windows_w2gdb_regnum[PEX64_UNWCODE_INFO (p[1])];
777 cache->prev_reg_addr[reg] = cur_sp;
778 cur_sp += 8;
779 break;
780 case UWOP_ALLOC_LARGE:
781 if (PEX64_UNWCODE_INFO (p[1]) == 0)
782 cur_sp +=
783 8 * extract_unsigned_integer (p + 2, 2, byte_order);
784 else if (PEX64_UNWCODE_INFO (p[1]) == 1)
785 cur_sp += extract_unsigned_integer (p + 2, 4, byte_order);
786 else
787 return;
788 break;
789 case UWOP_ALLOC_SMALL:
790 cur_sp += 8 + 8 * PEX64_UNWCODE_INFO (p[1]);
791 break;
792 case UWOP_SET_FPREG:
793 cur_sp = save_addr
794 - PEX64_UWI_FRAMEOFF (ex_ui.FrameRegisterOffset) * 16;
795 break;
796 case UWOP_SAVE_NONVOL:
797 reg = amd64_windows_w2gdb_regnum[PEX64_UNWCODE_INFO (p[1])];
798 cache->prev_reg_addr[reg] = save_addr
799 + 8 * extract_unsigned_integer (p + 2, 2, byte_order);
800 break;
801 case UWOP_SAVE_NONVOL_FAR:
802 reg = amd64_windows_w2gdb_regnum[PEX64_UNWCODE_INFO (p[1])];
803 cache->prev_reg_addr[reg] = save_addr
804 + 8 * extract_unsigned_integer (p + 2, 4, byte_order);
805 break;
806 case UWOP_SAVE_XMM128:
807 cache->prev_xmm_addr[PEX64_UNWCODE_INFO (p[1])] =
808 save_addr
809 - 16 * extract_unsigned_integer (p + 2, 2, byte_order);
810 break;
811 case UWOP_SAVE_XMM128_FAR:
812 cache->prev_xmm_addr[PEX64_UNWCODE_INFO (p[1])] =
813 save_addr
814 - 16 * extract_unsigned_integer (p + 2, 4, byte_order);
815 break;
816 case UWOP_PUSH_MACHFRAME:
817 if (PEX64_UNWCODE_INFO (p[1]) == 0)
818 {
819 cache->prev_rip_addr = cur_sp + 0;
820 cache->prev_rsp_addr = cur_sp + 24;
821 cur_sp += 40;
822 }
823 else if (PEX64_UNWCODE_INFO (p[1]) == 1)
824 {
825 cache->prev_rip_addr = cur_sp + 8;
826 cache->prev_rsp_addr = cur_sp + 32;
827 cur_sp += 48;
828 }
829 else
830 return;
831 break;
832 default:
833 return;
834 }
835
836 /* Display address where the register was saved. */
837 if (frame_debug && reg >= 0)
838 fprintf_unfiltered
839 (gdb_stdlog, " [reg %s at %s]\n",
840 gdbarch_register_name (gdbarch, reg),
841 paddress (gdbarch, cache->prev_reg_addr[reg]));
842 }
843
844 /* Adjust with the length of the opcode. */
845 switch (PEX64_UNWCODE_CODE (p[1]))
846 {
847 case UWOP_PUSH_NONVOL:
848 case UWOP_ALLOC_SMALL:
849 case UWOP_SET_FPREG:
850 case UWOP_PUSH_MACHFRAME:
851 break;
852 case UWOP_ALLOC_LARGE:
853 if (PEX64_UNWCODE_INFO (p[1]) == 0)
854 p += 2;
855 else if (PEX64_UNWCODE_INFO (p[1]) == 1)
856 p += 4;
857 else
858 return;
859 break;
860 case UWOP_SAVE_NONVOL:
861 case UWOP_SAVE_XMM128:
862 p += 2;
863 break;
864 case UWOP_SAVE_NONVOL_FAR:
865 case UWOP_SAVE_XMM128_FAR:
866 p += 4;
867 break;
868 default:
869 return;
870 }
871 }
872 if (PEX64_UWI_FLAGS (ex_ui.Version_Flags) != UNW_FLAG_CHAININFO)
873 {
874 /* End of unwind info. */
875 break;
876 }
877 else
878 {
879 /* Read the chained unwind info. */
880 struct external_pex64_runtime_function d;
881 CORE_ADDR chain_vma;
882
883 /* No longer the first entry. */
884 first = 0;
885
886 /* Stay aligned on word boundary. */
887 chain_vma = cache->image_base + unwind_info
888 + sizeof (ex_ui) + ((codes_count + 1) & ~1) * 2;
889
890 if (target_read_memory (chain_vma, (gdb_byte *) &d, sizeof (d)) != 0)
891 return;
892
893 /* Decode begin/end. These may differ from the .pdata entry, as
894 an unwind info entry may be shared by several functions (in particular
895 if many functions have the same prologue and handler). */
896 cache->start_rva =
897 extract_unsigned_integer (d.rva_BeginAddress, 4, byte_order);
898 cache->end_rva =
899 extract_unsigned_integer (d.rva_EndAddress, 4, byte_order);
900 unwind_info =
901 extract_unsigned_integer (d.rva_UnwindData, 4, byte_order);
902
903 if (frame_debug)
904 fprintf_unfiltered
905 (gdb_stdlog,
906 "amd64_windows_frame_decodes_insn (next in chain):"
907 " unwind_data=%s, start_rva=%s, end_rva=%s\n",
908 paddress (gdbarch, unwind_info),
909 paddress (gdbarch, cache->start_rva),
910 paddress (gdbarch, cache->end_rva));
911 }
912
913 /* Allow the user to break this loop. */
914 QUIT;
915 }
916 /* PC is saved by the call. */
917 if (cache->prev_rip_addr == 0)
918 cache->prev_rip_addr = cur_sp;
919 cache->prev_sp = cur_sp + 8;
920
921 if (frame_debug)
922 fprintf_unfiltered (gdb_stdlog, " prev_sp: %s, prev_pc @%s\n",
923 paddress (gdbarch, cache->prev_sp),
924 paddress (gdbarch, cache->prev_rip_addr));
925 }
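
/* A worked example (illustrative): for a prologue of the form

       push %rbp
       sub  $0x20, %rsp

   the unwind info carries two codes, stored in reverse order of the
   prologue: UWOP_ALLOC_SMALL with info 3 (8 + 8 * 3 = 0x20 bytes) and
   UWOP_PUSH_NONVOL for RBP.  Replaying them as above records the address
   RBP was pushed at and leaves CUR_SP at the return address, so that
   PREV_SP = CUR_SP + 8.  */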
926
927 /* Find SEH unwind info for PC, returning 0 on success.
928
929 UNWIND_INFO is set to the RVA of the unwind info, IMAGE_BASE
930 to the base address of the corresponding image, and START_RVA
931 and END_RVA to the start and end RVAs of the function containing PC. */
932
933 static int
934 amd64_windows_find_unwind_info (struct gdbarch *gdbarch, CORE_ADDR pc,
935 CORE_ADDR *unwind_info,
936 CORE_ADDR *image_base,
937 CORE_ADDR *start_rva,
938 CORE_ADDR *end_rva)
939 {
940 struct obj_section *sec;
941 pe_data_type *pe;
942 IMAGE_DATA_DIRECTORY *dir;
943 struct objfile *objfile;
944 unsigned long lo, hi;
945 CORE_ADDR base;
946 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
947
948 /* Get the corresponding exception directory. */
949 sec = find_pc_section (pc);
950 if (sec == NULL)
951 return -1;
952 objfile = sec->objfile;
953 pe = pe_data (sec->objfile->obfd);
954 dir = &pe->pe_opthdr.DataDirectory[PE_EXCEPTION_TABLE];
955
956 base = pe->pe_opthdr.ImageBase
957 + ANOFFSET (objfile->section_offsets, SECT_OFF_TEXT (objfile));
958 *image_base = base;
959
960 /* Find the entry.
961
962 Note: This does not handle dynamically added entries (for JIT
963 engines). For this, we would need to ask the kernel directly,
964 which means getting some info from the native layer. For the
965 rest of the code, however, it's probably faster to search
966 the entry ourselves. */
967 lo = 0;
968 hi = dir->Size / sizeof (struct external_pex64_runtime_function);
969 *unwind_info = 0;
970 while (lo <= hi)
971 {
972 unsigned long mid = lo + (hi - lo) / 2;
973 struct external_pex64_runtime_function d;
974 CORE_ADDR sa, ea;
975
976 if (target_read_memory (base + dir->VirtualAddress + mid * sizeof (d),
977 (gdb_byte *) &d, sizeof (d)) != 0)
978 return -1;
979
980 sa = extract_unsigned_integer (d.rva_BeginAddress, 4, byte_order);
981 ea = extract_unsigned_integer (d.rva_EndAddress, 4, byte_order);
982 if (pc < base + sa)
983 hi = mid - 1;
984 else if (pc >= base + ea)
985 lo = mid + 1;
986 else if (pc >= base + sa && pc < base + ea)
987 {
988 /* Got it. */
989 *start_rva = sa;
990 *end_rva = ea;
991 *unwind_info =
992 extract_unsigned_integer (d.rva_UnwindData, 4, byte_order);
993 break;
994 }
995 else
996 break;
997 }
998
999 if (frame_debug)
1000 fprintf_unfiltered
1001 (gdb_stdlog,
1002 "amd64_windows_find_unwind_data: image_base=%s, unwind_data=%s\n",
1003 paddress (gdbarch, base), paddress (gdbarch, *unwind_info));
1004
1005 return 0;
1006 }
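
/* Note on the lookup above: the exception directory (.pdata) is an array
   of 12-byte RUNTIME_FUNCTION entries, each made of three 32-bit RVAs
   (function start, function end, unwind info), sorted by start address;
   that ordering is what makes the binary search on the begin/end
   addresses valid.  */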
1007
1008 /* Fill THIS_CACHE using the native amd64-windows unwinding data
1009 for THIS_FRAME. */
1010
1011 static struct amd64_windows_frame_cache *
1012 amd64_windows_frame_cache (struct frame_info *this_frame, void **this_cache)
1013 {
1014 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1015 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1016 struct amd64_windows_frame_cache *cache;
1017 gdb_byte buf[8];
1018 struct obj_section *sec;
1019 pe_data_type *pe;
1020 IMAGE_DATA_DIRECTORY *dir;
1021 CORE_ADDR image_base;
1022 CORE_ADDR pc;
1023 struct objfile *objfile;
1024 unsigned long lo, hi;
1025 CORE_ADDR unwind_info = 0;
1026
1027 if (*this_cache)
1028 return *this_cache;
1029
1030 cache = FRAME_OBSTACK_ZALLOC (struct amd64_windows_frame_cache);
1031 *this_cache = cache;
1032
1033 /* Get current PC and SP. */
1034 pc = get_frame_pc (this_frame);
1035 get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
1036 cache->sp = extract_unsigned_integer (buf, 8, byte_order);
1037 cache->pc = pc;
1038
1039 if (amd64_windows_find_unwind_info (gdbarch, pc, &unwind_info,
1040 &cache->image_base,
1041 &cache->start_rva,
1042 &cache->end_rva))
1043 return cache;
1044
1045 if (unwind_info == 0)
1046 {
1047 /* Assume a leaf function. */
1048 cache->prev_sp = cache->sp + 8;
1049 cache->prev_rip_addr = cache->sp;
1050 }
1051 else
1052 {
1053 /* Decode unwind insns to compute saved addresses. */
1054 amd64_windows_frame_decode_insns (this_frame, cache, unwind_info);
1055 }
1056 return cache;
1057 }
1058
1059 /* Implement the "prev_register" method of struct frame_unwind
1060 using the standard Windows x64 SEH info. */
1061
1062 static struct value *
1063 amd64_windows_frame_prev_register (struct frame_info *this_frame,
1064 void **this_cache, int regnum)
1065 {
1066 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1067 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1068 struct amd64_windows_frame_cache *cache =
1069 amd64_windows_frame_cache (this_frame, this_cache);
1070 struct value *val;
1071 CORE_ADDR prev;
1072
1073 if (frame_debug)
1074 fprintf_unfiltered (gdb_stdlog,
1075 "amd64_windows_frame_prev_register %s for sp=%s\n",
1076 gdbarch_register_name (gdbarch, regnum),
1077 paddress (gdbarch, cache->prev_sp));
1078
1079 if (regnum >= AMD64_XMM0_REGNUM && regnum <= AMD64_XMM0_REGNUM + 15)
1080 prev = cache->prev_xmm_addr[regnum - AMD64_XMM0_REGNUM];
1081 else if (regnum == AMD64_RSP_REGNUM)
1082 {
1083 prev = cache->prev_rsp_addr;
1084 if (prev == 0)
1085 return frame_unwind_got_constant (this_frame, regnum, cache->prev_sp);
1086 }
1087 else if (regnum >= AMD64_RAX_REGNUM && regnum <= AMD64_R15_REGNUM)
1088 prev = cache->prev_reg_addr[regnum - AMD64_RAX_REGNUM];
1089 else if (regnum == AMD64_RIP_REGNUM)
1090 prev = cache->prev_rip_addr;
1091 else
1092 prev = 0;
1093
1094 if (prev && frame_debug)
1095 fprintf_unfiltered (gdb_stdlog, " -> at %s\n", paddress (gdbarch, prev));
1096
1097 if (prev)
1098 {
1099 /* Register was saved. */
1100 return frame_unwind_got_memory (this_frame, regnum, prev);
1101 }
1102 else
1103 {
1104 /* Register is either volatile or not modified. */
1105 return frame_unwind_got_register (this_frame, regnum, regnum);
1106 }
1107 }
1108
1109 /* Implement the "this_id" method of struct frame_unwind using
1110 the standard Windows x64 SEH info. */
1111
1112 static void
1113 amd64_windows_frame_this_id (struct frame_info *this_frame, void **this_cache,
1114 struct frame_id *this_id)
1115 {
1116 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1117 struct amd64_windows_frame_cache *cache =
1118 amd64_windows_frame_cache (this_frame, this_cache);
1119
1120 *this_id = frame_id_build (cache->prev_sp,
1121 cache->image_base + cache->start_rva);
1122 }
1123
1124 /* Windows x64 SEH unwinder. */
1125
1126 static const struct frame_unwind amd64_windows_frame_unwind =
1127 {
1128 NORMAL_FRAME,
1129 default_frame_unwind_stop_reason,
1130 &amd64_windows_frame_this_id,
1131 &amd64_windows_frame_prev_register,
1132 NULL,
1133 default_frame_sniffer
1134 };
1135
1136 /* Implement the "skip_prologue" gdbarch method. */
1137
1138 static CORE_ADDR
1139 amd64_windows_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
1140 {
1141 CORE_ADDR func_addr;
1142 CORE_ADDR unwind_info = 0;
1143 CORE_ADDR image_base, start_rva, end_rva;
1144 struct external_pex64_unwind_info ex_ui;
1145
1146 /* Use prologue size from unwind info. */
1147 if (amd64_windows_find_unwind_info (gdbarch, pc, &unwind_info,
1148 &image_base, &start_rva, &end_rva) == 0)
1149 {
1150 if (unwind_info == 0)
1151 {
1152 /* Leaf function. */
1153 return pc;
1154 }
1155 else if (target_read_memory (image_base + unwind_info,
1156 (gdb_byte *) &ex_ui, sizeof (ex_ui)) == 0
1157 && PEX64_UWI_VERSION (ex_ui.Version_Flags) == 1)
1158 return max (pc, image_base + start_rva + ex_ui.SizeOfPrologue);
1159 }
1160
1161 /* See if we can determine the end of the prologue via the symbol
1162 table. If so, then return either the PC, or the PC after
1163 the prologue, whichever is greater. */
1164 if (find_pc_partial_function (pc, NULL, &func_addr, NULL))
1165 {
1166 CORE_ADDR post_prologue_pc
1167 = skip_prologue_using_sal (gdbarch, func_addr);
1168
1169 if (post_prologue_pc != 0)
1170 return max (pc, post_prologue_pc);
1171 }
1172
1173 return pc;
1174 }
1175
1176 /* Check Win64 DLL jmp trampolines and find jump destination. */
1177
1178 static CORE_ADDR
1179 amd64_windows_skip_trampoline_code (struct frame_info *frame, CORE_ADDR pc)
1180 {
1181 CORE_ADDR destination = 0;
1182 struct gdbarch *gdbarch = get_frame_arch (frame);
1183 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1184
1185 /* Check for jmp *<offset>(%rip) (jump near, absolute indirect (/4)). */
1186 if (pc && read_memory_unsigned_integer (pc, 2, byte_order) == 0x25ff)
1187 {
1188 /* Get opcode offset and see if we can find a reference in our data. */
1189 ULONGEST offset
1190 = read_memory_unsigned_integer (pc + 2, 4, byte_order);
1191
1192 /* Get the address of the function pointer read by the jump (the displacement is relative to the end of the 6-byte instruction). */
1193 CORE_ADDR indirect_addr = pc + offset + 6;
1194
1195 struct minimal_symbol *indsym
1196 = (indirect_addr
1197 ? lookup_minimal_symbol_by_pc (indirect_addr).minsym
1198 : NULL);
1199 const char *symname = indsym ? MSYMBOL_LINKAGE_NAME (indsym) : NULL;
1200
1201 if (symname)
1202 {
1203 if (startswith (symname, "__imp_")
1204 || startswith (symname, "_imp_"))
1205 destination
1206 = read_memory_unsigned_integer (indirect_addr, 8, byte_order);
1207 }
1208 }
1209
1210 return destination;
1211 }
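
/* For example, calls to imported functions typically go through a thunk
   of the form

       jmp *__imp_ExitProcess(%rip)        ff 25 <rel32>

   where the __imp_ symbol labels the IAT slot holding the real entry
   point; the code above follows that slot to report the actual
   destination.  (ExitProcess is only an illustrative name.)  */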
1212
1213 /* Implement the "auto_wide_charset" gdbarch method. */
1214
1215 static const char *
1216 amd64_windows_auto_wide_charset (void)
1217 {
1218 return "UTF-16";
1219 }
1220
1221 static void
1222 amd64_windows_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
1223 {
1224 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1225
1226 /* The dwarf2 unwinder (appended very early by i386_gdbarch_init) is
1227 preferred over the SEH one. The reasons are:
1228 - binaries without SEH but with dwarf2 debug info are correctly handled
1229 (although they aren't ABI compliant, gcc before 4.7 didn't emit SEH
1230 info).
1231 - dwarf3 DW_OP_call_frame_cfa is correctly handled (it can only be
1232 handled if the dwarf2 unwinder is used).
1233
1234 The call to amd64_init_abi appends default unwinders, that aren't
1235 compatible with the SEH one.
1236 */
1237 frame_unwind_append_unwinder (gdbarch, &amd64_windows_frame_unwind);
1238
1239 amd64_init_abi (info, gdbarch);
1240
1241 windows_init_abi (info, gdbarch);
1242
1243 /* On Windows, "long" is only 32 bits. */
1244 set_gdbarch_long_bit (gdbarch, 32);
1245
1246 /* Function calls. */
1247 set_gdbarch_push_dummy_call (gdbarch, amd64_windows_push_dummy_call);
1248 set_gdbarch_return_value (gdbarch, amd64_windows_return_value);
1249 set_gdbarch_skip_main_prologue (gdbarch, amd64_skip_main_prologue);
1250 set_gdbarch_skip_trampoline_code (gdbarch,
1251 amd64_windows_skip_trampoline_code);
1252
1253 set_gdbarch_skip_prologue (gdbarch, amd64_windows_skip_prologue);
1254
1255 set_gdbarch_auto_wide_charset (gdbarch, amd64_windows_auto_wide_charset);
1256 }
1257
1258 /* -Wmissing-prototypes */
1259 extern initialize_file_ftype _initialize_amd64_windows_tdep;
1260
1261 void
1262 _initialize_amd64_windows_tdep (void)
1263 {
1264 gdbarch_register_osabi (bfd_arch_i386, bfd_mach_x86_64, GDB_OSABI_CYGWIN,
1265 amd64_windows_init_abi);
1266 }