]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gdb/ia64-tdep.c
internal_error: remove need to pass __FILE__/__LINE__
[thirdparty/binutils-gdb.git] / gdb / ia64-tdep.c
1 /* Target-dependent code for the IA-64 for GDB, the GNU debugger.
2
3 Copyright (C) 1999-2022 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "inferior.h"
22 #include "gdbcore.h"
23 #include "arch-utils.h"
24 #include "floatformat.h"
25 #include "gdbtypes.h"
26 #include "regcache.h"
27 #include "reggroups.h"
28 #include "frame.h"
29 #include "frame-base.h"
30 #include "frame-unwind.h"
31 #include "target-float.h"
32 #include "value.h"
33 #include "objfiles.h"
34 #include "elf/common.h" /* for DT_PLTGOT value */
35 #include "elf-bfd.h"
36 #include "dis-asm.h"
37 #include "infcall.h"
38 #include "osabi.h"
39 #include "ia64-tdep.h"
40 #include "cp-abi.h"
41
42 #ifdef HAVE_LIBUNWIND_IA64_H
43 #include "elf/ia64.h" /* for PT_IA_64_UNWIND value */
44 #include "ia64-libunwind-tdep.h"
45
46 /* Note: KERNEL_START is supposed to be an address which is not going
47 to ever contain any valid unwind info. For ia64 linux, the choice
48 of 0xc000000000000000 is fairly safe since that's uncached space.
49
50 We use KERNEL_START as follows: after obtaining the kernel's
51 unwind table via getunwind(), we project its unwind data into
52 address-range KERNEL_START-(KERNEL_START+ktab_size) and then
53 when ia64_access_mem() sees a memory access to this
54 address-range, we redirect it to ktab instead.
55
56 None of this hackery is needed with a modern kernel/libcs
57 which uses the kernel virtual DSO to provide access to the
58 kernel's unwind info. In that case, ktab_size remains 0 and
59 hence the value of KERNEL_START doesn't matter. */
60
61 #define KERNEL_START 0xc000000000000000ULL
62
63 static size_t ktab_size = 0;
64 struct ia64_table_entry
65 {
66 uint64_t start_offset;
67 uint64_t end_offset;
68 uint64_t info_offset;
69 };
70
71 static struct ia64_table_entry *ktab = NULL;
72 static gdb::optional<gdb::byte_vector> ktab_buf;
73
74 #endif
75
76 /* An enumeration of the different IA-64 instruction types. */
77
78 enum ia64_instruction_type
79 {
80 A, /* Integer ALU ; I-unit or M-unit */
81 I, /* Non-ALU integer; I-unit */
82 M, /* Memory ; M-unit */
83 F, /* Floating-point ; F-unit */
84 B, /* Branch ; B-unit */
85 L, /* Extended (L+X) ; I-unit */
86 X, /* Extended (L+X) ; I-unit */
87 undefined /* undefined or reserved */
88 };
89
90 /* We represent IA-64 PC addresses as the value of the instruction
91 pointer or'd with some bit combination in the low nibble which
92 represents the slot number in the bundle addressed by the
93 instruction pointer. The problem is that the Linux kernel
94 multiplies its slot numbers (for exceptions) by one while the
95 disassembler multiplies its slot numbers by 6. In addition, I've
96 heard it said that the simulator uses 1 as the multiplier.
97
98 I've fixed the disassembler so that the bytes_per_line field will
99 be the slot multiplier. If bytes_per_line comes in as zero, it
100 is set to six (which is how it was set up initially). -- objdump
101 displays pretty disassembly dumps with this value. For our purposes,
102 we'll set bytes_per_line to SLOT_MULTIPLIER. This is okay since we
103 never want to also display the raw bytes the way objdump does. */
104
105 #define SLOT_MULTIPLIER 1
106
107 /* Length in bytes of an instruction bundle. */
108
109 #define BUNDLE_LEN 16
110
111 /* See the saved memory layout comment for ia64_memory_insert_breakpoint. */
112
113 #if BREAKPOINT_MAX < BUNDLE_LEN - 2
114 # error "BREAKPOINT_MAX < BUNDLE_LEN - 2"
115 #endif
116
117 static gdbarch_init_ftype ia64_gdbarch_init;
118
119 static gdbarch_register_name_ftype ia64_register_name;
120 static gdbarch_register_type_ftype ia64_register_type;
121 static gdbarch_breakpoint_from_pc_ftype ia64_breakpoint_from_pc;
122 static gdbarch_skip_prologue_ftype ia64_skip_prologue;
123 static struct type *is_float_or_hfa_type (struct type *t);
124 static CORE_ADDR ia64_find_global_pointer (struct gdbarch *gdbarch,
125 CORE_ADDR faddr);
126
127 #define NUM_IA64_RAW_REGS 462
128
129 /* Big enough to hold a FP register in bytes. */
130 #define IA64_FP_REGISTER_SIZE 16
131
132 static int sp_regnum = IA64_GR12_REGNUM;
133
134 /* NOTE: we treat the register stack registers r32-r127 as
135 pseudo-registers because they may not be accessible via the ptrace
136 register get/set interfaces. */
137
138 enum pseudo_regs { FIRST_PSEUDO_REGNUM = NUM_IA64_RAW_REGS,
139 VBOF_REGNUM = IA64_NAT127_REGNUM + 1, V32_REGNUM,
140 V127_REGNUM = V32_REGNUM + 95,
141 VP0_REGNUM, VP16_REGNUM = VP0_REGNUM + 16,
142 VP63_REGNUM = VP0_REGNUM + 63, LAST_PSEUDO_REGNUM };
143
144 /* Array of register names; There should be ia64_num_regs strings in
145 the initializer. */
146
147 static const char * const ia64_register_names[] =
148 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
149 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
150 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
151 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
152 "", "", "", "", "", "", "", "",
153 "", "", "", "", "", "", "", "",
154 "", "", "", "", "", "", "", "",
155 "", "", "", "", "", "", "", "",
156 "", "", "", "", "", "", "", "",
157 "", "", "", "", "", "", "", "",
158 "", "", "", "", "", "", "", "",
159 "", "", "", "", "", "", "", "",
160 "", "", "", "", "", "", "", "",
161 "", "", "", "", "", "", "", "",
162 "", "", "", "", "", "", "", "",
163 "", "", "", "", "", "", "", "",
164
165 "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
166 "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
167 "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
168 "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31",
169 "f32", "f33", "f34", "f35", "f36", "f37", "f38", "f39",
170 "f40", "f41", "f42", "f43", "f44", "f45", "f46", "f47",
171 "f48", "f49", "f50", "f51", "f52", "f53", "f54", "f55",
172 "f56", "f57", "f58", "f59", "f60", "f61", "f62", "f63",
173 "f64", "f65", "f66", "f67", "f68", "f69", "f70", "f71",
174 "f72", "f73", "f74", "f75", "f76", "f77", "f78", "f79",
175 "f80", "f81", "f82", "f83", "f84", "f85", "f86", "f87",
176 "f88", "f89", "f90", "f91", "f92", "f93", "f94", "f95",
177 "f96", "f97", "f98", "f99", "f100", "f101", "f102", "f103",
178 "f104", "f105", "f106", "f107", "f108", "f109", "f110", "f111",
179 "f112", "f113", "f114", "f115", "f116", "f117", "f118", "f119",
180 "f120", "f121", "f122", "f123", "f124", "f125", "f126", "f127",
181
182 "", "", "", "", "", "", "", "",
183 "", "", "", "", "", "", "", "",
184 "", "", "", "", "", "", "", "",
185 "", "", "", "", "", "", "", "",
186 "", "", "", "", "", "", "", "",
187 "", "", "", "", "", "", "", "",
188 "", "", "", "", "", "", "", "",
189 "", "", "", "", "", "", "", "",
190
191 "b0", "b1", "b2", "b3", "b4", "b5", "b6", "b7",
192
193 "vfp", "vrap",
194
195 "pr", "ip", "psr", "cfm",
196
197 "kr0", "kr1", "kr2", "kr3", "kr4", "kr5", "kr6", "kr7",
198 "", "", "", "", "", "", "", "",
199 "rsc", "bsp", "bspstore", "rnat",
200 "", "fcr", "", "",
201 "eflag", "csd", "ssd", "cflg", "fsr", "fir", "fdr", "",
202 "ccv", "", "", "", "unat", "", "", "",
203 "fpsr", "", "", "", "itc",
204 "", "", "", "", "", "", "", "", "", "",
205 "", "", "", "", "", "", "", "", "",
206 "pfs", "lc", "ec",
207 "", "", "", "", "", "", "", "", "", "",
208 "", "", "", "", "", "", "", "", "", "",
209 "", "", "", "", "", "", "", "", "", "",
210 "", "", "", "", "", "", "", "", "", "",
211 "", "", "", "", "", "", "", "", "", "",
212 "", "", "", "", "", "", "", "", "", "",
213 "",
214 "nat0", "nat1", "nat2", "nat3", "nat4", "nat5", "nat6", "nat7",
215 "nat8", "nat9", "nat10", "nat11", "nat12", "nat13", "nat14", "nat15",
216 "nat16", "nat17", "nat18", "nat19", "nat20", "nat21", "nat22", "nat23",
217 "nat24", "nat25", "nat26", "nat27", "nat28", "nat29", "nat30", "nat31",
218 "nat32", "nat33", "nat34", "nat35", "nat36", "nat37", "nat38", "nat39",
219 "nat40", "nat41", "nat42", "nat43", "nat44", "nat45", "nat46", "nat47",
220 "nat48", "nat49", "nat50", "nat51", "nat52", "nat53", "nat54", "nat55",
221 "nat56", "nat57", "nat58", "nat59", "nat60", "nat61", "nat62", "nat63",
222 "nat64", "nat65", "nat66", "nat67", "nat68", "nat69", "nat70", "nat71",
223 "nat72", "nat73", "nat74", "nat75", "nat76", "nat77", "nat78", "nat79",
224 "nat80", "nat81", "nat82", "nat83", "nat84", "nat85", "nat86", "nat87",
225 "nat88", "nat89", "nat90", "nat91", "nat92", "nat93", "nat94", "nat95",
226 "nat96", "nat97", "nat98", "nat99", "nat100","nat101","nat102","nat103",
227 "nat104","nat105","nat106","nat107","nat108","nat109","nat110","nat111",
228 "nat112","nat113","nat114","nat115","nat116","nat117","nat118","nat119",
229 "nat120","nat121","nat122","nat123","nat124","nat125","nat126","nat127",
230
231 "bof",
232
233 "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
234 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
235 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
236 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
237 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
238 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
239 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
240 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
241 "r96", "r97", "r98", "r99", "r100", "r101", "r102", "r103",
242 "r104", "r105", "r106", "r107", "r108", "r109", "r110", "r111",
243 "r112", "r113", "r114", "r115", "r116", "r117", "r118", "r119",
244 "r120", "r121", "r122", "r123", "r124", "r125", "r126", "r127",
245
246 "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7",
247 "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15",
248 "p16", "p17", "p18", "p19", "p20", "p21", "p22", "p23",
249 "p24", "p25", "p26", "p27", "p28", "p29", "p30", "p31",
250 "p32", "p33", "p34", "p35", "p36", "p37", "p38", "p39",
251 "p40", "p41", "p42", "p43", "p44", "p45", "p46", "p47",
252 "p48", "p49", "p50", "p51", "p52", "p53", "p54", "p55",
253 "p56", "p57", "p58", "p59", "p60", "p61", "p62", "p63",
254 };
255
256 struct ia64_frame_cache
257 {
258 CORE_ADDR base; /* frame pointer base for frame */
259 CORE_ADDR pc; /* function start pc for frame */
260 CORE_ADDR saved_sp; /* stack pointer for frame */
261 CORE_ADDR bsp; /* points at r32 for the current frame */
262 CORE_ADDR cfm; /* cfm value for current frame */
263 CORE_ADDR prev_cfm; /* cfm value for previous frame */
264 int frameless;
265 int sof; /* Size of frame (decoded from cfm value). */
266 int sol; /* Size of locals (decoded from cfm value). */
267 int sor; /* Number of rotating registers (decoded from
268 cfm value). */
269 CORE_ADDR after_prologue;
270 /* Address of first instruction after the last
271 prologue instruction; Note that there may
272 be instructions from the function's body
273 intermingled with the prologue. */
274 int mem_stack_frame_size;
275 /* Size of the memory stack frame (may be zero),
276 or -1 if it has not been determined yet. */
277 int fp_reg; /* Register number (if any) used a frame pointer
278 for this frame. 0 if no register is being used
279 as the frame pointer. */
280
281 /* Saved registers. */
282 CORE_ADDR saved_regs[NUM_IA64_RAW_REGS];
283
284 };
285
286 static int
287 floatformat_valid (const struct floatformat *fmt, const void *from)
288 {
289 return 1;
290 }
291
292 static const struct floatformat floatformat_ia64_ext_little =
293 {
294 floatformat_little, 82, 0, 1, 17, 65535, 0x1ffff, 18, 64,
295 floatformat_intbit_yes, "floatformat_ia64_ext_little", floatformat_valid, NULL
296 };
297
298 static const struct floatformat floatformat_ia64_ext_big =
299 {
300 floatformat_big, 82, 46, 47, 17, 65535, 0x1ffff, 64, 64,
301 floatformat_intbit_yes, "floatformat_ia64_ext_big", floatformat_valid
302 };
303
304 static const struct floatformat *floatformats_ia64_ext[2] =
305 {
306 &floatformat_ia64_ext_big,
307 &floatformat_ia64_ext_little
308 };
309
310 static struct type *
311 ia64_ext_type (struct gdbarch *gdbarch)
312 {
313 ia64_gdbarch_tdep *tdep = gdbarch_tdep<ia64_gdbarch_tdep> (gdbarch);
314
315 if (!tdep->ia64_ext_type)
316 tdep->ia64_ext_type
317 = arch_float_type (gdbarch, 128, "builtin_type_ia64_ext",
318 floatformats_ia64_ext);
319
320 return tdep->ia64_ext_type;
321 }
322
323 static int
324 ia64_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
325 const struct reggroup *group)
326 {
327 int vector_p;
328 int float_p;
329 int raw_p;
330 if (group == all_reggroup)
331 return 1;
332 vector_p = register_type (gdbarch, regnum)->is_vector ();
333 float_p = register_type (gdbarch, regnum)->code () == TYPE_CODE_FLT;
334 raw_p = regnum < NUM_IA64_RAW_REGS;
335 if (group == float_reggroup)
336 return float_p;
337 if (group == vector_reggroup)
338 return vector_p;
339 if (group == general_reggroup)
340 return (!vector_p && !float_p);
341 if (group == save_reggroup || group == restore_reggroup)
342 return raw_p;
343 return 0;
344 }
345
346 static const char *
347 ia64_register_name (struct gdbarch *gdbarch, int reg)
348 {
349 return ia64_register_names[reg];
350 }
351
352 struct type *
353 ia64_register_type (struct gdbarch *arch, int reg)
354 {
355 if (reg >= IA64_FR0_REGNUM && reg <= IA64_FR127_REGNUM)
356 return ia64_ext_type (arch);
357 else
358 return builtin_type (arch)->builtin_long;
359 }
360
361 static int
362 ia64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
363 {
364 if (reg >= IA64_GR32_REGNUM && reg <= IA64_GR127_REGNUM)
365 return V32_REGNUM + (reg - IA64_GR32_REGNUM);
366 return reg;
367 }
368
369
370 /* Extract ``len'' bits from an instruction bundle starting at
371 bit ``from''. */
372
373 static long long
374 extract_bit_field (const gdb_byte *bundle, int from, int len)
375 {
376 long long result = 0LL;
377 int to = from + len;
378 int from_byte = from / 8;
379 int to_byte = to / 8;
380 unsigned char *b = (unsigned char *) bundle;
381 unsigned char c;
382 int lshift;
383 int i;
384
385 c = b[from_byte];
386 if (from_byte == to_byte)
387 c = ((unsigned char) (c << (8 - to % 8))) >> (8 - to % 8);
388 result = c >> (from % 8);
389 lshift = 8 - (from % 8);
390
391 for (i = from_byte+1; i < to_byte; i++)
392 {
393 result |= ((long long) b[i]) << lshift;
394 lshift += 8;
395 }
396
397 if (from_byte < to_byte && (to % 8 != 0))
398 {
399 c = b[to_byte];
400 c = ((unsigned char) (c << (8 - to % 8))) >> (8 - to % 8);
401 result |= ((long long) c) << lshift;
402 }
403
404 return result;
405 }
406
407 /* Replace the specified bits in an instruction bundle. */
408
409 static void
410 replace_bit_field (gdb_byte *bundle, long long val, int from, int len)
411 {
412 int to = from + len;
413 int from_byte = from / 8;
414 int to_byte = to / 8;
415 unsigned char *b = (unsigned char *) bundle;
416 unsigned char c;
417
418 if (from_byte == to_byte)
419 {
420 unsigned char left, right;
421 c = b[from_byte];
422 left = (c >> (to % 8)) << (to % 8);
423 right = ((unsigned char) (c << (8 - from % 8))) >> (8 - from % 8);
424 c = (unsigned char) (val & 0xff);
425 c = (unsigned char) (c << (from % 8 + 8 - to % 8)) >> (8 - to % 8);
426 c |= right | left;
427 b[from_byte] = c;
428 }
429 else
430 {
431 int i;
432 c = b[from_byte];
433 c = ((unsigned char) (c << (8 - from % 8))) >> (8 - from % 8);
434 c = c | (val << (from % 8));
435 b[from_byte] = c;
436 val >>= 8 - from % 8;
437
438 for (i = from_byte+1; i < to_byte; i++)
439 {
440 c = val & 0xff;
441 val >>= 8;
442 b[i] = c;
443 }
444
445 if (to % 8 != 0)
446 {
447 unsigned char cv = (unsigned char) val;
448 c = b[to_byte];
449 c = c >> (to % 8) << (to % 8);
450 c |= ((unsigned char) (cv << (8 - to % 8))) >> (8 - to % 8);
451 b[to_byte] = c;
452 }
453 }
454 }
455
456 /* Return the contents of slot N (for N = 0, 1, or 2) in
457 and instruction bundle. */
458
459 static long long
460 slotN_contents (gdb_byte *bundle, int slotnum)
461 {
462 return extract_bit_field (bundle, 5+41*slotnum, 41);
463 }
464
465 /* Store an instruction in an instruction bundle. */
466
467 static void
468 replace_slotN_contents (gdb_byte *bundle, long long instr, int slotnum)
469 {
470 replace_bit_field (bundle, instr, 5+41*slotnum, 41);
471 }
472
473 static const enum ia64_instruction_type template_encoding_table[32][3] =
474 {
475 { M, I, I }, /* 00 */
476 { M, I, I }, /* 01 */
477 { M, I, I }, /* 02 */
478 { M, I, I }, /* 03 */
479 { M, L, X }, /* 04 */
480 { M, L, X }, /* 05 */
481 { undefined, undefined, undefined }, /* 06 */
482 { undefined, undefined, undefined }, /* 07 */
483 { M, M, I }, /* 08 */
484 { M, M, I }, /* 09 */
485 { M, M, I }, /* 0A */
486 { M, M, I }, /* 0B */
487 { M, F, I }, /* 0C */
488 { M, F, I }, /* 0D */
489 { M, M, F }, /* 0E */
490 { M, M, F }, /* 0F */
491 { M, I, B }, /* 10 */
492 { M, I, B }, /* 11 */
493 { M, B, B }, /* 12 */
494 { M, B, B }, /* 13 */
495 { undefined, undefined, undefined }, /* 14 */
496 { undefined, undefined, undefined }, /* 15 */
497 { B, B, B }, /* 16 */
498 { B, B, B }, /* 17 */
499 { M, M, B }, /* 18 */
500 { M, M, B }, /* 19 */
501 { undefined, undefined, undefined }, /* 1A */
502 { undefined, undefined, undefined }, /* 1B */
503 { M, F, B }, /* 1C */
504 { M, F, B }, /* 1D */
505 { undefined, undefined, undefined }, /* 1E */
506 { undefined, undefined, undefined }, /* 1F */
507 };
508
509 /* Fetch and (partially) decode an instruction at ADDR and return the
510 address of the next instruction to fetch. */
511
512 static CORE_ADDR
513 fetch_instruction (CORE_ADDR addr, ia64_instruction_type *it, long long *instr)
514 {
515 gdb_byte bundle[BUNDLE_LEN];
516 int slotnum = (int) (addr & 0x0f) / SLOT_MULTIPLIER;
517 long long templ;
518 int val;
519
520 /* Warn about slot numbers greater than 2. We used to generate
521 an error here on the assumption that the user entered an invalid
522 address. But, sometimes GDB itself requests an invalid address.
523 This can (easily) happen when execution stops in a function for
524 which there are no symbols. The prologue scanner will attempt to
525 find the beginning of the function - if the nearest symbol
526 happens to not be aligned on a bundle boundary (16 bytes), the
527 resulting starting address will cause GDB to think that the slot
528 number is too large.
529
530 So we warn about it and set the slot number to zero. It is
531 not necessarily a fatal condition, particularly if debugging
532 at the assembly language level. */
533 if (slotnum > 2)
534 {
535 warning (_("Can't fetch instructions for slot numbers greater than 2.\n"
536 "Using slot 0 instead"));
537 slotnum = 0;
538 }
539
540 addr &= ~0x0f;
541
542 val = target_read_memory (addr, bundle, BUNDLE_LEN);
543
544 if (val != 0)
545 return 0;
546
547 *instr = slotN_contents (bundle, slotnum);
548 templ = extract_bit_field (bundle, 0, 5);
549 *it = template_encoding_table[(int)templ][slotnum];
550
551 if (slotnum == 2 || (slotnum == 1 && *it == L))
552 addr += 16;
553 else
554 addr += (slotnum + 1) * SLOT_MULTIPLIER;
555
556 return addr;
557 }
558
559 /* There are 5 different break instructions (break.i, break.b,
560 break.m, break.f, and break.x), but they all have the same
561 encoding. (The five bit template in the low five bits of the
562 instruction bundle distinguishes one from another.)
563
564 The runtime architecture manual specifies that break instructions
565 used for debugging purposes must have the upper two bits of the 21
566 bit immediate set to a 0 and a 1 respectively. A breakpoint
567 instruction encodes the most significant bit of its 21 bit
568 immediate at bit 36 of the 41 bit instruction. The penultimate msb
569 is at bit 25 which leads to the pattern below.
570
571 Originally, I had this set up to do, e.g, a "break.i 0x80000" But
572 it turns out that 0x80000 was used as the syscall break in the early
573 simulators. So I changed the pattern slightly to do "break.i 0x080001"
574 instead. But that didn't work either (I later found out that this
575 pattern was used by the simulator that I was using.) So I ended up
576 using the pattern seen below.
577
578 SHADOW_CONTENTS has byte-based addressing (PLACED_ADDRESS and SHADOW_LEN)
579 while we need bit-based addressing as the instructions length is 41 bits and
580 we must not modify/corrupt the adjacent slots in the same bundle.
581 Fortunately we may store larger memory incl. the adjacent bits with the
582 original memory content (not the possibly already stored breakpoints there).
583 We need to be careful in ia64_memory_remove_breakpoint to always restore
584 only the specific bits of this instruction ignoring any adjacent stored
585 bits.
586
587 We use the original addressing with the low nibble in the range <0..2> which
588 gets incorrectly interpreted by generic non-ia64 breakpoint_restore_shadows
589 as the direct byte offset of SHADOW_CONTENTS. We store whole BUNDLE_LEN
590 bytes just without these two possibly skipped bytes to not to exceed to the
591 next bundle.
592
593 If we would like to store the whole bundle to SHADOW_CONTENTS we would have
594 to store already the base address (`address & ~0x0f') into PLACED_ADDRESS.
595 In such case there is no other place where to store
596 SLOTNUM (`adress & 0x0f', value in the range <0..2>). We need to know
597 SLOTNUM in ia64_memory_remove_breakpoint.
598
599 There is one special case where we need to be extra careful:
600 L-X instructions, which are instructions that occupy 2 slots
601 (The L part is always in slot 1, and the X part is always in
602 slot 2). We must refuse to insert breakpoints for an address
603 that points at slot 2 of a bundle where an L-X instruction is
604 present, since there is logically no instruction at that address.
605 However, to make things more interesting, the opcode of L-X
606 instructions is located in slot 2. This means that, to insert
607 a breakpoint at an address that points to slot 1, we actually
608 need to write the breakpoint in slot 2! Slot 1 is actually
609 the extended operand, so writing the breakpoint there would not
610 have the desired effect. Another side-effect of this issue
611 is that we need to make sure that the shadow contents buffer
612 does save byte 15 of our instruction bundle (this is the tail
613 end of slot 2, which wouldn't be saved if we were to insert
614 the breakpoint in slot 1).
615
616 ia64 16-byte bundle layout:
617 | 5 bits | slot 0 with 41 bits | slot 1 with 41 bits | slot 2 with 41 bits |
618
619 The current addressing used by the code below:
620 original PC placed_address placed_size required covered
621 == bp_tgt->shadow_len reqd \subset covered
622 0xABCDE0 0xABCDE0 0x10 <0x0...0x5> <0x0..0xF>
623 0xABCDE1 0xABCDE1 0xF <0x5...0xA> <0x1..0xF>
624 0xABCDE2 0xABCDE2 0xE <0xA...0xF> <0x2..0xF>
625
626 L-X instructions are treated a little specially, as explained above:
627 0xABCDE1 0xABCDE1 0xF <0xA...0xF> <0x1..0xF>
628
629 `objdump -d' and some other tools show a bit unjustified offsets:
630 original PC byte where starts the instruction objdump offset
631 0xABCDE0 0xABCDE0 0xABCDE0
632 0xABCDE1 0xABCDE5 0xABCDE6
633 0xABCDE2 0xABCDEA 0xABCDEC
634 */
635
636 #define IA64_BREAKPOINT 0x00003333300LL
637
638 static int
639 ia64_memory_insert_breakpoint (struct gdbarch *gdbarch,
640 struct bp_target_info *bp_tgt)
641 {
642 CORE_ADDR addr = bp_tgt->placed_address = bp_tgt->reqstd_address;
643 gdb_byte bundle[BUNDLE_LEN];
644 int slotnum = (int) (addr & 0x0f) / SLOT_MULTIPLIER, shadow_slotnum;
645 long long instr_breakpoint;
646 int val;
647 int templ;
648
649 if (slotnum > 2)
650 error (_("Can't insert breakpoint for slot numbers greater than 2."));
651
652 addr &= ~0x0f;
653
654 /* Enable the automatic memory restoration from breakpoints while
655 we read our instruction bundle for the purpose of SHADOW_CONTENTS.
656 Otherwise, we could possibly store into the shadow parts of the adjacent
657 placed breakpoints. It is due to our SHADOW_CONTENTS overlapping the real
658 breakpoint instruction bits region. */
659 scoped_restore restore_memory_0
660 = make_scoped_restore_show_memory_breakpoints (0);
661 val = target_read_memory (addr, bundle, BUNDLE_LEN);
662 if (val != 0)
663 return val;
664
665 /* SHADOW_SLOTNUM saves the original slot number as expected by the caller
666 for addressing the SHADOW_CONTENTS placement. */
667 shadow_slotnum = slotnum;
668
669 /* Always cover the last byte of the bundle in case we are inserting
670 a breakpoint on an L-X instruction. */
671 bp_tgt->shadow_len = BUNDLE_LEN - shadow_slotnum;
672
673 templ = extract_bit_field (bundle, 0, 5);
674 if (template_encoding_table[templ][slotnum] == X)
675 {
676 /* X unit types can only be used in slot 2, and are actually
677 part of a 2-slot L-X instruction. We cannot break at this
678 address, as this is the second half of an instruction that
679 lives in slot 1 of that bundle. */
680 gdb_assert (slotnum == 2);
681 error (_("Can't insert breakpoint for non-existing slot X"));
682 }
683 if (template_encoding_table[templ][slotnum] == L)
684 {
685 /* L unit types can only be used in slot 1. But the associated
686 opcode for that instruction is in slot 2, so bump the slot number
687 accordingly. */
688 gdb_assert (slotnum == 1);
689 slotnum = 2;
690 }
691
692 /* Store the whole bundle, except for the initial skipped bytes by the slot
693 number interpreted as bytes offset in PLACED_ADDRESS. */
694 memcpy (bp_tgt->shadow_contents, bundle + shadow_slotnum,
695 bp_tgt->shadow_len);
696
697 /* Re-read the same bundle as above except that, this time, read it in order
698 to compute the new bundle inside which we will be inserting the
699 breakpoint. Therefore, disable the automatic memory restoration from
700 breakpoints while we read our instruction bundle. Otherwise, the general
701 restoration mechanism kicks in and we would possibly remove parts of the
702 adjacent placed breakpoints. It is due to our SHADOW_CONTENTS overlapping
703 the real breakpoint instruction bits region. */
704 scoped_restore restore_memory_1
705 = make_scoped_restore_show_memory_breakpoints (1);
706 val = target_read_memory (addr, bundle, BUNDLE_LEN);
707 if (val != 0)
708 return val;
709
710 /* Breakpoints already present in the code will get detected and not get
711 reinserted by bp_loc_is_permanent. Multiple breakpoints at the same
712 location cannot induce the internal error as they are optimized into
713 a single instance by update_global_location_list. */
714 instr_breakpoint = slotN_contents (bundle, slotnum);
715 if (instr_breakpoint == IA64_BREAKPOINT)
716 internal_error (_("Address %s already contains a breakpoint."),
717 paddress (gdbarch, bp_tgt->placed_address));
718 replace_slotN_contents (bundle, IA64_BREAKPOINT, slotnum);
719
720 val = target_write_memory (addr + shadow_slotnum, bundle + shadow_slotnum,
721 bp_tgt->shadow_len);
722
723 return val;
724 }
725
726 static int
727 ia64_memory_remove_breakpoint (struct gdbarch *gdbarch,
728 struct bp_target_info *bp_tgt)
729 {
730 CORE_ADDR addr = bp_tgt->placed_address;
731 gdb_byte bundle_mem[BUNDLE_LEN], bundle_saved[BUNDLE_LEN];
732 int slotnum = (addr & 0x0f) / SLOT_MULTIPLIER, shadow_slotnum;
733 long long instr_breakpoint, instr_saved;
734 int val;
735 int templ;
736
737 addr &= ~0x0f;
738
739 /* Disable the automatic memory restoration from breakpoints while
740 we read our instruction bundle. Otherwise, the general restoration
741 mechanism kicks in and we would possibly remove parts of the adjacent
742 placed breakpoints. It is due to our SHADOW_CONTENTS overlapping the real
743 breakpoint instruction bits region. */
744 scoped_restore restore_memory_1
745 = make_scoped_restore_show_memory_breakpoints (1);
746 val = target_read_memory (addr, bundle_mem, BUNDLE_LEN);
747 if (val != 0)
748 return val;
749
750 /* SHADOW_SLOTNUM saves the original slot number as expected by the caller
751 for addressing the SHADOW_CONTENTS placement. */
752 shadow_slotnum = slotnum;
753
754 templ = extract_bit_field (bundle_mem, 0, 5);
755 if (template_encoding_table[templ][slotnum] == X)
756 {
757 /* X unit types can only be used in slot 2, and are actually
758 part of a 2-slot L-X instruction. We refuse to insert
759 breakpoints at this address, so there should be no reason
760 for us attempting to remove one there, except if the program's
761 code somehow got modified in memory. */
762 gdb_assert (slotnum == 2);
763 warning (_("Cannot remove breakpoint at address %s from non-existing "
764 "X-type slot, memory has changed underneath"),
765 paddress (gdbarch, bp_tgt->placed_address));
766 return -1;
767 }
768 if (template_encoding_table[templ][slotnum] == L)
769 {
770 /* L unit types can only be used in slot 1. But the breakpoint
771 was actually saved using slot 2, so update the slot number
772 accordingly. */
773 gdb_assert (slotnum == 1);
774 slotnum = 2;
775 }
776
777 gdb_assert (bp_tgt->shadow_len == BUNDLE_LEN - shadow_slotnum);
778
779 instr_breakpoint = slotN_contents (bundle_mem, slotnum);
780 if (instr_breakpoint != IA64_BREAKPOINT)
781 {
782 warning (_("Cannot remove breakpoint at address %s, "
783 "no break instruction at such address."),
784 paddress (gdbarch, bp_tgt->placed_address));
785 return -1;
786 }
787
788 /* Extract the original saved instruction from SLOTNUM normalizing its
789 bit-shift for INSTR_SAVED. */
790 memcpy (bundle_saved, bundle_mem, BUNDLE_LEN);
791 memcpy (bundle_saved + shadow_slotnum, bp_tgt->shadow_contents,
792 bp_tgt->shadow_len);
793 instr_saved = slotN_contents (bundle_saved, slotnum);
794
795 /* In BUNDLE_MEM, be careful to modify only the bits belonging to SLOTNUM
796 and not any of the other ones that are stored in SHADOW_CONTENTS. */
797 replace_slotN_contents (bundle_mem, instr_saved, slotnum);
798 val = target_write_raw_memory (addr, bundle_mem, BUNDLE_LEN);
799
800 return val;
801 }
802
803 /* Implement the breakpoint_kind_from_pc gdbarch method. */
804
805 static int
806 ia64_breakpoint_kind_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr)
807 {
808 /* A place holder of gdbarch method breakpoint_kind_from_pc. */
809 return 0;
810 }
811
812 /* As gdbarch_breakpoint_from_pc ranges have byte granularity and ia64
813 instruction slots ranges are bit-granular (41 bits) we have to provide an
814 extended range as described for ia64_memory_insert_breakpoint. We also take
815 care of preserving the `break' instruction 21-bit (or 62-bit) parameter to
816 make a match for permanent breakpoints. */
817
818 static const gdb_byte *
819 ia64_breakpoint_from_pc (struct gdbarch *gdbarch,
820 CORE_ADDR *pcptr, int *lenptr)
821 {
822 CORE_ADDR addr = *pcptr;
823 static gdb_byte bundle[BUNDLE_LEN];
824 int slotnum = (int) (*pcptr & 0x0f) / SLOT_MULTIPLIER, shadow_slotnum;
825 long long instr_fetched;
826 int val;
827 int templ;
828
829 if (slotnum > 2)
830 error (_("Can't insert breakpoint for slot numbers greater than 2."));
831
832 addr &= ~0x0f;
833
834 /* Enable the automatic memory restoration from breakpoints while
835 we read our instruction bundle to match bp_loc_is_permanent. */
836 {
837 scoped_restore restore_memory_0
838 = make_scoped_restore_show_memory_breakpoints (0);
839 val = target_read_memory (addr, bundle, BUNDLE_LEN);
840 }
841
842 /* The memory might be unreachable. This can happen, for instance,
843 when the user inserts a breakpoint at an invalid address. */
844 if (val != 0)
845 return NULL;
846
847 /* SHADOW_SLOTNUM saves the original slot number as expected by the caller
848 for addressing the SHADOW_CONTENTS placement. */
849 shadow_slotnum = slotnum;
850
851 /* Cover always the last byte of the bundle for the L-X slot case. */
852 *lenptr = BUNDLE_LEN - shadow_slotnum;
853
854 /* Check for L type instruction in slot 1, if present then bump up the slot
855 number to the slot 2. */
856 templ = extract_bit_field (bundle, 0, 5);
857 if (template_encoding_table[templ][slotnum] == X)
858 {
859 gdb_assert (slotnum == 2);
860 error (_("Can't insert breakpoint for non-existing slot X"));
861 }
862 if (template_encoding_table[templ][slotnum] == L)
863 {
864 gdb_assert (slotnum == 1);
865 slotnum = 2;
866 }
867
868 /* A break instruction has its all its opcode bits cleared except for
869 the parameter value. For L+X slot pair we are at the X slot (slot 2) so
870 we should not touch the L slot - the upper 41 bits of the parameter. */
871 instr_fetched = slotN_contents (bundle, slotnum);
872 instr_fetched &= 0x1003ffffc0LL;
873 replace_slotN_contents (bundle, instr_fetched, slotnum);
874
875 return bundle + shadow_slotnum;
876 }
877
878 static CORE_ADDR
879 ia64_read_pc (readable_regcache *regcache)
880 {
881 ULONGEST psr_value, pc_value;
882 int slot_num;
883
884 regcache->cooked_read (IA64_PSR_REGNUM, &psr_value);
885 regcache->cooked_read (IA64_IP_REGNUM, &pc_value);
886 slot_num = (psr_value >> 41) & 3;
887
888 return pc_value | (slot_num * SLOT_MULTIPLIER);
889 }
890
891 void
892 ia64_write_pc (struct regcache *regcache, CORE_ADDR new_pc)
893 {
894 int slot_num = (int) (new_pc & 0xf) / SLOT_MULTIPLIER;
895 ULONGEST psr_value;
896
897 regcache_cooked_read_unsigned (regcache, IA64_PSR_REGNUM, &psr_value);
898 psr_value &= ~(3LL << 41);
899 psr_value |= (ULONGEST)(slot_num & 0x3) << 41;
900
901 new_pc &= ~0xfLL;
902
903 regcache_cooked_write_unsigned (regcache, IA64_PSR_REGNUM, psr_value);
904 regcache_cooked_write_unsigned (regcache, IA64_IP_REGNUM, new_pc);
905 }
906
907 #define IS_NaT_COLLECTION_ADDR(addr) ((((addr) >> 3) & 0x3f) == 0x3f)
908
909 /* Returns the address of the slot that's NSLOTS slots away from
910 the address ADDR. NSLOTS may be positive or negative. */
911 static CORE_ADDR
912 rse_address_add(CORE_ADDR addr, int nslots)
913 {
914 CORE_ADDR new_addr;
915 int mandatory_nat_slots = nslots / 63;
916 int direction = nslots < 0 ? -1 : 1;
917
918 new_addr = addr + 8 * (nslots + mandatory_nat_slots);
919
920 if ((new_addr >> 9) != ((addr + 8 * 64 * mandatory_nat_slots) >> 9))
921 new_addr += 8 * direction;
922
923 if (IS_NaT_COLLECTION_ADDR(new_addr))
924 new_addr += 8 * direction;
925
926 return new_addr;
927 }
928
929 static enum register_status
930 ia64_pseudo_register_read (struct gdbarch *gdbarch, readable_regcache *regcache,
931 int regnum, gdb_byte *buf)
932 {
933 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
934 enum register_status status;
935
936 if (regnum >= V32_REGNUM && regnum <= V127_REGNUM)
937 {
938 #ifdef HAVE_LIBUNWIND_IA64_H
939 /* First try and use the libunwind special reg accessor,
940 otherwise fallback to standard logic. */
941 if (!libunwind_is_initialized ()
942 || libunwind_get_reg_special (gdbarch, regcache, regnum, buf) != 0)
943 #endif
944 {
945 /* The fallback position is to assume that r32-r127 are
946 found sequentially in memory starting at $bof. This
947 isn't always true, but without libunwind, this is the
948 best we can do. */
949 ULONGEST cfm;
950 ULONGEST bsp;
951 CORE_ADDR reg;
952
953 status = regcache->cooked_read (IA64_BSP_REGNUM, &bsp);
954 if (status != REG_VALID)
955 return status;
956
957 status = regcache->cooked_read (IA64_CFM_REGNUM, &cfm);
958 if (status != REG_VALID)
959 return status;
960
961 /* The bsp points at the end of the register frame so we
962 subtract the size of frame from it to get start of
963 register frame. */
964 bsp = rse_address_add (bsp, -(cfm & 0x7f));
965
966 if ((cfm & 0x7f) > regnum - V32_REGNUM)
967 {
968 ULONGEST reg_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
969 reg = read_memory_integer ((CORE_ADDR)reg_addr, 8, byte_order);
970 store_unsigned_integer (buf, register_size (gdbarch, regnum),
971 byte_order, reg);
972 }
973 else
974 store_unsigned_integer (buf, register_size (gdbarch, regnum),
975 byte_order, 0);
976 }
977 }
978 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT31_REGNUM)
979 {
980 ULONGEST unatN_val;
981 ULONGEST unat;
982
983 status = regcache->cooked_read (IA64_UNAT_REGNUM, &unat);
984 if (status != REG_VALID)
985 return status;
986 unatN_val = (unat & (1LL << (regnum - IA64_NAT0_REGNUM))) != 0;
987 store_unsigned_integer (buf, register_size (gdbarch, regnum),
988 byte_order, unatN_val);
989 }
990 else if (IA64_NAT32_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
991 {
992 ULONGEST natN_val = 0;
993 ULONGEST bsp;
994 ULONGEST cfm;
995 CORE_ADDR gr_addr = 0;
996
997 status = regcache->cooked_read (IA64_BSP_REGNUM, &bsp);
998 if (status != REG_VALID)
999 return status;
1000
1001 status = regcache->cooked_read (IA64_CFM_REGNUM, &cfm);
1002 if (status != REG_VALID)
1003 return status;
1004
1005 /* The bsp points at the end of the register frame so we
1006 subtract the size of frame from it to get start of register frame. */
1007 bsp = rse_address_add (bsp, -(cfm & 0x7f));
1008
1009 if ((cfm & 0x7f) > regnum - V32_REGNUM)
1010 gr_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
1011
1012 if (gr_addr != 0)
1013 {
1014 /* Compute address of nat collection bits. */
1015 CORE_ADDR nat_addr = gr_addr | 0x1f8;
1016 ULONGEST nat_collection;
1017 int nat_bit;
1018 /* If our nat collection address is bigger than bsp, we have to get
1019 the nat collection from rnat. Otherwise, we fetch the nat
1020 collection from the computed address. */
1021 if (nat_addr >= bsp)
1022 regcache->cooked_read (IA64_RNAT_REGNUM, &nat_collection);
1023 else
1024 nat_collection = read_memory_integer (nat_addr, 8, byte_order);
1025 nat_bit = (gr_addr >> 3) & 0x3f;
1026 natN_val = (nat_collection >> nat_bit) & 1;
1027 }
1028
1029 store_unsigned_integer (buf, register_size (gdbarch, regnum),
1030 byte_order, natN_val);
1031 }
1032 else if (regnum == VBOF_REGNUM)
1033 {
1034 /* A virtual register frame start is provided for user convenience.
1035 It can be calculated as the bsp - sof (sizeof frame). */
1036 ULONGEST bsp, vbsp;
1037 ULONGEST cfm;
1038
1039 status = regcache->cooked_read (IA64_BSP_REGNUM, &bsp);
1040 if (status != REG_VALID)
1041 return status;
1042 status = regcache->cooked_read (IA64_CFM_REGNUM, &cfm);
1043 if (status != REG_VALID)
1044 return status;
1045
1046 /* The bsp points at the end of the register frame so we
1047 subtract the size of frame from it to get beginning of frame. */
1048 vbsp = rse_address_add (bsp, -(cfm & 0x7f));
1049 store_unsigned_integer (buf, register_size (gdbarch, regnum),
1050 byte_order, vbsp);
1051 }
1052 else if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
1053 {
1054 ULONGEST pr;
1055 ULONGEST cfm;
1056 ULONGEST prN_val;
1057
1058 status = regcache->cooked_read (IA64_PR_REGNUM, &pr);
1059 if (status != REG_VALID)
1060 return status;
1061 status = regcache->cooked_read (IA64_CFM_REGNUM, &cfm);
1062 if (status != REG_VALID)
1063 return status;
1064
1065 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
1066 {
1067 /* Fetch predicate register rename base from current frame
1068 marker for this frame. */
1069 int rrb_pr = (cfm >> 32) & 0x3f;
1070
1071 /* Adjust the register number to account for register rotation. */
1072 regnum = VP16_REGNUM
1073 + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
1074 }
1075 prN_val = (pr & (1LL << (regnum - VP0_REGNUM))) != 0;
1076 store_unsigned_integer (buf, register_size (gdbarch, regnum),
1077 byte_order, prN_val);
1078 }
1079 else
1080 memset (buf, 0, register_size (gdbarch, regnum));
1081
1082 return REG_VALID;
1083 }
1084
1085 static void
1086 ia64_pseudo_register_write (struct gdbarch *gdbarch, struct regcache *regcache,
1087 int regnum, const gdb_byte *buf)
1088 {
1089 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1090
1091 if (regnum >= V32_REGNUM && regnum <= V127_REGNUM)
1092 {
1093 ULONGEST bsp;
1094 ULONGEST cfm;
1095 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
1096 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1097
1098 bsp = rse_address_add (bsp, -(cfm & 0x7f));
1099
1100 if ((cfm & 0x7f) > regnum - V32_REGNUM)
1101 {
1102 ULONGEST reg_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
1103 write_memory (reg_addr, buf, 8);
1104 }
1105 }
1106 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT31_REGNUM)
1107 {
1108 ULONGEST unatN_val, unat, unatN_mask;
1109 regcache_cooked_read_unsigned (regcache, IA64_UNAT_REGNUM, &unat);
1110 unatN_val = extract_unsigned_integer (buf, register_size (gdbarch,
1111 regnum),
1112 byte_order);
1113 unatN_mask = (1LL << (regnum - IA64_NAT0_REGNUM));
1114 if (unatN_val == 0)
1115 unat &= ~unatN_mask;
1116 else if (unatN_val == 1)
1117 unat |= unatN_mask;
1118 regcache_cooked_write_unsigned (regcache, IA64_UNAT_REGNUM, unat);
1119 }
1120 else if (IA64_NAT32_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
1121 {
1122 ULONGEST natN_val;
1123 ULONGEST bsp;
1124 ULONGEST cfm;
1125 CORE_ADDR gr_addr = 0;
1126 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
1127 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1128
1129 /* The bsp points at the end of the register frame so we
1130 subtract the size of frame from it to get start of register frame. */
1131 bsp = rse_address_add (bsp, -(cfm & 0x7f));
1132
1133 if ((cfm & 0x7f) > regnum - V32_REGNUM)
1134 gr_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
1135
1136 natN_val = extract_unsigned_integer (buf, register_size (gdbarch,
1137 regnum),
1138 byte_order);
1139
1140 if (gr_addr != 0 && (natN_val == 0 || natN_val == 1))
1141 {
1142 /* Compute address of nat collection bits. */
1143 CORE_ADDR nat_addr = gr_addr | 0x1f8;
1144 CORE_ADDR nat_collection;
1145 int natN_bit = (gr_addr >> 3) & 0x3f;
1146 ULONGEST natN_mask = (1LL << natN_bit);
1147 /* If our nat collection address is bigger than bsp, we have to get
1148 the nat collection from rnat. Otherwise, we fetch the nat
1149 collection from the computed address. */
1150 if (nat_addr >= bsp)
1151 {
1152 regcache_cooked_read_unsigned (regcache,
1153 IA64_RNAT_REGNUM,
1154 &nat_collection);
1155 if (natN_val)
1156 nat_collection |= natN_mask;
1157 else
1158 nat_collection &= ~natN_mask;
1159 regcache_cooked_write_unsigned (regcache, IA64_RNAT_REGNUM,
1160 nat_collection);
1161 }
1162 else
1163 {
1164 gdb_byte nat_buf[8];
1165 nat_collection = read_memory_integer (nat_addr, 8, byte_order);
1166 if (natN_val)
1167 nat_collection |= natN_mask;
1168 else
1169 nat_collection &= ~natN_mask;
1170 store_unsigned_integer (nat_buf, register_size (gdbarch, regnum),
1171 byte_order, nat_collection);
1172 write_memory (nat_addr, nat_buf, 8);
1173 }
1174 }
1175 }
1176 else if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
1177 {
1178 ULONGEST pr;
1179 ULONGEST cfm;
1180 ULONGEST prN_val;
1181 ULONGEST prN_mask;
1182
1183 regcache_cooked_read_unsigned (regcache, IA64_PR_REGNUM, &pr);
1184 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1185
1186 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
1187 {
1188 /* Fetch predicate register rename base from current frame
1189 marker for this frame. */
1190 int rrb_pr = (cfm >> 32) & 0x3f;
1191
1192 /* Adjust the register number to account for register rotation. */
1193 regnum = VP16_REGNUM
1194 + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
1195 }
1196 prN_val = extract_unsigned_integer (buf, register_size (gdbarch, regnum),
1197 byte_order);
1198 prN_mask = (1LL << (regnum - VP0_REGNUM));
1199 if (prN_val == 0)
1200 pr &= ~prN_mask;
1201 else if (prN_val == 1)
1202 pr |= prN_mask;
1203 regcache_cooked_write_unsigned (regcache, IA64_PR_REGNUM, pr);
1204 }
1205 }
1206
1207 /* The ia64 needs to convert between various ieee floating-point formats
1208 and the special ia64 floating point register format. */
1209
1210 static int
1211 ia64_convert_register_p (struct gdbarch *gdbarch, int regno, struct type *type)
1212 {
1213 return (regno >= IA64_FR0_REGNUM && regno <= IA64_FR127_REGNUM
1214 && type->code () == TYPE_CODE_FLT
1215 && type != ia64_ext_type (gdbarch));
1216 }
1217
1218 static int
1219 ia64_register_to_value (frame_info_ptr frame, int regnum,
1220 struct type *valtype, gdb_byte *out,
1221 int *optimizedp, int *unavailablep)
1222 {
1223 struct gdbarch *gdbarch = get_frame_arch (frame);
1224 gdb_byte in[IA64_FP_REGISTER_SIZE];
1225
1226 /* Convert to TYPE. */
1227 if (!get_frame_register_bytes (frame, regnum, 0,
1228 gdb::make_array_view (in,
1229 register_size (gdbarch,
1230 regnum)),
1231 optimizedp, unavailablep))
1232 return 0;
1233
1234 target_float_convert (in, ia64_ext_type (gdbarch), out, valtype);
1235 *optimizedp = *unavailablep = 0;
1236 return 1;
1237 }
1238
1239 static void
1240 ia64_value_to_register (frame_info_ptr frame, int regnum,
1241 struct type *valtype, const gdb_byte *in)
1242 {
1243 struct gdbarch *gdbarch = get_frame_arch (frame);
1244 gdb_byte out[IA64_FP_REGISTER_SIZE];
1245 target_float_convert (in, valtype, out, ia64_ext_type (gdbarch));
1246 put_frame_register (frame, regnum, out);
1247 }
1248
1249
1250 /* Limit the number of skipped non-prologue instructions since examining
1251 of the prologue is expensive. */
1252 static int max_skip_non_prologue_insns = 40;
1253
1254 /* Given PC representing the starting address of a function, and
1255 LIM_PC which is the (sloppy) limit to which to scan when looking
1256 for a prologue, attempt to further refine this limit by using
1257 the line data in the symbol table. If successful, a better guess
1258 on where the prologue ends is returned, otherwise the previous
1259 value of lim_pc is returned. TRUST_LIMIT is a pointer to a flag
1260 which will be set to indicate whether the returned limit may be
1261 used with no further scanning in the event that the function is
1262 frameless. */
1263
1264 /* FIXME: cagney/2004-02-14: This function and logic have largely been
1265 superseded by skip_prologue_using_sal. */
1266
1267 static CORE_ADDR
1268 refine_prologue_limit (CORE_ADDR pc, CORE_ADDR lim_pc, int *trust_limit)
1269 {
1270 struct symtab_and_line prologue_sal;
1271 CORE_ADDR start_pc = pc;
1272 CORE_ADDR end_pc;
1273
1274 /* The prologue can not possibly go past the function end itself,
1275 so we can already adjust LIM_PC accordingly. */
1276 if (find_pc_partial_function (pc, NULL, NULL, &end_pc) && end_pc < lim_pc)
1277 lim_pc = end_pc;
1278
1279 /* Start off not trusting the limit. */
1280 *trust_limit = 0;
1281
1282 prologue_sal = find_pc_line (pc, 0);
1283 if (prologue_sal.line != 0)
1284 {
1285 int i;
1286 CORE_ADDR addr = prologue_sal.end;
1287
1288 /* Handle the case in which compiler's optimizer/scheduler
1289 has moved instructions into the prologue. We scan ahead
1290 in the function looking for address ranges whose corresponding
1291 line number is less than or equal to the first one that we
1292 found for the function. (It can be less than when the
1293 scheduler puts a body instruction before the first prologue
1294 instruction.) */
1295 for (i = 2 * max_skip_non_prologue_insns;
1296 i > 0 && (lim_pc == 0 || addr < lim_pc);
1297 i--)
1298 {
1299 struct symtab_and_line sal;
1300
1301 sal = find_pc_line (addr, 0);
1302 if (sal.line == 0)
1303 break;
1304 if (sal.line <= prologue_sal.line
1305 && sal.symtab == prologue_sal.symtab)
1306 {
1307 prologue_sal = sal;
1308 }
1309 addr = sal.end;
1310 }
1311
1312 if (lim_pc == 0 || prologue_sal.end < lim_pc)
1313 {
1314 lim_pc = prologue_sal.end;
1315 if (start_pc == get_pc_function_start (lim_pc))
1316 *trust_limit = 1;
1317 }
1318 }
1319 return lim_pc;
1320 }
1321
1322 #define isScratch(_regnum_) ((_regnum_) == 2 || (_regnum_) == 3 \
1323 || (8 <= (_regnum_) && (_regnum_) <= 11) \
1324 || (14 <= (_regnum_) && (_regnum_) <= 31))
1325 #define imm9(_instr_) \
1326 ( ((((_instr_) & 0x01000000000LL) ? -1 : 0) << 8) \
1327 | (((_instr_) & 0x00008000000LL) >> 20) \
1328 | (((_instr_) & 0x00000001fc0LL) >> 6))
1329
1330 /* Allocate and initialize a frame cache. */
1331
1332 static struct ia64_frame_cache *
1333 ia64_alloc_frame_cache (void)
1334 {
1335 struct ia64_frame_cache *cache;
1336 int i;
1337
1338 cache = FRAME_OBSTACK_ZALLOC (struct ia64_frame_cache);
1339
1340 /* Base address. */
1341 cache->base = 0;
1342 cache->pc = 0;
1343 cache->cfm = 0;
1344 cache->prev_cfm = 0;
1345 cache->sof = 0;
1346 cache->sol = 0;
1347 cache->sor = 0;
1348 cache->bsp = 0;
1349 cache->fp_reg = 0;
1350 cache->frameless = 1;
1351
1352 for (i = 0; i < NUM_IA64_RAW_REGS; i++)
1353 cache->saved_regs[i] = 0;
1354
1355 return cache;
1356 }
1357
1358 static CORE_ADDR
1359 examine_prologue (CORE_ADDR pc, CORE_ADDR lim_pc,
1360 frame_info_ptr this_frame,
1361 struct ia64_frame_cache *cache)
1362 {
1363 CORE_ADDR next_pc;
1364 CORE_ADDR last_prologue_pc = pc;
1365 ia64_instruction_type it;
1366 long long instr;
1367 int cfm_reg = 0;
1368 int ret_reg = 0;
1369 int fp_reg = 0;
1370 int unat_save_reg = 0;
1371 int pr_save_reg = 0;
1372 int mem_stack_frame_size = 0;
1373 int spill_reg = 0;
1374 CORE_ADDR spill_addr = 0;
1375 char instores[8];
1376 char infpstores[8];
1377 char reg_contents[256];
1378 int trust_limit;
1379 int frameless = 1;
1380 int i;
1381 CORE_ADDR addr;
1382 gdb_byte buf[8];
1383 CORE_ADDR bof, sor, sol, sof, cfm, rrb_gr;
1384
1385 memset (instores, 0, sizeof instores);
1386 memset (infpstores, 0, sizeof infpstores);
1387 memset (reg_contents, 0, sizeof reg_contents);
1388
1389 if (cache->after_prologue != 0
1390 && cache->after_prologue <= lim_pc)
1391 return cache->after_prologue;
1392
1393 lim_pc = refine_prologue_limit (pc, lim_pc, &trust_limit);
1394 next_pc = fetch_instruction (pc, &it, &instr);
1395
1396 /* We want to check if we have a recognizable function start before we
1397 look ahead for a prologue. */
1398 if (pc < lim_pc && next_pc
1399 && it == M && ((instr & 0x1ee0000003fLL) == 0x02c00000000LL))
1400 {
1401 /* alloc - start of a regular function. */
1402 int sol_bits = (int) ((instr & 0x00007f00000LL) >> 20);
1403 int sof_bits = (int) ((instr & 0x000000fe000LL) >> 13);
1404 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1405
1406 /* Verify that the current cfm matches what we think is the
1407 function start. If we have somehow jumped within a function,
1408 we do not want to interpret the prologue and calculate the
1409 addresses of various registers such as the return address.
1410 We will instead treat the frame as frameless. */
1411 if (!this_frame ||
1412 (sof_bits == (cache->cfm & 0x7f) &&
1413 sol_bits == ((cache->cfm >> 7) & 0x7f)))
1414 frameless = 0;
1415
1416 cfm_reg = rN;
1417 last_prologue_pc = next_pc;
1418 pc = next_pc;
1419 }
1420 else
1421 {
1422 /* Look for a leaf routine. */
1423 if (pc < lim_pc && next_pc
1424 && (it == I || it == M)
1425 && ((instr & 0x1ee00000000LL) == 0x10800000000LL))
1426 {
1427 /* adds rN = imm14, rM (or mov rN, rM when imm14 is 0) */
1428 int imm = (int) ((((instr & 0x01000000000LL) ? -1 : 0) << 13)
1429 | ((instr & 0x001f8000000LL) >> 20)
1430 | ((instr & 0x000000fe000LL) >> 13));
1431 int rM = (int) ((instr & 0x00007f00000LL) >> 20);
1432 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1433 int qp = (int) (instr & 0x0000000003fLL);
1434 if (qp == 0 && rN == 2 && imm == 0 && rM == 12 && fp_reg == 0)
1435 {
1436 /* mov r2, r12 - beginning of leaf routine. */
1437 fp_reg = rN;
1438 last_prologue_pc = next_pc;
1439 }
1440 }
1441
1442 /* If we don't recognize a regular function or leaf routine, we are
1443 done. */
1444 if (!fp_reg)
1445 {
1446 pc = lim_pc;
1447 if (trust_limit)
1448 last_prologue_pc = lim_pc;
1449 }
1450 }
1451
1452 /* Loop, looking for prologue instructions, keeping track of
1453 where preserved registers were spilled. */
1454 while (pc < lim_pc)
1455 {
1456 next_pc = fetch_instruction (pc, &it, &instr);
1457 if (next_pc == 0)
1458 break;
1459
1460 if (it == B && ((instr & 0x1e1f800003fLL) != 0x04000000000LL))
1461 {
1462 /* Exit loop upon hitting a non-nop branch instruction. */
1463 if (trust_limit)
1464 lim_pc = pc;
1465 break;
1466 }
1467 else if (((instr & 0x3fLL) != 0LL) &&
1468 (frameless || ret_reg != 0))
1469 {
1470 /* Exit loop upon hitting a predicated instruction if
1471 we already have the return register or if we are frameless. */
1472 if (trust_limit)
1473 lim_pc = pc;
1474 break;
1475 }
1476 else if (it == I && ((instr & 0x1eff8000000LL) == 0x00188000000LL))
1477 {
1478 /* Move from BR */
1479 int b2 = (int) ((instr & 0x0000000e000LL) >> 13);
1480 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1481 int qp = (int) (instr & 0x0000000003f);
1482
1483 if (qp == 0 && b2 == 0 && rN >= 32 && ret_reg == 0)
1484 {
1485 ret_reg = rN;
1486 last_prologue_pc = next_pc;
1487 }
1488 }
1489 else if ((it == I || it == M)
1490 && ((instr & 0x1ee00000000LL) == 0x10800000000LL))
1491 {
1492 /* adds rN = imm14, rM (or mov rN, rM when imm14 is 0) */
1493 int imm = (int) ((((instr & 0x01000000000LL) ? -1 : 0) << 13)
1494 | ((instr & 0x001f8000000LL) >> 20)
1495 | ((instr & 0x000000fe000LL) >> 13));
1496 int rM = (int) ((instr & 0x00007f00000LL) >> 20);
1497 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1498 int qp = (int) (instr & 0x0000000003fLL);
1499
1500 if (qp == 0 && rN >= 32 && imm == 0 && rM == 12 && fp_reg == 0)
1501 {
1502 /* mov rN, r12 */
1503 fp_reg = rN;
1504 last_prologue_pc = next_pc;
1505 }
1506 else if (qp == 0 && rN == 12 && rM == 12)
1507 {
1508 /* adds r12, -mem_stack_frame_size, r12 */
1509 mem_stack_frame_size -= imm;
1510 last_prologue_pc = next_pc;
1511 }
1512 else if (qp == 0 && rN == 2
1513 && ((rM == fp_reg && fp_reg != 0) || rM == 12))
1514 {
1515 CORE_ADDR saved_sp = 0;
1516 /* adds r2, spilloffset, rFramePointer
1517 or
1518 adds r2, spilloffset, r12
1519
1520 Get ready for stf.spill or st8.spill instructions.
1521 The address to start spilling at is loaded into r2.
1522 FIXME: Why r2? That's what gcc currently uses; it
1523 could well be different for other compilers. */
1524
1525 /* Hmm... whether or not this will work will depend on
1526 where the pc is. If it's still early in the prologue
1527 this'll be wrong. FIXME */
1528 if (this_frame)
1529 saved_sp = get_frame_register_unsigned (this_frame,
1530 sp_regnum);
1531 spill_addr = saved_sp
1532 + (rM == 12 ? 0 : mem_stack_frame_size)
1533 + imm;
1534 spill_reg = rN;
1535 last_prologue_pc = next_pc;
1536 }
1537 else if (qp == 0 && rM >= 32 && rM < 40 && !instores[rM-32] &&
1538 rN < 256 && imm == 0)
1539 {
1540 /* mov rN, rM where rM is an input register. */
1541 reg_contents[rN] = rM;
1542 last_prologue_pc = next_pc;
1543 }
1544 else if (frameless && qp == 0 && rN == fp_reg && imm == 0 &&
1545 rM == 2)
1546 {
1547 /* mov r12, r2 */
1548 last_prologue_pc = next_pc;
1549 break;
1550 }
1551 }
1552 else if (it == M
1553 && ( ((instr & 0x1efc0000000LL) == 0x0eec0000000LL)
1554 || ((instr & 0x1ffc8000000LL) == 0x0cec0000000LL) ))
1555 {
1556 /* stf.spill [rN] = fM, imm9
1557 or
1558 stf.spill [rN] = fM */
1559
1560 int imm = imm9(instr);
1561 int rN = (int) ((instr & 0x00007f00000LL) >> 20);
1562 int fM = (int) ((instr & 0x000000fe000LL) >> 13);
1563 int qp = (int) (instr & 0x0000000003fLL);
1564 if (qp == 0 && rN == spill_reg && spill_addr != 0
1565 && ((2 <= fM && fM <= 5) || (16 <= fM && fM <= 31)))
1566 {
1567 cache->saved_regs[IA64_FR0_REGNUM + fM] = spill_addr;
1568
1569 if ((instr & 0x1efc0000000LL) == 0x0eec0000000LL)
1570 spill_addr += imm;
1571 else
1572 spill_addr = 0; /* last one; must be done. */
1573 last_prologue_pc = next_pc;
1574 }
1575 }
1576 else if ((it == M && ((instr & 0x1eff8000000LL) == 0x02110000000LL))
1577 || (it == I && ((instr & 0x1eff8000000LL) == 0x00050000000LL)) )
1578 {
1579 /* mov.m rN = arM
1580 or
1581 mov.i rN = arM */
1582
1583 int arM = (int) ((instr & 0x00007f00000LL) >> 20);
1584 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1585 int qp = (int) (instr & 0x0000000003fLL);
1586 if (qp == 0 && isScratch (rN) && arM == 36 /* ar.unat */)
1587 {
1588 /* We have something like "mov.m r3 = ar.unat". Remember the
1589 r3 (or whatever) and watch for a store of this register... */
1590 unat_save_reg = rN;
1591 last_prologue_pc = next_pc;
1592 }
1593 }
1594 else if (it == I && ((instr & 0x1eff8000000LL) == 0x00198000000LL))
1595 {
1596 /* mov rN = pr */
1597 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1598 int qp = (int) (instr & 0x0000000003fLL);
1599 if (qp == 0 && isScratch (rN))
1600 {
1601 pr_save_reg = rN;
1602 last_prologue_pc = next_pc;
1603 }
1604 }
1605 else if (it == M
1606 && ( ((instr & 0x1ffc8000000LL) == 0x08cc0000000LL)
1607 || ((instr & 0x1efc0000000LL) == 0x0acc0000000LL)))
1608 {
1609 /* st8 [rN] = rM
1610 or
1611 st8 [rN] = rM, imm9 */
1612 int rN = (int) ((instr & 0x00007f00000LL) >> 20);
1613 int rM = (int) ((instr & 0x000000fe000LL) >> 13);
1614 int qp = (int) (instr & 0x0000000003fLL);
1615 int indirect = rM < 256 ? reg_contents[rM] : 0;
1616 if (qp == 0 && rN == spill_reg && spill_addr != 0
1617 && (rM == unat_save_reg || rM == pr_save_reg))
1618 {
1619 /* We've found a spill of either the UNAT register or the PR
1620 register. (Well, not exactly; what we've actually found is
1621 a spill of the register that UNAT or PR was moved to).
1622 Record that fact and move on... */
1623 if (rM == unat_save_reg)
1624 {
1625 /* Track UNAT register. */
1626 cache->saved_regs[IA64_UNAT_REGNUM] = spill_addr;
1627 unat_save_reg = 0;
1628 }
1629 else
1630 {
1631 /* Track PR register. */
1632 cache->saved_regs[IA64_PR_REGNUM] = spill_addr;
1633 pr_save_reg = 0;
1634 }
1635 if ((instr & 0x1efc0000000LL) == 0x0acc0000000LL)
1636 /* st8 [rN] = rM, imm9 */
1637 spill_addr += imm9(instr);
1638 else
1639 spill_addr = 0; /* Must be done spilling. */
1640 last_prologue_pc = next_pc;
1641 }
1642 else if (qp == 0 && 32 <= rM && rM < 40 && !instores[rM-32])
1643 {
1644 /* Allow up to one store of each input register. */
1645 instores[rM-32] = 1;
1646 last_prologue_pc = next_pc;
1647 }
1648 else if (qp == 0 && 32 <= indirect && indirect < 40 &&
1649 !instores[indirect-32])
1650 {
1651 /* Allow an indirect store of an input register. */
1652 instores[indirect-32] = 1;
1653 last_prologue_pc = next_pc;
1654 }
1655 }
1656 else if (it == M && ((instr & 0x1ff08000000LL) == 0x08c00000000LL))
1657 {
1658 /* One of
1659 st1 [rN] = rM
1660 st2 [rN] = rM
1661 st4 [rN] = rM
1662 st8 [rN] = rM
1663 Note that the st8 case is handled in the clause above.
1664
1665 Advance over stores of input registers. One store per input
1666 register is permitted. */
1667 int rM = (int) ((instr & 0x000000fe000LL) >> 13);
1668 int qp = (int) (instr & 0x0000000003fLL);
1669 int indirect = rM < 256 ? reg_contents[rM] : 0;
1670 if (qp == 0 && 32 <= rM && rM < 40 && !instores[rM-32])
1671 {
1672 instores[rM-32] = 1;
1673 last_prologue_pc = next_pc;
1674 }
1675 else if (qp == 0 && 32 <= indirect && indirect < 40 &&
1676 !instores[indirect-32])
1677 {
1678 /* Allow an indirect store of an input register. */
1679 instores[indirect-32] = 1;
1680 last_prologue_pc = next_pc;
1681 }
1682 }
1683 else if (it == M && ((instr & 0x1ff88000000LL) == 0x0cc80000000LL))
1684 {
1685 /* Either
1686 stfs [rN] = fM
1687 or
1688 stfd [rN] = fM
1689
1690 Advance over stores of floating point input registers. Again
1691 one store per register is permitted. */
1692 int fM = (int) ((instr & 0x000000fe000LL) >> 13);
1693 int qp = (int) (instr & 0x0000000003fLL);
1694 if (qp == 0 && 8 <= fM && fM < 16 && !infpstores[fM - 8])
1695 {
1696 infpstores[fM-8] = 1;
1697 last_prologue_pc = next_pc;
1698 }
1699 }
1700 else if (it == M
1701 && ( ((instr & 0x1ffc8000000LL) == 0x08ec0000000LL)
1702 || ((instr & 0x1efc0000000LL) == 0x0aec0000000LL)))
1703 {
1704 /* st8.spill [rN] = rM
1705 or
1706 st8.spill [rN] = rM, imm9 */
1707 int rN = (int) ((instr & 0x00007f00000LL) >> 20);
1708 int rM = (int) ((instr & 0x000000fe000LL) >> 13);
1709 int qp = (int) (instr & 0x0000000003fLL);
1710 if (qp == 0 && rN == spill_reg && 4 <= rM && rM <= 7)
1711 {
1712 /* We've found a spill of one of the preserved general purpose
1713 regs. Record the spill address and advance the spill
1714 register if appropriate. */
1715 cache->saved_regs[IA64_GR0_REGNUM + rM] = spill_addr;
1716 if ((instr & 0x1efc0000000LL) == 0x0aec0000000LL)
1717 /* st8.spill [rN] = rM, imm9 */
1718 spill_addr += imm9(instr);
1719 else
1720 spill_addr = 0; /* Done spilling. */
1721 last_prologue_pc = next_pc;
1722 }
1723 }
1724
1725 pc = next_pc;
1726 }
1727
1728 /* If not frameless and we aren't called by skip_prologue, then we need
1729 to calculate registers for the previous frame which will be needed
1730 later. */
1731
1732 if (!frameless && this_frame)
1733 {
1734 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1735 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1736
1737 /* Extract the size of the rotating portion of the stack
1738 frame and the register rename base from the current
1739 frame marker. */
1740 cfm = cache->cfm;
1741 sor = cache->sor;
1742 sof = cache->sof;
1743 sol = cache->sol;
1744 rrb_gr = (cfm >> 18) & 0x7f;
1745
1746 /* Find the bof (beginning of frame). */
1747 bof = rse_address_add (cache->bsp, -sof);
1748
1749 for (i = 0, addr = bof;
1750 i < sof;
1751 i++, addr += 8)
1752 {
1753 if (IS_NaT_COLLECTION_ADDR (addr))
1754 {
1755 addr += 8;
1756 }
1757 if (i+32 == cfm_reg)
1758 cache->saved_regs[IA64_CFM_REGNUM] = addr;
1759 if (i+32 == ret_reg)
1760 cache->saved_regs[IA64_VRAP_REGNUM] = addr;
1761 if (i+32 == fp_reg)
1762 cache->saved_regs[IA64_VFP_REGNUM] = addr;
1763 }
1764
1765 /* For the previous argument registers we require the previous bof.
1766 If we can't find the previous cfm, then we can do nothing. */
1767 cfm = 0;
1768 if (cache->saved_regs[IA64_CFM_REGNUM] != 0)
1769 {
1770 cfm = read_memory_integer (cache->saved_regs[IA64_CFM_REGNUM],
1771 8, byte_order);
1772 }
1773 else if (cfm_reg != 0)
1774 {
1775 get_frame_register (this_frame, cfm_reg, buf);
1776 cfm = extract_unsigned_integer (buf, 8, byte_order);
1777 }
1778 cache->prev_cfm = cfm;
1779
1780 if (cfm != 0)
1781 {
1782 sor = ((cfm >> 14) & 0xf) * 8;
1783 sof = (cfm & 0x7f);
1784 sol = (cfm >> 7) & 0x7f;
1785 rrb_gr = (cfm >> 18) & 0x7f;
1786
1787 /* The previous bof only requires subtraction of the sol (size of
1788 locals) due to the overlap between output and input of
1789 subsequent frames. */
1790 bof = rse_address_add (bof, -sol);
1791
1792 for (i = 0, addr = bof;
1793 i < sof;
1794 i++, addr += 8)
1795 {
1796 if (IS_NaT_COLLECTION_ADDR (addr))
1797 {
1798 addr += 8;
1799 }
1800 if (i < sor)
1801 cache->saved_regs[IA64_GR32_REGNUM
1802 + ((i + (sor - rrb_gr)) % sor)]
1803 = addr;
1804 else
1805 cache->saved_regs[IA64_GR32_REGNUM + i] = addr;
1806 }
1807
1808 }
1809 }
1810
1811 /* Try and trust the lim_pc value whenever possible. */
1812 if (trust_limit && lim_pc >= last_prologue_pc)
1813 last_prologue_pc = lim_pc;
1814
1815 cache->frameless = frameless;
1816 cache->after_prologue = last_prologue_pc;
1817 cache->mem_stack_frame_size = mem_stack_frame_size;
1818 cache->fp_reg = fp_reg;
1819
1820 return last_prologue_pc;
1821 }
1822
1823 CORE_ADDR
1824 ia64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
1825 {
1826 struct ia64_frame_cache cache;
1827 cache.base = 0;
1828 cache.after_prologue = 0;
1829 cache.cfm = 0;
1830 cache.bsp = 0;
1831
1832 /* Call examine_prologue with - as third argument since we don't
1833 have a next frame pointer to send. */
1834 return examine_prologue (pc, pc+1024, 0, &cache);
1835 }
1836
1837
1838 /* Normal frames. */
1839
1840 static struct ia64_frame_cache *
1841 ia64_frame_cache (frame_info_ptr this_frame, void **this_cache)
1842 {
1843 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1844 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1845 struct ia64_frame_cache *cache;
1846 gdb_byte buf[8];
1847 CORE_ADDR cfm;
1848
1849 if (*this_cache)
1850 return (struct ia64_frame_cache *) *this_cache;
1851
1852 cache = ia64_alloc_frame_cache ();
1853 *this_cache = cache;
1854
1855 get_frame_register (this_frame, sp_regnum, buf);
1856 cache->saved_sp = extract_unsigned_integer (buf, 8, byte_order);
1857
1858 /* We always want the bsp to point to the end of frame.
1859 This way, we can always get the beginning of frame (bof)
1860 by subtracting frame size. */
1861 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
1862 cache->bsp = extract_unsigned_integer (buf, 8, byte_order);
1863
1864 get_frame_register (this_frame, IA64_PSR_REGNUM, buf);
1865
1866 get_frame_register (this_frame, IA64_CFM_REGNUM, buf);
1867 cfm = extract_unsigned_integer (buf, 8, byte_order);
1868
1869 cache->sof = (cfm & 0x7f);
1870 cache->sol = (cfm >> 7) & 0x7f;
1871 cache->sor = ((cfm >> 14) & 0xf) * 8;
1872
1873 cache->cfm = cfm;
1874
1875 cache->pc = get_frame_func (this_frame);
1876
1877 if (cache->pc != 0)
1878 examine_prologue (cache->pc, get_frame_pc (this_frame), this_frame, cache);
1879
1880 cache->base = cache->saved_sp + cache->mem_stack_frame_size;
1881
1882 return cache;
1883 }
1884
1885 static void
1886 ia64_frame_this_id (frame_info_ptr this_frame, void **this_cache,
1887 struct frame_id *this_id)
1888 {
1889 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1890 struct ia64_frame_cache *cache =
1891 ia64_frame_cache (this_frame, this_cache);
1892
1893 /* If outermost frame, mark with null frame id. */
1894 if (cache->base != 0)
1895 (*this_id) = frame_id_build_special (cache->base, cache->pc, cache->bsp);
1896 if (gdbarch_debug >= 1)
1897 gdb_printf (gdb_stdlog,
1898 "regular frame id: code %s, stack %s, "
1899 "special %s, this_frame %s\n",
1900 paddress (gdbarch, this_id->code_addr),
1901 paddress (gdbarch, this_id->stack_addr),
1902 paddress (gdbarch, cache->bsp),
1903 host_address_to_string (this_frame.get ()));
1904 }
1905
1906 static struct value *
1907 ia64_frame_prev_register (frame_info_ptr this_frame, void **this_cache,
1908 int regnum)
1909 {
1910 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1911 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1912 struct ia64_frame_cache *cache = ia64_frame_cache (this_frame, this_cache);
1913 gdb_byte buf[8];
1914
1915 gdb_assert (regnum >= 0);
1916
1917 if (!target_has_registers ())
1918 error (_("No registers."));
1919
1920 if (regnum == gdbarch_sp_regnum (gdbarch))
1921 return frame_unwind_got_constant (this_frame, regnum, cache->base);
1922
1923 else if (regnum == IA64_BSP_REGNUM)
1924 {
1925 struct value *val;
1926 CORE_ADDR prev_cfm, bsp, prev_bsp;
1927
1928 /* We want to calculate the previous bsp as the end of the previous
1929 register stack frame. This corresponds to what the hardware bsp
1930 register will be if we pop the frame back which is why we might
1931 have been called. We know the beginning of the current frame is
1932 cache->bsp - cache->sof. This value in the previous frame points
1933 to the start of the output registers. We can calculate the end of
1934 that frame by adding the size of output:
1935 (sof (size of frame) - sol (size of locals)). */
1936 val = ia64_frame_prev_register (this_frame, this_cache, IA64_CFM_REGNUM);
1937 prev_cfm = extract_unsigned_integer (value_contents_all (val).data (),
1938 8, byte_order);
1939 bsp = rse_address_add (cache->bsp, -(cache->sof));
1940 prev_bsp =
1941 rse_address_add (bsp, (prev_cfm & 0x7f) - ((prev_cfm >> 7) & 0x7f));
1942
1943 return frame_unwind_got_constant (this_frame, regnum, prev_bsp);
1944 }
1945
1946 else if (regnum == IA64_CFM_REGNUM)
1947 {
1948 CORE_ADDR addr = cache->saved_regs[IA64_CFM_REGNUM];
1949
1950 if (addr != 0)
1951 return frame_unwind_got_memory (this_frame, regnum, addr);
1952
1953 if (cache->prev_cfm)
1954 return frame_unwind_got_constant (this_frame, regnum, cache->prev_cfm);
1955
1956 if (cache->frameless)
1957 return frame_unwind_got_register (this_frame, IA64_PFS_REGNUM,
1958 IA64_PFS_REGNUM);
1959 return frame_unwind_got_register (this_frame, regnum, 0);
1960 }
1961
1962 else if (regnum == IA64_VFP_REGNUM)
1963 {
1964 /* If the function in question uses an automatic register (r32-r127)
1965 for the frame pointer, it'll be found by ia64_find_saved_register()
1966 above. If the function lacks one of these frame pointers, we can
1967 still provide a value since we know the size of the frame. */
1968 return frame_unwind_got_constant (this_frame, regnum, cache->base);
1969 }
1970
1971 else if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
1972 {
1973 struct value *pr_val;
1974 ULONGEST prN;
1975
1976 pr_val = ia64_frame_prev_register (this_frame, this_cache,
1977 IA64_PR_REGNUM);
1978 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
1979 {
1980 /* Fetch predicate register rename base from current frame
1981 marker for this frame. */
1982 int rrb_pr = (cache->cfm >> 32) & 0x3f;
1983
1984 /* Adjust the register number to account for register rotation. */
1985 regnum = VP16_REGNUM + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
1986 }
1987 prN = extract_bit_field (value_contents_all (pr_val).data (),
1988 regnum - VP0_REGNUM, 1);
1989 return frame_unwind_got_constant (this_frame, regnum, prN);
1990 }
1991
1992 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT31_REGNUM)
1993 {
1994 struct value *unat_val;
1995 ULONGEST unatN;
1996 unat_val = ia64_frame_prev_register (this_frame, this_cache,
1997 IA64_UNAT_REGNUM);
1998 unatN = extract_bit_field (value_contents_all (unat_val).data (),
1999 regnum - IA64_NAT0_REGNUM, 1);
2000 return frame_unwind_got_constant (this_frame, regnum, unatN);
2001 }
2002
2003 else if (IA64_NAT32_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
2004 {
2005 int natval = 0;
2006 /* Find address of general register corresponding to nat bit we're
2007 interested in. */
2008 CORE_ADDR gr_addr;
2009
2010 gr_addr = cache->saved_regs[regnum - IA64_NAT0_REGNUM + IA64_GR0_REGNUM];
2011
2012 if (gr_addr != 0)
2013 {
2014 /* Compute address of nat collection bits. */
2015 CORE_ADDR nat_addr = gr_addr | 0x1f8;
2016 CORE_ADDR bsp;
2017 CORE_ADDR nat_collection;
2018 int nat_bit;
2019
2020 /* If our nat collection address is bigger than bsp, we have to get
2021 the nat collection from rnat. Otherwise, we fetch the nat
2022 collection from the computed address. */
2023 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2024 bsp = extract_unsigned_integer (buf, 8, byte_order);
2025 if (nat_addr >= bsp)
2026 {
2027 get_frame_register (this_frame, IA64_RNAT_REGNUM, buf);
2028 nat_collection = extract_unsigned_integer (buf, 8, byte_order);
2029 }
2030 else
2031 nat_collection = read_memory_integer (nat_addr, 8, byte_order);
2032 nat_bit = (gr_addr >> 3) & 0x3f;
2033 natval = (nat_collection >> nat_bit) & 1;
2034 }
2035
2036 return frame_unwind_got_constant (this_frame, regnum, natval);
2037 }
2038
2039 else if (regnum == IA64_IP_REGNUM)
2040 {
2041 CORE_ADDR pc = 0;
2042 CORE_ADDR addr = cache->saved_regs[IA64_VRAP_REGNUM];
2043
2044 if (addr != 0)
2045 {
2046 read_memory (addr, buf, register_size (gdbarch, IA64_IP_REGNUM));
2047 pc = extract_unsigned_integer (buf, 8, byte_order);
2048 }
2049 else if (cache->frameless)
2050 {
2051 get_frame_register (this_frame, IA64_BR0_REGNUM, buf);
2052 pc = extract_unsigned_integer (buf, 8, byte_order);
2053 }
2054 pc &= ~0xf;
2055 return frame_unwind_got_constant (this_frame, regnum, pc);
2056 }
2057
2058 else if (regnum == IA64_PSR_REGNUM)
2059 {
2060 /* We don't know how to get the complete previous PSR, but we need it
2061 for the slot information when we unwind the pc (pc is formed of IP
2062 register plus slot information from PSR). To get the previous
2063 slot information, we mask it off the return address. */
2064 ULONGEST slot_num = 0;
2065 CORE_ADDR pc = 0;
2066 CORE_ADDR psr = 0;
2067 CORE_ADDR addr = cache->saved_regs[IA64_VRAP_REGNUM];
2068
2069 get_frame_register (this_frame, IA64_PSR_REGNUM, buf);
2070 psr = extract_unsigned_integer (buf, 8, byte_order);
2071
2072 if (addr != 0)
2073 {
2074 read_memory (addr, buf, register_size (gdbarch, IA64_IP_REGNUM));
2075 pc = extract_unsigned_integer (buf, 8, byte_order);
2076 }
2077 else if (cache->frameless)
2078 {
2079 get_frame_register (this_frame, IA64_BR0_REGNUM, buf);
2080 pc = extract_unsigned_integer (buf, 8, byte_order);
2081 }
2082 psr &= ~(3LL << 41);
2083 slot_num = pc & 0x3LL;
2084 psr |= (CORE_ADDR)slot_num << 41;
2085 return frame_unwind_got_constant (this_frame, regnum, psr);
2086 }
2087
2088 else if (regnum == IA64_BR0_REGNUM)
2089 {
2090 CORE_ADDR addr = cache->saved_regs[IA64_BR0_REGNUM];
2091
2092 if (addr != 0)
2093 return frame_unwind_got_memory (this_frame, regnum, addr);
2094
2095 return frame_unwind_got_constant (this_frame, regnum, 0);
2096 }
2097
2098 else if ((regnum >= IA64_GR32_REGNUM && regnum <= IA64_GR127_REGNUM)
2099 || (regnum >= V32_REGNUM && regnum <= V127_REGNUM))
2100 {
2101 CORE_ADDR addr = 0;
2102
2103 if (regnum >= V32_REGNUM)
2104 regnum = IA64_GR32_REGNUM + (regnum - V32_REGNUM);
2105 addr = cache->saved_regs[regnum];
2106 if (addr != 0)
2107 return frame_unwind_got_memory (this_frame, regnum, addr);
2108
2109 if (cache->frameless)
2110 {
2111 struct value *reg_val;
2112 CORE_ADDR prev_cfm, prev_bsp, prev_bof;
2113
2114 /* FIXME: brobecker/2008-05-01: Doesn't this seem redundant
2115 with the same code above? */
2116 if (regnum >= V32_REGNUM)
2117 regnum = IA64_GR32_REGNUM + (regnum - V32_REGNUM);
2118 reg_val = ia64_frame_prev_register (this_frame, this_cache,
2119 IA64_CFM_REGNUM);
2120 prev_cfm = extract_unsigned_integer
2121 (value_contents_all (reg_val).data (), 8, byte_order);
2122 reg_val = ia64_frame_prev_register (this_frame, this_cache,
2123 IA64_BSP_REGNUM);
2124 prev_bsp = extract_unsigned_integer
2125 (value_contents_all (reg_val).data (), 8, byte_order);
2126 prev_bof = rse_address_add (prev_bsp, -(prev_cfm & 0x7f));
2127
2128 addr = rse_address_add (prev_bof, (regnum - IA64_GR32_REGNUM));
2129 return frame_unwind_got_memory (this_frame, regnum, addr);
2130 }
2131
2132 return frame_unwind_got_constant (this_frame, regnum, 0);
2133 }
2134
2135 else /* All other registers. */
2136 {
2137 CORE_ADDR addr = 0;
2138
2139 if (IA64_FR32_REGNUM <= regnum && regnum <= IA64_FR127_REGNUM)
2140 {
2141 /* Fetch floating point register rename base from current
2142 frame marker for this frame. */
2143 int rrb_fr = (cache->cfm >> 25) & 0x7f;
2144
2145 /* Adjust the floating point register number to account for
2146 register rotation. */
2147 regnum = IA64_FR32_REGNUM
2148 + ((regnum - IA64_FR32_REGNUM) + rrb_fr) % 96;
2149 }
2150
2151 /* If we have stored a memory address, access the register. */
2152 addr = cache->saved_regs[regnum];
2153 if (addr != 0)
2154 return frame_unwind_got_memory (this_frame, regnum, addr);
2155 /* Otherwise, punt and get the current value of the register. */
2156 else
2157 return frame_unwind_got_register (this_frame, regnum, regnum);
2158 }
2159 }
2160
2161 static const struct frame_unwind ia64_frame_unwind =
2162 {
2163 "ia64 prologue",
2164 NORMAL_FRAME,
2165 default_frame_unwind_stop_reason,
2166 &ia64_frame_this_id,
2167 &ia64_frame_prev_register,
2168 NULL,
2169 default_frame_sniffer
2170 };
2171
2172 /* Signal trampolines. */
2173
2174 static void
2175 ia64_sigtramp_frame_init_saved_regs (frame_info_ptr this_frame,
2176 struct ia64_frame_cache *cache)
2177 {
2178 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2179 ia64_gdbarch_tdep *tdep = gdbarch_tdep<ia64_gdbarch_tdep> (gdbarch);
2180
2181 if (tdep->sigcontext_register_address)
2182 {
2183 int regno;
2184
2185 cache->saved_regs[IA64_VRAP_REGNUM]
2186 = tdep->sigcontext_register_address (gdbarch, cache->base,
2187 IA64_IP_REGNUM);
2188 cache->saved_regs[IA64_CFM_REGNUM]
2189 = tdep->sigcontext_register_address (gdbarch, cache->base,
2190 IA64_CFM_REGNUM);
2191 cache->saved_regs[IA64_PSR_REGNUM]
2192 = tdep->sigcontext_register_address (gdbarch, cache->base,
2193 IA64_PSR_REGNUM);
2194 cache->saved_regs[IA64_BSP_REGNUM]
2195 = tdep->sigcontext_register_address (gdbarch, cache->base,
2196 IA64_BSP_REGNUM);
2197 cache->saved_regs[IA64_RNAT_REGNUM]
2198 = tdep->sigcontext_register_address (gdbarch, cache->base,
2199 IA64_RNAT_REGNUM);
2200 cache->saved_regs[IA64_CCV_REGNUM]
2201 = tdep->sigcontext_register_address (gdbarch, cache->base,
2202 IA64_CCV_REGNUM);
2203 cache->saved_regs[IA64_UNAT_REGNUM]
2204 = tdep->sigcontext_register_address (gdbarch, cache->base,
2205 IA64_UNAT_REGNUM);
2206 cache->saved_regs[IA64_FPSR_REGNUM]
2207 = tdep->sigcontext_register_address (gdbarch, cache->base,
2208 IA64_FPSR_REGNUM);
2209 cache->saved_regs[IA64_PFS_REGNUM]
2210 = tdep->sigcontext_register_address (gdbarch, cache->base,
2211 IA64_PFS_REGNUM);
2212 cache->saved_regs[IA64_LC_REGNUM]
2213 = tdep->sigcontext_register_address (gdbarch, cache->base,
2214 IA64_LC_REGNUM);
2215
2216 for (regno = IA64_GR1_REGNUM; regno <= IA64_GR31_REGNUM; regno++)
2217 cache->saved_regs[regno] =
2218 tdep->sigcontext_register_address (gdbarch, cache->base, regno);
2219 for (regno = IA64_BR0_REGNUM; regno <= IA64_BR7_REGNUM; regno++)
2220 cache->saved_regs[regno] =
2221 tdep->sigcontext_register_address (gdbarch, cache->base, regno);
2222 for (regno = IA64_FR2_REGNUM; regno <= IA64_FR31_REGNUM; regno++)
2223 cache->saved_regs[regno] =
2224 tdep->sigcontext_register_address (gdbarch, cache->base, regno);
2225 }
2226 }
2227
2228 static struct ia64_frame_cache *
2229 ia64_sigtramp_frame_cache (frame_info_ptr this_frame, void **this_cache)
2230 {
2231 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2232 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2233 struct ia64_frame_cache *cache;
2234 gdb_byte buf[8];
2235
2236 if (*this_cache)
2237 return (struct ia64_frame_cache *) *this_cache;
2238
2239 cache = ia64_alloc_frame_cache ();
2240
2241 get_frame_register (this_frame, sp_regnum, buf);
2242 /* Note that frame size is hard-coded below. We cannot calculate it
2243 via prologue examination. */
2244 cache->base = extract_unsigned_integer (buf, 8, byte_order) + 16;
2245
2246 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2247 cache->bsp = extract_unsigned_integer (buf, 8, byte_order);
2248
2249 get_frame_register (this_frame, IA64_CFM_REGNUM, buf);
2250 cache->cfm = extract_unsigned_integer (buf, 8, byte_order);
2251 cache->sof = cache->cfm & 0x7f;
2252
2253 ia64_sigtramp_frame_init_saved_regs (this_frame, cache);
2254
2255 *this_cache = cache;
2256 return cache;
2257 }
2258
2259 static void
2260 ia64_sigtramp_frame_this_id (frame_info_ptr this_frame,
2261 void **this_cache, struct frame_id *this_id)
2262 {
2263 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2264 struct ia64_frame_cache *cache =
2265 ia64_sigtramp_frame_cache (this_frame, this_cache);
2266
2267 (*this_id) = frame_id_build_special (cache->base,
2268 get_frame_pc (this_frame),
2269 cache->bsp);
2270 if (gdbarch_debug >= 1)
2271 gdb_printf (gdb_stdlog,
2272 "sigtramp frame id: code %s, stack %s, "
2273 "special %s, this_frame %s\n",
2274 paddress (gdbarch, this_id->code_addr),
2275 paddress (gdbarch, this_id->stack_addr),
2276 paddress (gdbarch, cache->bsp),
2277 host_address_to_string (this_frame.get ()));
2278 }
2279
2280 static struct value *
2281 ia64_sigtramp_frame_prev_register (frame_info_ptr this_frame,
2282 void **this_cache, int regnum)
2283 {
2284 struct ia64_frame_cache *cache =
2285 ia64_sigtramp_frame_cache (this_frame, this_cache);
2286
2287 gdb_assert (regnum >= 0);
2288
2289 if (!target_has_registers ())
2290 error (_("No registers."));
2291
2292 if (regnum == IA64_IP_REGNUM)
2293 {
2294 CORE_ADDR pc = 0;
2295 CORE_ADDR addr = cache->saved_regs[IA64_VRAP_REGNUM];
2296
2297 if (addr != 0)
2298 {
2299 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2300 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2301 pc = read_memory_unsigned_integer (addr, 8, byte_order);
2302 }
2303 pc &= ~0xf;
2304 return frame_unwind_got_constant (this_frame, regnum, pc);
2305 }
2306
2307 else if ((regnum >= IA64_GR32_REGNUM && regnum <= IA64_GR127_REGNUM)
2308 || (regnum >= V32_REGNUM && regnum <= V127_REGNUM))
2309 {
2310 CORE_ADDR addr = 0;
2311
2312 if (regnum >= V32_REGNUM)
2313 regnum = IA64_GR32_REGNUM + (regnum - V32_REGNUM);
2314 addr = cache->saved_regs[regnum];
2315 if (addr != 0)
2316 return frame_unwind_got_memory (this_frame, regnum, addr);
2317
2318 return frame_unwind_got_constant (this_frame, regnum, 0);
2319 }
2320
2321 else /* All other registers not listed above. */
2322 {
2323 CORE_ADDR addr = cache->saved_regs[regnum];
2324
2325 if (addr != 0)
2326 return frame_unwind_got_memory (this_frame, regnum, addr);
2327
2328 return frame_unwind_got_constant (this_frame, regnum, 0);
2329 }
2330 }
2331
2332 static int
2333 ia64_sigtramp_frame_sniffer (const struct frame_unwind *self,
2334 frame_info_ptr this_frame,
2335 void **this_cache)
2336 {
2337 gdbarch *arch = get_frame_arch (this_frame);
2338 ia64_gdbarch_tdep *tdep = gdbarch_tdep<ia64_gdbarch_tdep> (arch);
2339 if (tdep->pc_in_sigtramp)
2340 {
2341 CORE_ADDR pc = get_frame_pc (this_frame);
2342
2343 if (tdep->pc_in_sigtramp (pc))
2344 return 1;
2345 }
2346
2347 return 0;
2348 }
2349
2350 static const struct frame_unwind ia64_sigtramp_frame_unwind =
2351 {
2352 "ia64 sigtramp",
2353 SIGTRAMP_FRAME,
2354 default_frame_unwind_stop_reason,
2355 ia64_sigtramp_frame_this_id,
2356 ia64_sigtramp_frame_prev_register,
2357 NULL,
2358 ia64_sigtramp_frame_sniffer
2359 };
2360
2361 \f
2362
2363 static CORE_ADDR
2364 ia64_frame_base_address (frame_info_ptr this_frame, void **this_cache)
2365 {
2366 struct ia64_frame_cache *cache = ia64_frame_cache (this_frame, this_cache);
2367
2368 return cache->base;
2369 }
2370
2371 static const struct frame_base ia64_frame_base =
2372 {
2373 &ia64_frame_unwind,
2374 ia64_frame_base_address,
2375 ia64_frame_base_address,
2376 ia64_frame_base_address
2377 };
2378
2379 #ifdef HAVE_LIBUNWIND_IA64_H
2380
2381 struct ia64_unwind_table_entry
2382 {
2383 unw_word_t start_offset;
2384 unw_word_t end_offset;
2385 unw_word_t info_offset;
2386 };
2387
2388 static __inline__ uint64_t
2389 ia64_rse_slot_num (uint64_t addr)
2390 {
2391 return (addr >> 3) & 0x3f;
2392 }
2393
2394 /* Skip over a designated number of registers in the backing
2395 store, remembering every 64th position is for NAT. */
2396 static __inline__ uint64_t
2397 ia64_rse_skip_regs (uint64_t addr, long num_regs)
2398 {
2399 long delta = ia64_rse_slot_num(addr) + num_regs;
2400
2401 if (num_regs < 0)
2402 delta -= 0x3e;
2403 return addr + ((num_regs + delta/0x3f) << 3);
2404 }
2405
2406 /* Gdb ia64-libunwind-tdep callback function to convert from an ia64 gdb
2407 register number to a libunwind register number. */
2408 static int
2409 ia64_gdb2uw_regnum (int regnum)
2410 {
2411 if (regnum == sp_regnum)
2412 return UNW_IA64_SP;
2413 else if (regnum == IA64_BSP_REGNUM)
2414 return UNW_IA64_BSP;
2415 else if ((unsigned) (regnum - IA64_GR0_REGNUM) < 128)
2416 return UNW_IA64_GR + (regnum - IA64_GR0_REGNUM);
2417 else if ((unsigned) (regnum - V32_REGNUM) < 95)
2418 return UNW_IA64_GR + 32 + (regnum - V32_REGNUM);
2419 else if ((unsigned) (regnum - IA64_FR0_REGNUM) < 128)
2420 return UNW_IA64_FR + (regnum - IA64_FR0_REGNUM);
2421 else if ((unsigned) (regnum - IA64_PR0_REGNUM) < 64)
2422 return -1;
2423 else if ((unsigned) (regnum - IA64_BR0_REGNUM) < 8)
2424 return UNW_IA64_BR + (regnum - IA64_BR0_REGNUM);
2425 else if (regnum == IA64_PR_REGNUM)
2426 return UNW_IA64_PR;
2427 else if (regnum == IA64_IP_REGNUM)
2428 return UNW_REG_IP;
2429 else if (regnum == IA64_CFM_REGNUM)
2430 return UNW_IA64_CFM;
2431 else if ((unsigned) (regnum - IA64_AR0_REGNUM) < 128)
2432 return UNW_IA64_AR + (regnum - IA64_AR0_REGNUM);
2433 else if ((unsigned) (regnum - IA64_NAT0_REGNUM) < 128)
2434 return UNW_IA64_NAT + (regnum - IA64_NAT0_REGNUM);
2435 else
2436 return -1;
2437 }
2438
2439 /* Gdb ia64-libunwind-tdep callback function to convert from a libunwind
2440 register number to a ia64 gdb register number. */
2441 static int
2442 ia64_uw2gdb_regnum (int uw_regnum)
2443 {
2444 if (uw_regnum == UNW_IA64_SP)
2445 return sp_regnum;
2446 else if (uw_regnum == UNW_IA64_BSP)
2447 return IA64_BSP_REGNUM;
2448 else if ((unsigned) (uw_regnum - UNW_IA64_GR) < 32)
2449 return IA64_GR0_REGNUM + (uw_regnum - UNW_IA64_GR);
2450 else if ((unsigned) (uw_regnum - UNW_IA64_GR) < 128)
2451 return V32_REGNUM + (uw_regnum - (IA64_GR0_REGNUM + 32));
2452 else if ((unsigned) (uw_regnum - UNW_IA64_FR) < 128)
2453 return IA64_FR0_REGNUM + (uw_regnum - UNW_IA64_FR);
2454 else if ((unsigned) (uw_regnum - UNW_IA64_BR) < 8)
2455 return IA64_BR0_REGNUM + (uw_regnum - UNW_IA64_BR);
2456 else if (uw_regnum == UNW_IA64_PR)
2457 return IA64_PR_REGNUM;
2458 else if (uw_regnum == UNW_REG_IP)
2459 return IA64_IP_REGNUM;
2460 else if (uw_regnum == UNW_IA64_CFM)
2461 return IA64_CFM_REGNUM;
2462 else if ((unsigned) (uw_regnum - UNW_IA64_AR) < 128)
2463 return IA64_AR0_REGNUM + (uw_regnum - UNW_IA64_AR);
2464 else if ((unsigned) (uw_regnum - UNW_IA64_NAT) < 128)
2465 return IA64_NAT0_REGNUM + (uw_regnum - UNW_IA64_NAT);
2466 else
2467 return -1;
2468 }
2469
2470 /* Gdb ia64-libunwind-tdep callback function to reveal if register is
2471 a float register or not. */
2472 static int
2473 ia64_is_fpreg (int uw_regnum)
2474 {
2475 return unw_is_fpreg (uw_regnum);
2476 }
2477
2478 /* Libunwind callback accessor function for general registers. */
2479 static int
2480 ia64_access_reg (unw_addr_space_t as, unw_regnum_t uw_regnum, unw_word_t *val,
2481 int write, void *arg)
2482 {
2483 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2484 unw_word_t bsp, sof, cfm, psr, ip;
2485 struct frame_info *this_frame = (frame_info *) arg;
2486 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2487 ia64_gdbarch_tdep *tdep = gdbarch_tdep<ia64_gdbarch_tdep> (gdbarch);
2488
2489 /* We never call any libunwind routines that need to write registers. */
2490 gdb_assert (!write);
2491
2492 switch (uw_regnum)
2493 {
2494 case UNW_REG_IP:
2495 /* Libunwind expects to see the pc value which means the slot number
2496 from the psr must be merged with the ip word address. */
2497 ip = get_frame_register_unsigned (this_frame, IA64_IP_REGNUM);
2498 psr = get_frame_register_unsigned (this_frame, IA64_PSR_REGNUM);
2499 *val = ip | ((psr >> 41) & 0x3);
2500 break;
2501
2502 case UNW_IA64_AR_BSP:
2503 /* Libunwind expects to see the beginning of the current
2504 register frame so we must account for the fact that
2505 ptrace() will return a value for bsp that points *after*
2506 the current register frame. */
2507 bsp = get_frame_register_unsigned (this_frame, IA64_BSP_REGNUM);
2508 cfm = get_frame_register_unsigned (this_frame, IA64_CFM_REGNUM);
2509 sof = tdep->size_of_register_frame (this_frame, cfm);
2510 *val = ia64_rse_skip_regs (bsp, -sof);
2511 break;
2512
2513 case UNW_IA64_AR_BSPSTORE:
2514 /* Libunwind wants bspstore to be after the current register frame.
2515 This is what ptrace() and gdb treats as the regular bsp value. */
2516 *val = get_frame_register_unsigned (this_frame, IA64_BSP_REGNUM);
2517 break;
2518
2519 default:
2520 /* For all other registers, just unwind the value directly. */
2521 *val = get_frame_register_unsigned (this_frame, regnum);
2522 break;
2523 }
2524
2525 if (gdbarch_debug >= 1)
2526 gdb_printf (gdb_stdlog,
2527 " access_reg: from cache: %4s=%s\n",
2528 (((unsigned) regnum <= IA64_NAT127_REGNUM)
2529 ? ia64_register_names[regnum] : "r??"),
2530 paddress (gdbarch, *val));
2531 return 0;
2532 }
2533
2534 /* Libunwind callback accessor function for floating-point registers. */
2535 static int
2536 ia64_access_fpreg (unw_addr_space_t as, unw_regnum_t uw_regnum,
2537 unw_fpreg_t *val, int write, void *arg)
2538 {
2539 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2540 frame_info_ptr this_frame = (frame_info_ptr ) arg;
2541
2542 /* We never call any libunwind routines that need to write registers. */
2543 gdb_assert (!write);
2544
2545 get_frame_register (this_frame, regnum, (gdb_byte *) val);
2546
2547 return 0;
2548 }
2549
2550 /* Libunwind callback accessor function for top-level rse registers. */
2551 static int
2552 ia64_access_rse_reg (unw_addr_space_t as, unw_regnum_t uw_regnum,
2553 unw_word_t *val, int write, void *arg)
2554 {
2555 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2556 unw_word_t bsp, sof, cfm, psr, ip;
2557 struct regcache *regcache = (struct regcache *) arg;
2558 struct gdbarch *gdbarch = regcache->arch ();
2559
2560 /* We never call any libunwind routines that need to write registers. */
2561 gdb_assert (!write);
2562
2563 switch (uw_regnum)
2564 {
2565 case UNW_REG_IP:
2566 /* Libunwind expects to see the pc value which means the slot number
2567 from the psr must be merged with the ip word address. */
2568 regcache_cooked_read_unsigned (regcache, IA64_IP_REGNUM, &ip);
2569 regcache_cooked_read_unsigned (regcache, IA64_PSR_REGNUM, &psr);
2570 *val = ip | ((psr >> 41) & 0x3);
2571 break;
2572
2573 case UNW_IA64_AR_BSP:
2574 /* Libunwind expects to see the beginning of the current
2575 register frame so we must account for the fact that
2576 ptrace() will return a value for bsp that points *after*
2577 the current register frame. */
2578 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
2579 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
2580 sof = (cfm & 0x7f);
2581 *val = ia64_rse_skip_regs (bsp, -sof);
2582 break;
2583
2584 case UNW_IA64_AR_BSPSTORE:
2585 /* Libunwind wants bspstore to be after the current register frame.
2586 This is what ptrace() and gdb treats as the regular bsp value. */
2587 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, val);
2588 break;
2589
2590 default:
2591 /* For all other registers, just unwind the value directly. */
2592 regcache_cooked_read_unsigned (regcache, regnum, val);
2593 break;
2594 }
2595
2596 if (gdbarch_debug >= 1)
2597 gdb_printf (gdb_stdlog,
2598 " access_rse_reg: from cache: %4s=%s\n",
2599 (((unsigned) regnum <= IA64_NAT127_REGNUM)
2600 ? ia64_register_names[regnum] : "r??"),
2601 paddress (gdbarch, *val));
2602
2603 return 0;
2604 }
2605
2606 /* Libunwind callback accessor function for top-level fp registers. */
2607 static int
2608 ia64_access_rse_fpreg (unw_addr_space_t as, unw_regnum_t uw_regnum,
2609 unw_fpreg_t *val, int write, void *arg)
2610 {
2611 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2612 struct regcache *regcache = (struct regcache *) arg;
2613
2614 /* We never call any libunwind routines that need to write registers. */
2615 gdb_assert (!write);
2616
2617 regcache->cooked_read (regnum, (gdb_byte *) val);
2618
2619 return 0;
2620 }
2621
2622 /* Libunwind callback accessor function for accessing memory. */
2623 static int
2624 ia64_access_mem (unw_addr_space_t as,
2625 unw_word_t addr, unw_word_t *val,
2626 int write, void *arg)
2627 {
2628 if (addr - KERNEL_START < ktab_size)
2629 {
2630 unw_word_t *laddr = (unw_word_t*) ((char *) ktab
2631 + (addr - KERNEL_START));
2632
2633 if (write)
2634 *laddr = *val;
2635 else
2636 *val = *laddr;
2637 return 0;
2638 }
2639
2640 /* XXX do we need to normalize byte-order here? */
2641 if (write)
2642 return target_write_memory (addr, (gdb_byte *) val, sizeof (unw_word_t));
2643 else
2644 return target_read_memory (addr, (gdb_byte *) val, sizeof (unw_word_t));
2645 }
2646
2647 /* Call low-level function to access the kernel unwind table. */
2648 static gdb::optional<gdb::byte_vector>
2649 getunwind_table ()
2650 {
2651 /* FIXME drow/2005-09-10: This code used to call
2652 ia64_linux_xfer_unwind_table directly to fetch the unwind table
2653 for the currently running ia64-linux kernel. That data should
2654 come from the core file and be accessed via the auxv vector; if
2655 we want to preserve fall back to the running kernel's table, then
2656 we should find a way to override the corefile layer's
2657 xfer_partial method. */
2658
2659 return target_read_alloc (current_inferior ()->top_target (),
2660 TARGET_OBJECT_UNWIND_TABLE, NULL);
2661 }
2662
2663 /* Get the kernel unwind table. */
2664 static int
2665 get_kernel_table (unw_word_t ip, unw_dyn_info_t *di)
2666 {
2667 static struct ia64_table_entry *etab;
2668
2669 if (!ktab)
2670 {
2671 ktab_buf = getunwind_table ();
2672 if (!ktab_buf)
2673 return -UNW_ENOINFO;
2674
2675 ktab = (struct ia64_table_entry *) ktab_buf->data ();
2676 ktab_size = ktab_buf->size ();
2677
2678 for (etab = ktab; etab->start_offset; ++etab)
2679 etab->info_offset += KERNEL_START;
2680 }
2681
2682 if (ip < ktab[0].start_offset || ip >= etab[-1].end_offset)
2683 return -UNW_ENOINFO;
2684
2685 di->format = UNW_INFO_FORMAT_TABLE;
2686 di->gp = 0;
2687 di->start_ip = ktab[0].start_offset;
2688 di->end_ip = etab[-1].end_offset;
2689 di->u.ti.name_ptr = (unw_word_t) "<kernel>";
2690 di->u.ti.segbase = 0;
2691 di->u.ti.table_len = ((char *) etab - (char *) ktab) / sizeof (unw_word_t);
2692 di->u.ti.table_data = (unw_word_t *) ktab;
2693
2694 if (gdbarch_debug >= 1)
2695 gdb_printf (gdb_stdlog, "get_kernel_table: found table `%s': "
2696 "segbase=%s, length=%s, gp=%s\n",
2697 (char *) di->u.ti.name_ptr,
2698 hex_string (di->u.ti.segbase),
2699 pulongest (di->u.ti.table_len),
2700 hex_string (di->gp));
2701 return 0;
2702 }
2703
2704 /* Find the unwind table entry for a specified address. */
2705 static int
2706 ia64_find_unwind_table (struct objfile *objfile, unw_word_t ip,
2707 unw_dyn_info_t *dip, void **buf)
2708 {
2709 Elf_Internal_Phdr *phdr, *p_text = NULL, *p_unwind = NULL;
2710 Elf_Internal_Ehdr *ehdr;
2711 unw_word_t segbase = 0;
2712 CORE_ADDR load_base;
2713 bfd *bfd;
2714 int i;
2715
2716 bfd = objfile->obfd;
2717
2718 ehdr = elf_tdata (bfd)->elf_header;
2719 phdr = elf_tdata (bfd)->phdr;
2720
2721 load_base = objfile->text_section_offset ();
2722
2723 for (i = 0; i < ehdr->e_phnum; ++i)
2724 {
2725 switch (phdr[i].p_type)
2726 {
2727 case PT_LOAD:
2728 if ((unw_word_t) (ip - load_base - phdr[i].p_vaddr)
2729 < phdr[i].p_memsz)
2730 p_text = phdr + i;
2731 break;
2732
2733 case PT_IA_64_UNWIND:
2734 p_unwind = phdr + i;
2735 break;
2736
2737 default:
2738 break;
2739 }
2740 }
2741
2742 if (!p_text || !p_unwind)
2743 return -UNW_ENOINFO;
2744
2745 /* Verify that the segment that contains the IP also contains
2746 the static unwind table. If not, we may be in the Linux kernel's
2747 DSO gate page in which case the unwind table is another segment.
2748 Otherwise, we are dealing with runtime-generated code, for which we
2749 have no info here. */
2750 segbase = p_text->p_vaddr + load_base;
2751
2752 if ((p_unwind->p_vaddr - p_text->p_vaddr) >= p_text->p_memsz)
2753 {
2754 int ok = 0;
2755 for (i = 0; i < ehdr->e_phnum; ++i)
2756 {
2757 if (phdr[i].p_type == PT_LOAD
2758 && (p_unwind->p_vaddr - phdr[i].p_vaddr) < phdr[i].p_memsz)
2759 {
2760 ok = 1;
2761 /* Get the segbase from the section containing the
2762 libunwind table. */
2763 segbase = phdr[i].p_vaddr + load_base;
2764 }
2765 }
2766 if (!ok)
2767 return -UNW_ENOINFO;
2768 }
2769
2770 dip->start_ip = p_text->p_vaddr + load_base;
2771 dip->end_ip = dip->start_ip + p_text->p_memsz;
2772 dip->gp = ia64_find_global_pointer (objfile->arch (), ip);
2773 dip->format = UNW_INFO_FORMAT_REMOTE_TABLE;
2774 dip->u.rti.name_ptr = (unw_word_t) bfd_get_filename (bfd);
2775 dip->u.rti.segbase = segbase;
2776 dip->u.rti.table_len = p_unwind->p_memsz / sizeof (unw_word_t);
2777 dip->u.rti.table_data = p_unwind->p_vaddr + load_base;
2778
2779 return 0;
2780 }
2781
2782 /* Libunwind callback accessor function to acquire procedure unwind-info. */
2783 static int
2784 ia64_find_proc_info_x (unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
2785 int need_unwind_info, void *arg)
2786 {
2787 struct obj_section *sec = find_pc_section (ip);
2788 unw_dyn_info_t di;
2789 int ret;
2790 void *buf = NULL;
2791
2792 if (!sec)
2793 {
2794 /* XXX This only works if the host and the target architecture are
2795 both ia64 and if the have (more or less) the same kernel
2796 version. */
2797 if (get_kernel_table (ip, &di) < 0)
2798 return -UNW_ENOINFO;
2799
2800 if (gdbarch_debug >= 1)
2801 gdb_printf (gdb_stdlog, "ia64_find_proc_info_x: %s -> "
2802 "(name=`%s',segbase=%s,start=%s,end=%s,gp=%s,"
2803 "length=%s,data=%s)\n",
2804 hex_string (ip), (char *)di.u.ti.name_ptr,
2805 hex_string (di.u.ti.segbase),
2806 hex_string (di.start_ip), hex_string (di.end_ip),
2807 hex_string (di.gp),
2808 pulongest (di.u.ti.table_len),
2809 hex_string ((CORE_ADDR)di.u.ti.table_data));
2810 }
2811 else
2812 {
2813 ret = ia64_find_unwind_table (sec->objfile, ip, &di, &buf);
2814 if (ret < 0)
2815 return ret;
2816
2817 if (gdbarch_debug >= 1)
2818 gdb_printf (gdb_stdlog, "ia64_find_proc_info_x: %s -> "
2819 "(name=`%s',segbase=%s,start=%s,end=%s,gp=%s,"
2820 "length=%s,data=%s)\n",
2821 hex_string (ip), (char *)di.u.rti.name_ptr,
2822 hex_string (di.u.rti.segbase),
2823 hex_string (di.start_ip), hex_string (di.end_ip),
2824 hex_string (di.gp),
2825 pulongest (di.u.rti.table_len),
2826 hex_string (di.u.rti.table_data));
2827 }
2828
2829 ret = libunwind_search_unwind_table (&as, ip, &di, pi, need_unwind_info,
2830 arg);
2831
2832 /* We no longer need the dyn info storage so free it. */
2833 xfree (buf);
2834
2835 return ret;
2836 }
2837
2838 /* Libunwind callback accessor function for cleanup. */
2839 static void
2840 ia64_put_unwind_info (unw_addr_space_t as,
2841 unw_proc_info_t *pip, void *arg)
2842 {
2843 /* Nothing required for now. */
2844 }
2845
2846 /* Libunwind callback accessor function to get head of the dynamic
2847 unwind-info registration list. */
2848 static int
2849 ia64_get_dyn_info_list (unw_addr_space_t as,
2850 unw_word_t *dilap, void *arg)
2851 {
2852 struct obj_section *text_sec;
2853 unw_word_t ip, addr;
2854 unw_dyn_info_t di;
2855 int ret;
2856
2857 if (!libunwind_is_initialized ())
2858 return -UNW_ENOINFO;
2859
2860 for (objfile *objfile : current_program_space->objfiles ())
2861 {
2862 void *buf = NULL;
2863
2864 text_sec = objfile->sections + SECT_OFF_TEXT (objfile);
2865 ip = text_sec->addr ();
2866 ret = ia64_find_unwind_table (objfile, ip, &di, &buf);
2867 if (ret >= 0)
2868 {
2869 addr = libunwind_find_dyn_list (as, &di, arg);
2870 /* We no longer need the dyn info storage so free it. */
2871 xfree (buf);
2872
2873 if (addr)
2874 {
2875 if (gdbarch_debug >= 1)
2876 gdb_printf (gdb_stdlog,
2877 "dynamic unwind table in objfile %s "
2878 "at %s (gp=%s)\n",
2879 bfd_get_filename (objfile->obfd),
2880 hex_string (addr), hex_string (di.gp));
2881 *dilap = addr;
2882 return 0;
2883 }
2884 }
2885 }
2886 return -UNW_ENOINFO;
2887 }
2888
2889
2890 /* Frame interface functions for libunwind. */
2891
2892 static void
2893 ia64_libunwind_frame_this_id (frame_info_ptr this_frame, void **this_cache,
2894 struct frame_id *this_id)
2895 {
2896 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2897 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2898 struct frame_id id = outer_frame_id;
2899 gdb_byte buf[8];
2900 CORE_ADDR bsp;
2901
2902 libunwind_frame_this_id (this_frame, this_cache, &id);
2903 if (id == outer_frame_id)
2904 {
2905 (*this_id) = outer_frame_id;
2906 return;
2907 }
2908
2909 /* We must add the bsp as the special address for frame comparison
2910 purposes. */
2911 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2912 bsp = extract_unsigned_integer (buf, 8, byte_order);
2913
2914 (*this_id) = frame_id_build_special (id.stack_addr, id.code_addr, bsp);
2915
2916 if (gdbarch_debug >= 1)
2917 gdb_printf (gdb_stdlog,
2918 "libunwind frame id: code %s, stack %s, "
2919 "special %s, this_frame %s\n",
2920 paddress (gdbarch, id.code_addr),
2921 paddress (gdbarch, id.stack_addr),
2922 paddress (gdbarch, bsp),
2923 host_address_to_string (this_frame));
2924 }
2925
2926 static struct value *
2927 ia64_libunwind_frame_prev_register (frame_info_ptr this_frame,
2928 void **this_cache, int regnum)
2929 {
2930 int reg = regnum;
2931 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2932 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2933 struct value *val;
2934
2935 if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
2936 reg = IA64_PR_REGNUM;
2937 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
2938 reg = IA64_UNAT_REGNUM;
2939
2940 /* Let libunwind do most of the work. */
2941 val = libunwind_frame_prev_register (this_frame, this_cache, reg);
2942
2943 if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
2944 {
2945 ULONGEST prN_val;
2946
2947 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
2948 {
2949 int rrb_pr = 0;
2950 ULONGEST cfm;
2951
2952 /* Fetch predicate register rename base from current frame
2953 marker for this frame. */
2954 cfm = get_frame_register_unsigned (this_frame, IA64_CFM_REGNUM);
2955 rrb_pr = (cfm >> 32) & 0x3f;
2956
2957 /* Adjust the register number to account for register rotation. */
2958 regnum = VP16_REGNUM + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
2959 }
2960 prN_val = extract_bit_field (value_contents_all (val).data (),
2961 regnum - VP0_REGNUM, 1);
2962 return frame_unwind_got_constant (this_frame, regnum, prN_val);
2963 }
2964
2965 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
2966 {
2967 ULONGEST unatN_val;
2968
2969 unatN_val = extract_bit_field (value_contents_all (val).data (),
2970 regnum - IA64_NAT0_REGNUM, 1);
2971 return frame_unwind_got_constant (this_frame, regnum, unatN_val);
2972 }
2973
2974 else if (regnum == IA64_BSP_REGNUM)
2975 {
2976 struct value *cfm_val;
2977 CORE_ADDR prev_bsp, prev_cfm;
2978
2979 /* We want to calculate the previous bsp as the end of the previous
2980 register stack frame. This corresponds to what the hardware bsp
2981 register will be if we pop the frame back which is why we might
2982 have been called. We know that libunwind will pass us back the
2983 beginning of the current frame so we should just add sof to it. */
2984 prev_bsp = extract_unsigned_integer (value_contents_all (val).data (),
2985 8, byte_order);
2986 cfm_val = libunwind_frame_prev_register (this_frame, this_cache,
2987 IA64_CFM_REGNUM);
2988 prev_cfm = extract_unsigned_integer (value_contents_all (cfm_val).data (),
2989 8, byte_order);
2990 prev_bsp = rse_address_add (prev_bsp, (prev_cfm & 0x7f));
2991
2992 return frame_unwind_got_constant (this_frame, regnum, prev_bsp);
2993 }
2994 else
2995 return val;
2996 }
2997
2998 static int
2999 ia64_libunwind_frame_sniffer (const struct frame_unwind *self,
3000 frame_info_ptr this_frame,
3001 void **this_cache)
3002 {
3003 if (libunwind_is_initialized ()
3004 && libunwind_frame_sniffer (self, this_frame, this_cache))
3005 return 1;
3006
3007 return 0;
3008 }
3009
3010 static const struct frame_unwind ia64_libunwind_frame_unwind =
3011 {
3012 "ia64 libunwind",
3013 NORMAL_FRAME,
3014 default_frame_unwind_stop_reason,
3015 ia64_libunwind_frame_this_id,
3016 ia64_libunwind_frame_prev_register,
3017 NULL,
3018 ia64_libunwind_frame_sniffer,
3019 libunwind_frame_dealloc_cache
3020 };
3021
3022 static void
3023 ia64_libunwind_sigtramp_frame_this_id (frame_info_ptr this_frame,
3024 void **this_cache,
3025 struct frame_id *this_id)
3026 {
3027 struct gdbarch *gdbarch = get_frame_arch (this_frame);
3028 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3029 gdb_byte buf[8];
3030 CORE_ADDR bsp;
3031 struct frame_id id = outer_frame_id;
3032
3033 libunwind_frame_this_id (this_frame, this_cache, &id);
3034 if (id == outer_frame_id)
3035 {
3036 (*this_id) = outer_frame_id;
3037 return;
3038 }
3039
3040 /* We must add the bsp as the special address for frame comparison
3041 purposes. */
3042 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
3043 bsp = extract_unsigned_integer (buf, 8, byte_order);
3044
3045 /* For a sigtramp frame, we don't make the check for previous ip being 0. */
3046 (*this_id) = frame_id_build_special (id.stack_addr, id.code_addr, bsp);
3047
3048 if (gdbarch_debug >= 1)
3049 gdb_printf (gdb_stdlog,
3050 "libunwind sigtramp frame id: code %s, "
3051 "stack %s, special %s, this_frame %s\n",
3052 paddress (gdbarch, id.code_addr),
3053 paddress (gdbarch, id.stack_addr),
3054 paddress (gdbarch, bsp),
3055 host_address_to_string (this_frame));
3056 }
3057
3058 static struct value *
3059 ia64_libunwind_sigtramp_frame_prev_register (frame_info_ptr this_frame,
3060 void **this_cache, int regnum)
3061 {
3062 struct gdbarch *gdbarch = get_frame_arch (this_frame);
3063 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3064 struct value *prev_ip_val;
3065 CORE_ADDR prev_ip;
3066
3067 /* If the previous frame pc value is 0, then we want to use the SIGCONTEXT
3068 method of getting previous registers. */
3069 prev_ip_val = libunwind_frame_prev_register (this_frame, this_cache,
3070 IA64_IP_REGNUM);
3071 prev_ip = extract_unsigned_integer (value_contents_all (prev_ip_val).data (),
3072 8, byte_order);
3073
3074 if (prev_ip == 0)
3075 {
3076 void *tmp_cache = NULL;
3077 return ia64_sigtramp_frame_prev_register (this_frame, &tmp_cache,
3078 regnum);
3079 }
3080 else
3081 return ia64_libunwind_frame_prev_register (this_frame, this_cache, regnum);
3082 }
3083
3084 static int
3085 ia64_libunwind_sigtramp_frame_sniffer (const struct frame_unwind *self,
3086 frame_info_ptr this_frame,
3087 void **this_cache)
3088 {
3089 if (libunwind_is_initialized ())
3090 {
3091 if (libunwind_sigtramp_frame_sniffer (self, this_frame, this_cache))
3092 return 1;
3093 return 0;
3094 }
3095 else
3096 return ia64_sigtramp_frame_sniffer (self, this_frame, this_cache);
3097 }
3098
3099 static const struct frame_unwind ia64_libunwind_sigtramp_frame_unwind =
3100 {
3101 "ia64 libunwind sigtramp",
3102 SIGTRAMP_FRAME,
3103 default_frame_unwind_stop_reason,
3104 ia64_libunwind_sigtramp_frame_this_id,
3105 ia64_libunwind_sigtramp_frame_prev_register,
3106 NULL,
3107 ia64_libunwind_sigtramp_frame_sniffer
3108 };
3109
3110 /* Set of libunwind callback acccessor functions. */
3111 unw_accessors_t ia64_unw_accessors =
3112 {
3113 ia64_find_proc_info_x,
3114 ia64_put_unwind_info,
3115 ia64_get_dyn_info_list,
3116 ia64_access_mem,
3117 ia64_access_reg,
3118 ia64_access_fpreg,
3119 /* resume */
3120 /* get_proc_name */
3121 };
3122
3123 /* Set of special libunwind callback acccessor functions specific for accessing
3124 the rse registers. At the top of the stack, we want libunwind to figure out
3125 how to read r32 - r127. Though usually they are found sequentially in
3126 memory starting from $bof, this is not always true. */
3127 unw_accessors_t ia64_unw_rse_accessors =
3128 {
3129 ia64_find_proc_info_x,
3130 ia64_put_unwind_info,
3131 ia64_get_dyn_info_list,
3132 ia64_access_mem,
3133 ia64_access_rse_reg,
3134 ia64_access_rse_fpreg,
3135 /* resume */
3136 /* get_proc_name */
3137 };
3138
3139 /* Set of ia64-libunwind-tdep gdb callbacks and data for generic
3140 ia64-libunwind-tdep code to use. */
3141 struct libunwind_descr ia64_libunwind_descr =
3142 {
3143 ia64_gdb2uw_regnum,
3144 ia64_uw2gdb_regnum,
3145 ia64_is_fpreg,
3146 &ia64_unw_accessors,
3147 &ia64_unw_rse_accessors,
3148 };
3149
3150 #endif /* HAVE_LIBUNWIND_IA64_H */
3151
3152 static int
3153 ia64_use_struct_convention (struct type *type)
3154 {
3155 struct type *float_elt_type;
3156
3157 /* Don't use the struct convention for anything but structure,
3158 union, or array types. */
3159 if (!(type->code () == TYPE_CODE_STRUCT
3160 || type->code () == TYPE_CODE_UNION
3161 || type->code () == TYPE_CODE_ARRAY))
3162 return 0;
3163
3164 /* HFAs are structures (or arrays) consisting entirely of floating
3165 point values of the same length. Up to 8 of these are returned
3166 in registers. Don't use the struct convention when this is the
3167 case. */
3168 float_elt_type = is_float_or_hfa_type (type);
3169 if (float_elt_type != NULL
3170 && type->length () / float_elt_type->length () <= 8)
3171 return 0;
3172
3173 /* Other structs of length 32 or less are returned in r8-r11.
3174 Don't use the struct convention for those either. */
3175 return type->length () > 32;
3176 }
3177
3178 /* Return non-zero if TYPE is a structure or union type. */
3179
3180 static int
3181 ia64_struct_type_p (const struct type *type)
3182 {
3183 return (type->code () == TYPE_CODE_STRUCT
3184 || type->code () == TYPE_CODE_UNION);
3185 }
3186
3187 static void
3188 ia64_extract_return_value (struct type *type, struct regcache *regcache,
3189 gdb_byte *valbuf)
3190 {
3191 struct gdbarch *gdbarch = regcache->arch ();
3192 struct type *float_elt_type;
3193
3194 float_elt_type = is_float_or_hfa_type (type);
3195 if (float_elt_type != NULL)
3196 {
3197 gdb_byte from[IA64_FP_REGISTER_SIZE];
3198 int offset = 0;
3199 int regnum = IA64_FR8_REGNUM;
3200 int n = type->length () / float_elt_type->length ();
3201
3202 while (n-- > 0)
3203 {
3204 regcache->cooked_read (regnum, from);
3205 target_float_convert (from, ia64_ext_type (gdbarch),
3206 valbuf + offset, float_elt_type);
3207 offset += float_elt_type->length ();
3208 regnum++;
3209 }
3210 }
3211 else if (!ia64_struct_type_p (type) && type->length () < 8)
3212 {
3213 /* This is an integral value, and its size is less than 8 bytes.
3214 These values are LSB-aligned, so extract the relevant bytes,
3215 and copy them into VALBUF. */
3216 /* brobecker/2005-12-30: Actually, all integral values are LSB aligned,
3217 so I suppose we should also add handling here for integral values
3218 whose size is greater than 8. But I wasn't able to create such
3219 a type, neither in C nor in Ada, so not worrying about these yet. */
3220 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3221 ULONGEST val;
3222
3223 regcache_cooked_read_unsigned (regcache, IA64_GR8_REGNUM, &val);
3224 store_unsigned_integer (valbuf, type->length (), byte_order, val);
3225 }
3226 else
3227 {
3228 ULONGEST val;
3229 int offset = 0;
3230 int regnum = IA64_GR8_REGNUM;
3231 int reglen = register_type (gdbarch, IA64_GR8_REGNUM)->length ();
3232 int n = type->length () / reglen;
3233 int m = type->length () % reglen;
3234
3235 while (n-- > 0)
3236 {
3237 ULONGEST regval;
3238 regcache_cooked_read_unsigned (regcache, regnum, &regval);
3239 memcpy ((char *)valbuf + offset, &regval, reglen);
3240 offset += reglen;
3241 regnum++;
3242 }
3243
3244 if (m)
3245 {
3246 regcache_cooked_read_unsigned (regcache, regnum, &val);
3247 memcpy ((char *)valbuf + offset, &val, m);
3248 }
3249 }
3250 }
3251
3252 static void
3253 ia64_store_return_value (struct type *type, struct regcache *regcache,
3254 const gdb_byte *valbuf)
3255 {
3256 struct gdbarch *gdbarch = regcache->arch ();
3257 struct type *float_elt_type;
3258
3259 float_elt_type = is_float_or_hfa_type (type);
3260 if (float_elt_type != NULL)
3261 {
3262 gdb_byte to[IA64_FP_REGISTER_SIZE];
3263 int offset = 0;
3264 int regnum = IA64_FR8_REGNUM;
3265 int n = type->length () / float_elt_type->length ();
3266
3267 while (n-- > 0)
3268 {
3269 target_float_convert (valbuf + offset, float_elt_type,
3270 to, ia64_ext_type (gdbarch));
3271 regcache->cooked_write (regnum, to);
3272 offset += float_elt_type->length ();
3273 regnum++;
3274 }
3275 }
3276 else
3277 {
3278 int offset = 0;
3279 int regnum = IA64_GR8_REGNUM;
3280 int reglen = register_type (gdbarch, IA64_GR8_REGNUM)->length ();
3281 int n = type->length () / reglen;
3282 int m = type->length () % reglen;
3283
3284 while (n-- > 0)
3285 {
3286 ULONGEST val;
3287 memcpy (&val, (char *)valbuf + offset, reglen);
3288 regcache_cooked_write_unsigned (regcache, regnum, val);
3289 offset += reglen;
3290 regnum++;
3291 }
3292
3293 if (m)
3294 {
3295 ULONGEST val;
3296 memcpy (&val, (char *)valbuf + offset, m);
3297 regcache_cooked_write_unsigned (regcache, regnum, val);
3298 }
3299 }
3300 }
3301
3302 static enum return_value_convention
3303 ia64_return_value (struct gdbarch *gdbarch, struct value *function,
3304 struct type *valtype, struct regcache *regcache,
3305 gdb_byte *readbuf, const gdb_byte *writebuf)
3306 {
3307 int struct_return = ia64_use_struct_convention (valtype);
3308
3309 if (writebuf != NULL)
3310 {
3311 gdb_assert (!struct_return);
3312 ia64_store_return_value (valtype, regcache, writebuf);
3313 }
3314
3315 if (readbuf != NULL)
3316 {
3317 gdb_assert (!struct_return);
3318 ia64_extract_return_value (valtype, regcache, readbuf);
3319 }
3320
3321 if (struct_return)
3322 return RETURN_VALUE_STRUCT_CONVENTION;
3323 else
3324 return RETURN_VALUE_REGISTER_CONVENTION;
3325 }
3326
3327 static int
3328 is_float_or_hfa_type_recurse (struct type *t, struct type **etp)
3329 {
3330 switch (t->code ())
3331 {
3332 case TYPE_CODE_FLT:
3333 if (*etp)
3334 return (*etp)->length () == t->length ();
3335 else
3336 {
3337 *etp = t;
3338 return 1;
3339 }
3340 break;
3341 case TYPE_CODE_ARRAY:
3342 return
3343 is_float_or_hfa_type_recurse (check_typedef (t->target_type ()),
3344 etp);
3345 break;
3346 case TYPE_CODE_STRUCT:
3347 {
3348 int i;
3349
3350 for (i = 0; i < t->num_fields (); i++)
3351 if (!is_float_or_hfa_type_recurse
3352 (check_typedef (t->field (i).type ()), etp))
3353 return 0;
3354 return 1;
3355 }
3356 break;
3357 default:
3358 break;
3359 }
3360
3361 return 0;
3362 }
3363
3364 /* Determine if the given type is one of the floating point types or
3365 and HFA (which is a struct, array, or combination thereof whose
3366 bottom-most elements are all of the same floating point type). */
3367
3368 static struct type *
3369 is_float_or_hfa_type (struct type *t)
3370 {
3371 struct type *et = 0;
3372
3373 return is_float_or_hfa_type_recurse (t, &et) ? et : 0;
3374 }
3375
3376
3377 /* Return 1 if the alignment of T is such that the next even slot
3378 should be used. Return 0, if the next available slot should
3379 be used. (See section 8.5.1 of the IA-64 Software Conventions
3380 and Runtime manual). */
3381
3382 static int
3383 slot_alignment_is_next_even (struct type *t)
3384 {
3385 switch (t->code ())
3386 {
3387 case TYPE_CODE_INT:
3388 case TYPE_CODE_FLT:
3389 if (t->length () > 8)
3390 return 1;
3391 else
3392 return 0;
3393 case TYPE_CODE_ARRAY:
3394 return
3395 slot_alignment_is_next_even (check_typedef (t->target_type ()));
3396 case TYPE_CODE_STRUCT:
3397 {
3398 int i;
3399
3400 for (i = 0; i < t->num_fields (); i++)
3401 if (slot_alignment_is_next_even
3402 (check_typedef (t->field (i).type ())))
3403 return 1;
3404 return 0;
3405 }
3406 default:
3407 return 0;
3408 }
3409 }
3410
3411 /* Attempt to find (and return) the global pointer for the given
3412 function.
3413
3414 This is a rather nasty bit of code searchs for the .dynamic section
3415 in the objfile corresponding to the pc of the function we're trying
3416 to call. Once it finds the addresses at which the .dynamic section
3417 lives in the child process, it scans the Elf64_Dyn entries for a
3418 DT_PLTGOT tag. If it finds one of these, the corresponding
3419 d_un.d_ptr value is the global pointer. */
3420
3421 static CORE_ADDR
3422 ia64_find_global_pointer_from_dynamic_section (struct gdbarch *gdbarch,
3423 CORE_ADDR faddr)
3424 {
3425 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3426 struct obj_section *faddr_sect;
3427
3428 faddr_sect = find_pc_section (faddr);
3429 if (faddr_sect != NULL)
3430 {
3431 struct obj_section *osect;
3432
3433 ALL_OBJFILE_OSECTIONS (faddr_sect->objfile, osect)
3434 {
3435 if (strcmp (osect->the_bfd_section->name, ".dynamic") == 0)
3436 break;
3437 }
3438
3439 if (osect < faddr_sect->objfile->sections_end)
3440 {
3441 CORE_ADDR addr = osect->addr ();
3442 CORE_ADDR endaddr = osect->endaddr ();
3443
3444 while (addr < endaddr)
3445 {
3446 int status;
3447 LONGEST tag;
3448 gdb_byte buf[8];
3449
3450 status = target_read_memory (addr, buf, sizeof (buf));
3451 if (status != 0)
3452 break;
3453 tag = extract_signed_integer (buf, byte_order);
3454
3455 if (tag == DT_PLTGOT)
3456 {
3457 CORE_ADDR global_pointer;
3458
3459 status = target_read_memory (addr + 8, buf, sizeof (buf));
3460 if (status != 0)
3461 break;
3462 global_pointer = extract_unsigned_integer (buf, sizeof (buf),
3463 byte_order);
3464
3465 /* The payoff... */
3466 return global_pointer;
3467 }
3468
3469 if (tag == DT_NULL)
3470 break;
3471
3472 addr += 16;
3473 }
3474 }
3475 }
3476 return 0;
3477 }
3478
3479 /* Attempt to find (and return) the global pointer for the given
3480 function. We first try the find_global_pointer_from_solib routine
3481 from the gdbarch tdep vector, if provided. And if that does not
3482 work, then we try ia64_find_global_pointer_from_dynamic_section. */
3483
3484 static CORE_ADDR
3485 ia64_find_global_pointer (struct gdbarch *gdbarch, CORE_ADDR faddr)
3486 {
3487 ia64_gdbarch_tdep *tdep = gdbarch_tdep<ia64_gdbarch_tdep> (gdbarch);
3488 CORE_ADDR addr = 0;
3489
3490 if (tdep->find_global_pointer_from_solib)
3491 addr = tdep->find_global_pointer_from_solib (gdbarch, faddr);
3492 if (addr == 0)
3493 addr = ia64_find_global_pointer_from_dynamic_section (gdbarch, faddr);
3494 return addr;
3495 }
3496
3497 /* Given a function's address, attempt to find (and return) the
3498 corresponding (canonical) function descriptor. Return 0 if
3499 not found. */
3500 static CORE_ADDR
3501 find_extant_func_descr (struct gdbarch *gdbarch, CORE_ADDR faddr)
3502 {
3503 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3504 struct obj_section *faddr_sect;
3505
3506 /* Return early if faddr is already a function descriptor. */
3507 faddr_sect = find_pc_section (faddr);
3508 if (faddr_sect && strcmp (faddr_sect->the_bfd_section->name, ".opd") == 0)
3509 return faddr;
3510
3511 if (faddr_sect != NULL)
3512 {
3513 struct obj_section *osect;
3514 ALL_OBJFILE_OSECTIONS (faddr_sect->objfile, osect)
3515 {
3516 if (strcmp (osect->the_bfd_section->name, ".opd") == 0)
3517 break;
3518 }
3519
3520 if (osect < faddr_sect->objfile->sections_end)
3521 {
3522 CORE_ADDR addr = osect->addr ();
3523 CORE_ADDR endaddr = osect->endaddr ();
3524
3525 while (addr < endaddr)
3526 {
3527 int status;
3528 LONGEST faddr2;
3529 gdb_byte buf[8];
3530
3531 status = target_read_memory (addr, buf, sizeof (buf));
3532 if (status != 0)
3533 break;
3534 faddr2 = extract_signed_integer (buf, byte_order);
3535
3536 if (faddr == faddr2)
3537 return addr;
3538
3539 addr += 16;
3540 }
3541 }
3542 }
3543 return 0;
3544 }
3545
3546 /* Attempt to find a function descriptor corresponding to the
3547 given address. If none is found, construct one on the
3548 stack using the address at fdaptr. */
3549
3550 static CORE_ADDR
3551 find_func_descr (struct regcache *regcache, CORE_ADDR faddr, CORE_ADDR *fdaptr)
3552 {
3553 struct gdbarch *gdbarch = regcache->arch ();
3554 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3555 CORE_ADDR fdesc;
3556
3557 fdesc = find_extant_func_descr (gdbarch, faddr);
3558
3559 if (fdesc == 0)
3560 {
3561 ULONGEST global_pointer;
3562 gdb_byte buf[16];
3563
3564 fdesc = *fdaptr;
3565 *fdaptr += 16;
3566
3567 global_pointer = ia64_find_global_pointer (gdbarch, faddr);
3568
3569 if (global_pointer == 0)
3570 regcache_cooked_read_unsigned (regcache,
3571 IA64_GR1_REGNUM, &global_pointer);
3572
3573 store_unsigned_integer (buf, 8, byte_order, faddr);
3574 store_unsigned_integer (buf + 8, 8, byte_order, global_pointer);
3575
3576 write_memory (fdesc, buf, 16);
3577 }
3578
3579 return fdesc;
3580 }
3581
3582 /* Use the following routine when printing out function pointers
3583 so the user can see the function address rather than just the
3584 function descriptor. */
3585 static CORE_ADDR
3586 ia64_convert_from_func_ptr_addr (struct gdbarch *gdbarch, CORE_ADDR addr,
3587 struct target_ops *targ)
3588 {
3589 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3590 struct obj_section *s;
3591 gdb_byte buf[8];
3592
3593 s = find_pc_section (addr);
3594
3595 /* check if ADDR points to a function descriptor. */
3596 if (s && strcmp (s->the_bfd_section->name, ".opd") == 0)
3597 return read_memory_unsigned_integer (addr, 8, byte_order);
3598
3599 /* Normally, functions live inside a section that is executable.
3600 So, if ADDR points to a non-executable section, then treat it
3601 as a function descriptor and return the target address iff
3602 the target address itself points to a section that is executable.
3603 Check first the memory of the whole length of 8 bytes is readable. */
3604 if (s && (s->the_bfd_section->flags & SEC_CODE) == 0
3605 && target_read_memory (addr, buf, 8) == 0)
3606 {
3607 CORE_ADDR pc = extract_unsigned_integer (buf, 8, byte_order);
3608 struct obj_section *pc_section = find_pc_section (pc);
3609
3610 if (pc_section && (pc_section->the_bfd_section->flags & SEC_CODE))
3611 return pc;
3612 }
3613
3614 /* There are also descriptors embedded in vtables. */
3615 if (s)
3616 {
3617 struct bound_minimal_symbol minsym;
3618
3619 minsym = lookup_minimal_symbol_by_pc (addr);
3620
3621 if (minsym.minsym
3622 && is_vtable_name (minsym.minsym->linkage_name ()))
3623 return read_memory_unsigned_integer (addr, 8, byte_order);
3624 }
3625
3626 return addr;
3627 }
3628
3629 static CORE_ADDR
3630 ia64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
3631 {
3632 return sp & ~0xfLL;
3633 }
3634
3635 /* The default "allocate_new_rse_frame" ia64_infcall_ops routine for ia64. */
3636
3637 static void
3638 ia64_allocate_new_rse_frame (struct regcache *regcache, ULONGEST bsp, int sof)
3639 {
3640 ULONGEST cfm, pfs, new_bsp;
3641
3642 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
3643
3644 new_bsp = rse_address_add (bsp, sof);
3645 regcache_cooked_write_unsigned (regcache, IA64_BSP_REGNUM, new_bsp);
3646
3647 regcache_cooked_read_unsigned (regcache, IA64_PFS_REGNUM, &pfs);
3648 pfs &= 0xc000000000000000LL;
3649 pfs |= (cfm & 0xffffffffffffLL);
3650 regcache_cooked_write_unsigned (regcache, IA64_PFS_REGNUM, pfs);
3651
3652 cfm &= 0xc000000000000000LL;
3653 cfm |= sof;
3654 regcache_cooked_write_unsigned (regcache, IA64_CFM_REGNUM, cfm);
3655 }
3656
3657 /* The default "store_argument_in_slot" ia64_infcall_ops routine for
3658 ia64. */
3659
3660 static void
3661 ia64_store_argument_in_slot (struct regcache *regcache, CORE_ADDR bsp,
3662 int slotnum, gdb_byte *buf)
3663 {
3664 write_memory (rse_address_add (bsp, slotnum), buf, 8);
3665 }
3666
3667 /* The default "set_function_addr" ia64_infcall_ops routine for ia64. */
3668
3669 static void
3670 ia64_set_function_addr (struct regcache *regcache, CORE_ADDR func_addr)
3671 {
3672 /* Nothing needed. */
3673 }
3674
3675 static CORE_ADDR
3676 ia64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
3677 struct regcache *regcache, CORE_ADDR bp_addr,
3678 int nargs, struct value **args, CORE_ADDR sp,
3679 function_call_return_method return_method,
3680 CORE_ADDR struct_addr)
3681 {
3682 ia64_gdbarch_tdep *tdep = gdbarch_tdep<ia64_gdbarch_tdep> (gdbarch);
3683 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3684 int argno;
3685 struct value *arg;
3686 struct type *type;
3687 int len, argoffset;
3688 int nslots, rseslots, memslots, slotnum, nfuncargs;
3689 int floatreg;
3690 ULONGEST bsp;
3691 CORE_ADDR funcdescaddr, global_pointer;
3692 CORE_ADDR func_addr = find_function_addr (function, NULL);
3693
3694 nslots = 0;
3695 nfuncargs = 0;
3696 /* Count the number of slots needed for the arguments. */
3697 for (argno = 0; argno < nargs; argno++)
3698 {
3699 arg = args[argno];
3700 type = check_typedef (value_type (arg));
3701 len = type->length ();
3702
3703 if ((nslots & 1) && slot_alignment_is_next_even (type))
3704 nslots++;
3705
3706 if (type->code () == TYPE_CODE_FUNC)
3707 nfuncargs++;
3708
3709 nslots += (len + 7) / 8;
3710 }
3711
3712 /* Divvy up the slots between the RSE and the memory stack. */
3713 rseslots = (nslots > 8) ? 8 : nslots;
3714 memslots = nslots - rseslots;
3715
3716 /* Allocate a new RSE frame. */
3717 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
3718 tdep->infcall_ops.allocate_new_rse_frame (regcache, bsp, rseslots);
3719
3720 /* We will attempt to find function descriptors in the .opd segment,
3721 but if we can't we'll construct them ourselves. That being the
3722 case, we'll need to reserve space on the stack for them. */
3723 funcdescaddr = sp - nfuncargs * 16;
3724 funcdescaddr &= ~0xfLL;
3725
3726 /* Adjust the stack pointer to it's new value. The calling conventions
3727 require us to have 16 bytes of scratch, plus whatever space is
3728 necessary for the memory slots and our function descriptors. */
3729 sp = sp - 16 - (memslots + nfuncargs) * 8;
3730 sp &= ~0xfLL; /* Maintain 16 byte alignment. */
3731
3732 /* Place the arguments where they belong. The arguments will be
3733 either placed in the RSE backing store or on the memory stack.
3734 In addition, floating point arguments or HFAs are placed in
3735 floating point registers. */
3736 slotnum = 0;
3737 floatreg = IA64_FR8_REGNUM;
3738 for (argno = 0; argno < nargs; argno++)
3739 {
3740 struct type *float_elt_type;
3741
3742 arg = args[argno];
3743 type = check_typedef (value_type (arg));
3744 len = type->length ();
3745
3746 /* Special handling for function parameters. */
3747 if (len == 8
3748 && type->code () == TYPE_CODE_PTR
3749 && type->target_type ()->code () == TYPE_CODE_FUNC)
3750 {
3751 gdb_byte val_buf[8];
3752 ULONGEST faddr = extract_unsigned_integer
3753 (value_contents (arg).data (), 8, byte_order);
3754 store_unsigned_integer (val_buf, 8, byte_order,
3755 find_func_descr (regcache, faddr,
3756 &funcdescaddr));
3757 if (slotnum < rseslots)
3758 tdep->infcall_ops.store_argument_in_slot (regcache, bsp,
3759 slotnum, val_buf);
3760 else
3761 write_memory (sp + 16 + 8 * (slotnum - rseslots), val_buf, 8);
3762 slotnum++;
3763 continue;
3764 }
3765
3766 /* Normal slots. */
3767
3768 /* Skip odd slot if necessary... */
3769 if ((slotnum & 1) && slot_alignment_is_next_even (type))
3770 slotnum++;
3771
3772 argoffset = 0;
3773 while (len > 0)
3774 {
3775 gdb_byte val_buf[8];
3776
3777 memset (val_buf, 0, 8);
3778 if (!ia64_struct_type_p (type) && len < 8)
3779 {
3780 /* Integral types are LSB-aligned, so we have to be careful
3781 to insert the argument on the correct side of the buffer.
3782 This is why we use store_unsigned_integer. */
3783 store_unsigned_integer
3784 (val_buf, 8, byte_order,
3785 extract_unsigned_integer (value_contents (arg).data (), len,
3786 byte_order));
3787 }
3788 else
3789 {
3790 /* This is either an 8bit integral type, or an aggregate.
3791 For 8bit integral type, there is no problem, we just
3792 copy the value over.
3793
3794 For aggregates, the only potentially tricky portion
3795 is to write the last one if it is less than 8 bytes.
3796 In this case, the data is Byte0-aligned. Happy news,
3797 this means that we don't need to differentiate the
3798 handling of 8byte blocks and less-than-8bytes blocks. */
3799 memcpy (val_buf, value_contents (arg).data () + argoffset,
3800 (len > 8) ? 8 : len);
3801 }
3802
3803 if (slotnum < rseslots)
3804 tdep->infcall_ops.store_argument_in_slot (regcache, bsp,
3805 slotnum, val_buf);
3806 else
3807 write_memory (sp + 16 + 8 * (slotnum - rseslots), val_buf, 8);
3808
3809 argoffset += 8;
3810 len -= 8;
3811 slotnum++;
3812 }
3813
3814 /* Handle floating point types (including HFAs). */
3815 float_elt_type = is_float_or_hfa_type (type);
3816 if (float_elt_type != NULL)
3817 {
3818 argoffset = 0;
3819 len = type->length ();
3820 while (len > 0 && floatreg < IA64_FR16_REGNUM)
3821 {
3822 gdb_byte to[IA64_FP_REGISTER_SIZE];
3823 target_float_convert (value_contents (arg).data () + argoffset,
3824 float_elt_type, to,
3825 ia64_ext_type (gdbarch));
3826 regcache->cooked_write (floatreg, to);
3827 floatreg++;
3828 argoffset += float_elt_type->length ();
3829 len -= float_elt_type->length ();
3830 }
3831 }
3832 }
3833
3834 /* Store the struct return value in r8 if necessary. */
3835 if (return_method == return_method_struct)
3836 regcache_cooked_write_unsigned (regcache, IA64_GR8_REGNUM,
3837 (ULONGEST) struct_addr);
3838
3839 global_pointer = ia64_find_global_pointer (gdbarch, func_addr);
3840
3841 if (global_pointer != 0)
3842 regcache_cooked_write_unsigned (regcache, IA64_GR1_REGNUM, global_pointer);
3843
3844 /* The following is not necessary on HP-UX, because we're using
3845 a dummy code sequence pushed on the stack to make the call, and
3846 this sequence doesn't need b0 to be set in order for our dummy
3847 breakpoint to be hit. Nonetheless, this doesn't interfere, and
3848 it's needed for other OSes, so we do this unconditionaly. */
3849 regcache_cooked_write_unsigned (regcache, IA64_BR0_REGNUM, bp_addr);
3850
3851 regcache_cooked_write_unsigned (regcache, sp_regnum, sp);
3852
3853 tdep->infcall_ops.set_function_addr (regcache, func_addr);
3854
3855 return sp;
3856 }
3857
3858 static const struct ia64_infcall_ops ia64_infcall_ops =
3859 {
3860 ia64_allocate_new_rse_frame,
3861 ia64_store_argument_in_slot,
3862 ia64_set_function_addr
3863 };
3864
3865 static struct frame_id
3866 ia64_dummy_id (struct gdbarch *gdbarch, frame_info_ptr this_frame)
3867 {
3868 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3869 gdb_byte buf[8];
3870 CORE_ADDR sp, bsp;
3871
3872 get_frame_register (this_frame, sp_regnum, buf);
3873 sp = extract_unsigned_integer (buf, 8, byte_order);
3874
3875 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
3876 bsp = extract_unsigned_integer (buf, 8, byte_order);
3877
3878 if (gdbarch_debug >= 1)
3879 gdb_printf (gdb_stdlog,
3880 "dummy frame id: code %s, stack %s, special %s\n",
3881 paddress (gdbarch, get_frame_pc (this_frame)),
3882 paddress (gdbarch, sp), paddress (gdbarch, bsp));
3883
3884 return frame_id_build_special (sp, get_frame_pc (this_frame), bsp);
3885 }
3886
3887 static CORE_ADDR
3888 ia64_unwind_pc (struct gdbarch *gdbarch, frame_info_ptr next_frame)
3889 {
3890 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3891 gdb_byte buf[8];
3892 CORE_ADDR ip, psr, pc;
3893
3894 frame_unwind_register (next_frame, IA64_IP_REGNUM, buf);
3895 ip = extract_unsigned_integer (buf, 8, byte_order);
3896 frame_unwind_register (next_frame, IA64_PSR_REGNUM, buf);
3897 psr = extract_unsigned_integer (buf, 8, byte_order);
3898
3899 pc = (ip & ~0xf) | ((psr >> 41) & 3);
3900 return pc;
3901 }
3902
3903 static int
3904 ia64_print_insn (bfd_vma memaddr, struct disassemble_info *info)
3905 {
3906 info->bytes_per_line = SLOT_MULTIPLIER;
3907 return default_print_insn (memaddr, info);
3908 }
3909
3910 /* The default "size_of_register_frame" gdbarch_tdep routine for ia64. */
3911
3912 static int
3913 ia64_size_of_register_frame (frame_info_ptr this_frame, ULONGEST cfm)
3914 {
3915 return (cfm & 0x7f);
3916 }
3917
3918 static struct gdbarch *
3919 ia64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
3920 {
3921 struct gdbarch *gdbarch;
3922
3923 /* If there is already a candidate, use it. */
3924 arches = gdbarch_list_lookup_by_info (arches, &info);
3925 if (arches != NULL)
3926 return arches->gdbarch;
3927
3928 ia64_gdbarch_tdep *tdep = new ia64_gdbarch_tdep;
3929 gdbarch = gdbarch_alloc (&info, tdep);
3930
3931 tdep->size_of_register_frame = ia64_size_of_register_frame;
3932
3933 /* According to the ia64 specs, instructions that store long double
3934 floats in memory use a long-double format different than that
3935 used in the floating registers. The memory format matches the
3936 x86 extended float format which is 80 bits. An OS may choose to
3937 use this format (e.g. GNU/Linux) or choose to use a different
3938 format for storing long doubles (e.g. HPUX). In the latter case,
3939 the setting of the format may be moved/overridden in an
3940 OS-specific tdep file. */
3941 set_gdbarch_long_double_format (gdbarch, floatformats_i387_ext);
3942
3943 set_gdbarch_short_bit (gdbarch, 16);
3944 set_gdbarch_int_bit (gdbarch, 32);
3945 set_gdbarch_long_bit (gdbarch, 64);
3946 set_gdbarch_long_long_bit (gdbarch, 64);
3947 set_gdbarch_float_bit (gdbarch, 32);
3948 set_gdbarch_double_bit (gdbarch, 64);
3949 set_gdbarch_long_double_bit (gdbarch, 128);
3950 set_gdbarch_ptr_bit (gdbarch, 64);
3951
3952 set_gdbarch_num_regs (gdbarch, NUM_IA64_RAW_REGS);
3953 set_gdbarch_num_pseudo_regs (gdbarch,
3954 LAST_PSEUDO_REGNUM - FIRST_PSEUDO_REGNUM);
3955 set_gdbarch_sp_regnum (gdbarch, sp_regnum);
3956 set_gdbarch_fp0_regnum (gdbarch, IA64_FR0_REGNUM);
3957
3958 set_gdbarch_register_name (gdbarch, ia64_register_name);
3959 set_gdbarch_register_type (gdbarch, ia64_register_type);
3960
3961 set_gdbarch_pseudo_register_read (gdbarch, ia64_pseudo_register_read);
3962 set_gdbarch_pseudo_register_write (gdbarch, ia64_pseudo_register_write);
3963 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, ia64_dwarf_reg_to_regnum);
3964 set_gdbarch_register_reggroup_p (gdbarch, ia64_register_reggroup_p);
3965 set_gdbarch_convert_register_p (gdbarch, ia64_convert_register_p);
3966 set_gdbarch_register_to_value (gdbarch, ia64_register_to_value);
3967 set_gdbarch_value_to_register (gdbarch, ia64_value_to_register);
3968
3969 set_gdbarch_skip_prologue (gdbarch, ia64_skip_prologue);
3970
3971 set_gdbarch_return_value (gdbarch, ia64_return_value);
3972
3973 set_gdbarch_memory_insert_breakpoint (gdbarch,
3974 ia64_memory_insert_breakpoint);
3975 set_gdbarch_memory_remove_breakpoint (gdbarch,
3976 ia64_memory_remove_breakpoint);
3977 set_gdbarch_breakpoint_from_pc (gdbarch, ia64_breakpoint_from_pc);
3978 set_gdbarch_breakpoint_kind_from_pc (gdbarch, ia64_breakpoint_kind_from_pc);
3979 set_gdbarch_read_pc (gdbarch, ia64_read_pc);
3980 set_gdbarch_write_pc (gdbarch, ia64_write_pc);
3981
3982 /* Settings for calling functions in the inferior. */
3983 set_gdbarch_push_dummy_call (gdbarch, ia64_push_dummy_call);
3984 tdep->infcall_ops = ia64_infcall_ops;
3985 set_gdbarch_frame_align (gdbarch, ia64_frame_align);
3986 set_gdbarch_dummy_id (gdbarch, ia64_dummy_id);
3987
3988 set_gdbarch_unwind_pc (gdbarch, ia64_unwind_pc);
3989 #ifdef HAVE_LIBUNWIND_IA64_H
3990 frame_unwind_append_unwinder (gdbarch,
3991 &ia64_libunwind_sigtramp_frame_unwind);
3992 frame_unwind_append_unwinder (gdbarch, &ia64_libunwind_frame_unwind);
3993 frame_unwind_append_unwinder (gdbarch, &ia64_sigtramp_frame_unwind);
3994 libunwind_frame_set_descr (gdbarch, &ia64_libunwind_descr);
3995 #else
3996 frame_unwind_append_unwinder (gdbarch, &ia64_sigtramp_frame_unwind);
3997 #endif
3998 frame_unwind_append_unwinder (gdbarch, &ia64_frame_unwind);
3999 frame_base_set_default (gdbarch, &ia64_frame_base);
4000
4001 /* Settings that should be unnecessary. */
4002 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
4003
4004 set_gdbarch_print_insn (gdbarch, ia64_print_insn);
4005 set_gdbarch_convert_from_func_ptr_addr (gdbarch,
4006 ia64_convert_from_func_ptr_addr);
4007
4008 /* The virtual table contains 16-byte descriptors, not pointers to
4009 descriptors. */
4010 set_gdbarch_vtable_function_descriptors (gdbarch, 1);
4011
4012 /* Hook in ABI-specific overrides, if they have been registered. */
4013 gdbarch_init_osabi (info, gdbarch);
4014
4015 return gdbarch;
4016 }
4017
4018 void _initialize_ia64_tdep ();
4019 void
4020 _initialize_ia64_tdep ()
4021 {
4022 gdbarch_register (bfd_arch_ia64, ia64_gdbarch_init, NULL);
4023 }