1 /* Target-dependent code for the IA-64 for GDB, the GNU debugger.
2
3 Copyright (C) 1999-2017 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "inferior.h"
22 #include "gdbcore.h"
23 #include "arch-utils.h"
24 #include "floatformat.h"
25 #include "gdbtypes.h"
26 #include "regcache.h"
27 #include "reggroups.h"
28 #include "frame.h"
29 #include "frame-base.h"
30 #include "frame-unwind.h"
31 #include "target-float.h"
32 #include "value.h"
33 #include "objfiles.h"
34 #include "elf/common.h" /* for DT_PLTGOT value */
35 #include "elf-bfd.h"
36 #include "dis-asm.h"
37 #include "infcall.h"
38 #include "osabi.h"
39 #include "ia64-tdep.h"
40 #include "cp-abi.h"
41
42 #ifdef HAVE_LIBUNWIND_IA64_H
43 #include "elf/ia64.h" /* for PT_IA_64_UNWIND value */
44 #include "ia64-libunwind-tdep.h"
45
46 /* Note: KERNEL_START is supposed to be an address which is not going
47 to ever contain any valid unwind info. For ia64 linux, the choice
48 of 0xc000000000000000 is fairly safe since that's uncached space.
49
50 We use KERNEL_START as follows: after obtaining the kernel's
51 unwind table via getunwind(), we project its unwind data into
52 address-range KERNEL_START-(KERNEL_START+ktab_size) and then
53 when ia64_access_mem() sees a memory access to this
54 address-range, we redirect it to ktab instead.
55
56 None of this hackery is needed with a modern kernel/libc,
57 which uses the kernel virtual DSO to provide access to the
58 kernel's unwind info. In that case, ktab_size remains 0 and
59 hence the value of KERNEL_START doesn't matter. */
60
61 #define KERNEL_START 0xc000000000000000ULL
62
63 static size_t ktab_size = 0;
64 struct ia64_table_entry
65 {
66 uint64_t start_offset;
67 uint64_t end_offset;
68 uint64_t info_offset;
69 };
70
71 static struct ia64_table_entry *ktab = NULL;
72
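/* Illustrative sketch (kept out of the build): the redirection described
   above.  An address that falls inside the synthetic
   KERNEL_START..KERNEL_START+ktab_size range is served from the ktab
   buffer instead of from the inferior; ia64_access_mem (not shown in
   this excerpt) performs the real redirection.  The helper name is
   hypothetical.  */
#if 0
static int
example_ktab_redirect (uint64_t addr, uint64_t *val)
{
  if (addr - KERNEL_START < ktab_size)
    {
      /* Read the projected unwind data out of the local copy.  */
      uint64_t *laddr = (uint64_t *) ((char *) ktab + (addr - KERNEL_START));

      *val = *laddr;
      return 0;
    }

  return -1;	/* Not in the projected range; use normal memory access.  */
}
#endif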
73 #endif
74
75 /* An enumeration of the different IA-64 instruction types. */
76
77 typedef enum instruction_type
78 {
79 A, /* Integer ALU ; I-unit or M-unit */
80 I, /* Non-ALU integer; I-unit */
81 M, /* Memory ; M-unit */
82 F, /* Floating-point ; F-unit */
83 B, /* Branch ; B-unit */
84 L, /* Extended (L+X) ; I-unit */
85 X, /* Extended (L+X) ; I-unit */
86 undefined /* undefined or reserved */
87 } instruction_type;
88
89 /* We represent IA-64 PC addresses as the value of the instruction
90 pointer or'd with some bit combination in the low nibble which
91 represents the slot number in the bundle addressed by the
92 instruction pointer. The problem is that the Linux kernel
93 multiplies its slot numbers (for exceptions) by one while the
94 disassembler multiplies its slot numbers by 6. In addition, I've
95 heard it said that the simulator uses 1 as the multiplier.
96
97 I've fixed the disassembler so that the bytes_per_line field will
98 be the slot multiplier. If bytes_per_line comes in as zero, it
99 is set to six (which is how it was set up initially). -- objdump
100 displays pretty disassembly dumps with this value. For our purposes,
101 we'll set bytes_per_line to SLOT_MULTIPLIER. This is okay since we
102 never want to also display the raw bytes the way objdump does. */
103
104 #define SLOT_MULTIPLIER 1
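/* Illustrative sketch (kept out of the build): how such a PC value splits
   into a bundle address and a slot number under the SLOT_MULTIPLIER == 1
   convention described above.  The helper name is hypothetical.  */
#if 0
static void
example_split_pc (CORE_ADDR pc)
{
  CORE_ADDR bundle_addr = pc & ~(CORE_ADDR) 0x0f;	/* 16-byte aligned bundle.  */
  int slotnum = (int) (pc & 0x0f) / SLOT_MULTIPLIER;	/* 0, 1 or 2.  */

  /* E.g. pc == 0xABCDE2 yields bundle_addr == 0xABCDE0 and slotnum == 2.  */
  (void) bundle_addr;
  (void) slotnum;
}
#endif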
105
106 /* Length in bytes of an instruction bundle. */
107
108 #define BUNDLE_LEN 16
109
110 /* See the saved memory layout comment for ia64_memory_insert_breakpoint. */
111
112 #if BREAKPOINT_MAX < BUNDLE_LEN - 2
113 # error "BREAKPOINT_MAX < BUNDLE_LEN - 2"
114 #endif
115
116 static gdbarch_init_ftype ia64_gdbarch_init;
117
118 static gdbarch_register_name_ftype ia64_register_name;
119 static gdbarch_register_type_ftype ia64_register_type;
120 static gdbarch_breakpoint_from_pc_ftype ia64_breakpoint_from_pc;
121 static gdbarch_skip_prologue_ftype ia64_skip_prologue;
122 static struct type *is_float_or_hfa_type (struct type *t);
123 static CORE_ADDR ia64_find_global_pointer (struct gdbarch *gdbarch,
124 CORE_ADDR faddr);
125
126 #define NUM_IA64_RAW_REGS 462
127
128 /* Big enough to hold a FP register in bytes. */
129 #define IA64_FP_REGISTER_SIZE 16
130
131 static int sp_regnum = IA64_GR12_REGNUM;
132
133 /* NOTE: we treat the register stack registers r32-r127 as
134 pseudo-registers because they may not be accessible via the ptrace
135 register get/set interfaces. */
136
137 enum pseudo_regs { FIRST_PSEUDO_REGNUM = NUM_IA64_RAW_REGS,
138 VBOF_REGNUM = IA64_NAT127_REGNUM + 1, V32_REGNUM,
139 V127_REGNUM = V32_REGNUM + 95,
140 VP0_REGNUM, VP16_REGNUM = VP0_REGNUM + 16,
141 VP63_REGNUM = VP0_REGNUM + 63, LAST_PSEUDO_REGNUM };
142
143 /* Array of register names; there should be ia64_num_regs strings in
144 the initializer. */
145
146 static const char *ia64_register_names[] =
147 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
148 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
149 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
150 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
151 "", "", "", "", "", "", "", "",
152 "", "", "", "", "", "", "", "",
153 "", "", "", "", "", "", "", "",
154 "", "", "", "", "", "", "", "",
155 "", "", "", "", "", "", "", "",
156 "", "", "", "", "", "", "", "",
157 "", "", "", "", "", "", "", "",
158 "", "", "", "", "", "", "", "",
159 "", "", "", "", "", "", "", "",
160 "", "", "", "", "", "", "", "",
161 "", "", "", "", "", "", "", "",
162 "", "", "", "", "", "", "", "",
163
164 "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
165 "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
166 "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
167 "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31",
168 "f32", "f33", "f34", "f35", "f36", "f37", "f38", "f39",
169 "f40", "f41", "f42", "f43", "f44", "f45", "f46", "f47",
170 "f48", "f49", "f50", "f51", "f52", "f53", "f54", "f55",
171 "f56", "f57", "f58", "f59", "f60", "f61", "f62", "f63",
172 "f64", "f65", "f66", "f67", "f68", "f69", "f70", "f71",
173 "f72", "f73", "f74", "f75", "f76", "f77", "f78", "f79",
174 "f80", "f81", "f82", "f83", "f84", "f85", "f86", "f87",
175 "f88", "f89", "f90", "f91", "f92", "f93", "f94", "f95",
176 "f96", "f97", "f98", "f99", "f100", "f101", "f102", "f103",
177 "f104", "f105", "f106", "f107", "f108", "f109", "f110", "f111",
178 "f112", "f113", "f114", "f115", "f116", "f117", "f118", "f119",
179 "f120", "f121", "f122", "f123", "f124", "f125", "f126", "f127",
180
181 "", "", "", "", "", "", "", "",
182 "", "", "", "", "", "", "", "",
183 "", "", "", "", "", "", "", "",
184 "", "", "", "", "", "", "", "",
185 "", "", "", "", "", "", "", "",
186 "", "", "", "", "", "", "", "",
187 "", "", "", "", "", "", "", "",
188 "", "", "", "", "", "", "", "",
189
190 "b0", "b1", "b2", "b3", "b4", "b5", "b6", "b7",
191
192 "vfp", "vrap",
193
194 "pr", "ip", "psr", "cfm",
195
196 "kr0", "kr1", "kr2", "kr3", "kr4", "kr5", "kr6", "kr7",
197 "", "", "", "", "", "", "", "",
198 "rsc", "bsp", "bspstore", "rnat",
199 "", "fcr", "", "",
200 "eflag", "csd", "ssd", "cflg", "fsr", "fir", "fdr", "",
201 "ccv", "", "", "", "unat", "", "", "",
202 "fpsr", "", "", "", "itc",
203 "", "", "", "", "", "", "", "", "", "",
204 "", "", "", "", "", "", "", "", "",
205 "pfs", "lc", "ec",
206 "", "", "", "", "", "", "", "", "", "",
207 "", "", "", "", "", "", "", "", "", "",
208 "", "", "", "", "", "", "", "", "", "",
209 "", "", "", "", "", "", "", "", "", "",
210 "", "", "", "", "", "", "", "", "", "",
211 "", "", "", "", "", "", "", "", "", "",
212 "",
213 "nat0", "nat1", "nat2", "nat3", "nat4", "nat5", "nat6", "nat7",
214 "nat8", "nat9", "nat10", "nat11", "nat12", "nat13", "nat14", "nat15",
215 "nat16", "nat17", "nat18", "nat19", "nat20", "nat21", "nat22", "nat23",
216 "nat24", "nat25", "nat26", "nat27", "nat28", "nat29", "nat30", "nat31",
217 "nat32", "nat33", "nat34", "nat35", "nat36", "nat37", "nat38", "nat39",
218 "nat40", "nat41", "nat42", "nat43", "nat44", "nat45", "nat46", "nat47",
219 "nat48", "nat49", "nat50", "nat51", "nat52", "nat53", "nat54", "nat55",
220 "nat56", "nat57", "nat58", "nat59", "nat60", "nat61", "nat62", "nat63",
221 "nat64", "nat65", "nat66", "nat67", "nat68", "nat69", "nat70", "nat71",
222 "nat72", "nat73", "nat74", "nat75", "nat76", "nat77", "nat78", "nat79",
223 "nat80", "nat81", "nat82", "nat83", "nat84", "nat85", "nat86", "nat87",
224 "nat88", "nat89", "nat90", "nat91", "nat92", "nat93", "nat94", "nat95",
225 "nat96", "nat97", "nat98", "nat99", "nat100","nat101","nat102","nat103",
226 "nat104","nat105","nat106","nat107","nat108","nat109","nat110","nat111",
227 "nat112","nat113","nat114","nat115","nat116","nat117","nat118","nat119",
228 "nat120","nat121","nat122","nat123","nat124","nat125","nat126","nat127",
229
230 "bof",
231
232 "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
233 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
234 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
235 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
236 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
237 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
238 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
239 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
240 "r96", "r97", "r98", "r99", "r100", "r101", "r102", "r103",
241 "r104", "r105", "r106", "r107", "r108", "r109", "r110", "r111",
242 "r112", "r113", "r114", "r115", "r116", "r117", "r118", "r119",
243 "r120", "r121", "r122", "r123", "r124", "r125", "r126", "r127",
244
245 "p0", "p1", "p2", "p3", "p4", "p5", "p6", "p7",
246 "p8", "p9", "p10", "p11", "p12", "p13", "p14", "p15",
247 "p16", "p17", "p18", "p19", "p20", "p21", "p22", "p23",
248 "p24", "p25", "p26", "p27", "p28", "p29", "p30", "p31",
249 "p32", "p33", "p34", "p35", "p36", "p37", "p38", "p39",
250 "p40", "p41", "p42", "p43", "p44", "p45", "p46", "p47",
251 "p48", "p49", "p50", "p51", "p52", "p53", "p54", "p55",
252 "p56", "p57", "p58", "p59", "p60", "p61", "p62", "p63",
253 };
254
255 struct ia64_frame_cache
256 {
257 CORE_ADDR base; /* frame pointer base for frame */
258 CORE_ADDR pc; /* function start pc for frame */
259 CORE_ADDR saved_sp; /* stack pointer for frame */
260 CORE_ADDR bsp; /* points at r32 for the current frame */
261 CORE_ADDR cfm; /* cfm value for current frame */
262 CORE_ADDR prev_cfm; /* cfm value for previous frame */
263 int frameless;
264 int sof; /* Size of frame (decoded from cfm value). */
265 int sol; /* Size of locals (decoded from cfm value). */
266 int sor; /* Number of rotating registers (decoded from
267 cfm value). */
268 CORE_ADDR after_prologue;
269 /* Address of first instruction after the last
270 prologue instruction; Note that there may
271 be instructions from the function's body
272 intermingled with the prologue. */
273 int mem_stack_frame_size;
274 /* Size of the memory stack frame (may be zero),
275 or -1 if it has not been determined yet. */
276 int fp_reg; /* Register number (if any) used as a frame pointer
277 for this frame. 0 if no register is being used
278 as the frame pointer. */
279
280 /* Saved registers. */
281 CORE_ADDR saved_regs[NUM_IA64_RAW_REGS];
282
283 };
284
285 static int
286 floatformat_valid (const struct floatformat *fmt, const void *from)
287 {
288 return 1;
289 }
290
291 static const struct floatformat floatformat_ia64_ext_little =
292 {
293 floatformat_little, 82, 0, 1, 17, 65535, 0x1ffff, 18, 64,
294 floatformat_intbit_yes, "floatformat_ia64_ext_little", floatformat_valid, NULL
295 };
296
297 static const struct floatformat floatformat_ia64_ext_big =
298 {
299 floatformat_big, 82, 46, 47, 17, 65535, 0x1ffff, 64, 64,
300 floatformat_intbit_yes, "floatformat_ia64_ext_big", floatformat_valid
301 };
302
303 static const struct floatformat *floatformats_ia64_ext[2] =
304 {
305 &floatformat_ia64_ext_big,
306 &floatformat_ia64_ext_little
307 };
308
309 static struct type *
310 ia64_ext_type (struct gdbarch *gdbarch)
311 {
312 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
313
314 if (!tdep->ia64_ext_type)
315 tdep->ia64_ext_type
316 = arch_float_type (gdbarch, 128, "builtin_type_ia64_ext",
317 floatformats_ia64_ext);
318
319 return tdep->ia64_ext_type;
320 }
321
322 static int
323 ia64_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
324 struct reggroup *group)
325 {
326 int vector_p;
327 int float_p;
328 int raw_p;
329 if (group == all_reggroup)
330 return 1;
331 vector_p = TYPE_VECTOR (register_type (gdbarch, regnum));
332 float_p = TYPE_CODE (register_type (gdbarch, regnum)) == TYPE_CODE_FLT;
333 raw_p = regnum < NUM_IA64_RAW_REGS;
334 if (group == float_reggroup)
335 return float_p;
336 if (group == vector_reggroup)
337 return vector_p;
338 if (group == general_reggroup)
339 return (!vector_p && !float_p);
340 if (group == save_reggroup || group == restore_reggroup)
341 return raw_p;
342 return 0;
343 }
344
345 static const char *
346 ia64_register_name (struct gdbarch *gdbarch, int reg)
347 {
348 return ia64_register_names[reg];
349 }
350
351 struct type *
352 ia64_register_type (struct gdbarch *arch, int reg)
353 {
354 if (reg >= IA64_FR0_REGNUM && reg <= IA64_FR127_REGNUM)
355 return ia64_ext_type (arch);
356 else
357 return builtin_type (arch)->builtin_long;
358 }
359
360 static int
361 ia64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
362 {
363 if (reg >= IA64_GR32_REGNUM && reg <= IA64_GR127_REGNUM)
364 return V32_REGNUM + (reg - IA64_GR32_REGNUM);
365 return reg;
366 }
367
368
369 /* Extract ``len'' bits from an instruction bundle starting at
370 bit ``from''. */
371
372 static long long
373 extract_bit_field (const gdb_byte *bundle, int from, int len)
374 {
375 long long result = 0LL;
376 int to = from + len;
377 int from_byte = from / 8;
378 int to_byte = to / 8;
379 unsigned char *b = (unsigned char *) bundle;
380 unsigned char c;
381 int lshift;
382 int i;
383
384 c = b[from_byte];
385 if (from_byte == to_byte)
386 c = ((unsigned char) (c << (8 - to % 8))) >> (8 - to % 8);
387 result = c >> (from % 8);
388 lshift = 8 - (from % 8);
389
390 for (i = from_byte+1; i < to_byte; i++)
391 {
392 result |= ((long long) b[i]) << lshift;
393 lshift += 8;
394 }
395
396 if (from_byte < to_byte && (to % 8 != 0))
397 {
398 c = b[to_byte];
399 c = ((unsigned char) (c << (8 - to % 8))) >> (8 - to % 8);
400 result |= ((long long) c) << lshift;
401 }
402
403 return result;
404 }
405
406 /* Replace the specified bits in an instruction bundle. */
407
408 static void
409 replace_bit_field (gdb_byte *bundle, long long val, int from, int len)
410 {
411 int to = from + len;
412 int from_byte = from / 8;
413 int to_byte = to / 8;
414 unsigned char *b = (unsigned char *) bundle;
415 unsigned char c;
416
417 if (from_byte == to_byte)
418 {
419 unsigned char left, right;
420 c = b[from_byte];
421 left = (c >> (to % 8)) << (to % 8);
422 right = ((unsigned char) (c << (8 - from % 8))) >> (8 - from % 8);
423 c = (unsigned char) (val & 0xff);
424 c = (unsigned char) (c << (from % 8 + 8 - to % 8)) >> (8 - to % 8);
425 c |= right | left;
426 b[from_byte] = c;
427 }
428 else
429 {
430 int i;
431 c = b[from_byte];
432 c = ((unsigned char) (c << (8 - from % 8))) >> (8 - from % 8);
433 c = c | (val << (from % 8));
434 b[from_byte] = c;
435 val >>= 8 - from % 8;
436
437 for (i = from_byte+1; i < to_byte; i++)
438 {
439 c = val & 0xff;
440 val >>= 8;
441 b[i] = c;
442 }
443
444 if (to % 8 != 0)
445 {
446 unsigned char cv = (unsigned char) val;
447 c = b[to_byte];
448 c = c >> (to % 8) << (to % 8);
449 c |= ((unsigned char) (cv << (8 - to % 8))) >> (8 - to % 8);
450 b[to_byte] = c;
451 }
452 }
453 }
454
455 /* Return the contents of slot N (for N = 0, 1, or 2) in
456 an instruction bundle. */
457
458 static long long
459 slotN_contents (gdb_byte *bundle, int slotnum)
460 {
461 return extract_bit_field (bundle, 5+41*slotnum, 41);
462 }
463
464 /* Store an instruction in an instruction bundle. */
465
466 static void
467 replace_slotN_contents (gdb_byte *bundle, long long instr, int slotnum)
468 {
469 replace_bit_field (bundle, instr, 5+41*slotnum, 41);
470 }
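/* Illustrative sketch (kept out of the build): the bit layout the two
   helpers above assume.  Slot N occupies bits 5 + 41*N through
   5 + 41*N + 40 of the 128-bit bundle, and the low 5 bits hold the
   template that indexes template_encoding_table below.  The helper name
   is hypothetical.  */
#if 0
static void
example_slot_bit_layout (void)
{
  int slotnum;

  for (slotnum = 0; slotnum < 3; slotnum++)
    {
      int first_bit = 5 + 41 * slotnum;	  /* 5, 46, 87.  */
      int last_bit = first_bit + 41 - 1;  /* 45, 86, 127.  */

      (void) first_bit;
      (void) last_bit;
    }
}
#endif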
471
472 static const enum instruction_type template_encoding_table[32][3] =
473 {
474 { M, I, I }, /* 00 */
475 { M, I, I }, /* 01 */
476 { M, I, I }, /* 02 */
477 { M, I, I }, /* 03 */
478 { M, L, X }, /* 04 */
479 { M, L, X }, /* 05 */
480 { undefined, undefined, undefined }, /* 06 */
481 { undefined, undefined, undefined }, /* 07 */
482 { M, M, I }, /* 08 */
483 { M, M, I }, /* 09 */
484 { M, M, I }, /* 0A */
485 { M, M, I }, /* 0B */
486 { M, F, I }, /* 0C */
487 { M, F, I }, /* 0D */
488 { M, M, F }, /* 0E */
489 { M, M, F }, /* 0F */
490 { M, I, B }, /* 10 */
491 { M, I, B }, /* 11 */
492 { M, B, B }, /* 12 */
493 { M, B, B }, /* 13 */
494 { undefined, undefined, undefined }, /* 14 */
495 { undefined, undefined, undefined }, /* 15 */
496 { B, B, B }, /* 16 */
497 { B, B, B }, /* 17 */
498 { M, M, B }, /* 18 */
499 { M, M, B }, /* 19 */
500 { undefined, undefined, undefined }, /* 1A */
501 { undefined, undefined, undefined }, /* 1B */
502 { M, F, B }, /* 1C */
503 { M, F, B }, /* 1D */
504 { undefined, undefined, undefined }, /* 1E */
505 { undefined, undefined, undefined }, /* 1F */
506 };
507
508 /* Fetch and (partially) decode an instruction at ADDR and return the
509 address of the next instruction to fetch. */
510
511 static CORE_ADDR
512 fetch_instruction (CORE_ADDR addr, instruction_type *it, long long *instr)
513 {
514 gdb_byte bundle[BUNDLE_LEN];
515 int slotnum = (int) (addr & 0x0f) / SLOT_MULTIPLIER;
516 long long templ;
517 int val;
518
519 /* Warn about slot numbers greater than 2. We used to generate
520 an error here on the assumption that the user entered an invalid
521 address. But, sometimes GDB itself requests an invalid address.
522 This can (easily) happen when execution stops in a function for
523 which there are no symbols. The prologue scanner will attempt to
524 find the beginning of the function - if the nearest symbol
525 happens to not be aligned on a bundle boundary (16 bytes), the
526 resulting starting address will cause GDB to think that the slot
527 number is too large.
528
529 So we warn about it and set the slot number to zero. It is
530 not necessarily a fatal condition, particularly if debugging
531 at the assembly language level. */
532 if (slotnum > 2)
533 {
534 warning (_("Can't fetch instructions for slot numbers greater than 2.\n"
535 "Using slot 0 instead"));
536 slotnum = 0;
537 }
538
539 addr &= ~0x0f;
540
541 val = target_read_memory (addr, bundle, BUNDLE_LEN);
542
543 if (val != 0)
544 return 0;
545
546 *instr = slotN_contents (bundle, slotnum);
547 templ = extract_bit_field (bundle, 0, 5);
548 *it = template_encoding_table[(int)templ][slotnum];
549
550 if (slotnum == 2 || (slotnum == 1 && *it == L))
551 addr += 16;
552 else
553 addr += (slotnum + 1) * SLOT_MULTIPLIER;
554
555 return addr;
556 }
557
558 /* There are 5 different break instructions (break.i, break.b,
559 break.m, break.f, and break.x), but they all have the same
560 encoding. (The five bit template in the low five bits of the
561 instruction bundle distinguishes one from another.)
562
563 The runtime architecture manual specifies that break instructions
564 used for debugging purposes must have the upper two bits of the 21
565 bit immediate set to a 0 and a 1 respectively. A breakpoint
566 instruction encodes the most significant bit of its 21 bit
567 immediate at bit 36 of the 41 bit instruction. The penultimate msb
568 is at bit 25 which leads to the pattern below.
569
570 Originally, I had this set up to do, e.g., a "break.i 0x80000". But
571 it turns out that 0x80000 was used as the syscall break in the early
572 simulators. So I changed the pattern slightly to do "break.i 0x080001"
573 instead. But that didn't work either (I later found out that this
574 pattern was used by the simulator that I was using.) So I ended up
575 using the pattern seen below.
576
577 SHADOW_CONTENTS has byte-based addressing (PLACED_ADDRESS and SHADOW_LEN)
578 while we need bit-based addressing as the instruction length is 41 bits and
579 we must not modify/corrupt the adjacent slots in the same bundle.
580 Fortunately we may store larger memory incl. the adjacent bits with the
581 original memory content (not the possibly already stored breakpoints there).
582 We need to be careful in ia64_memory_remove_breakpoint to always restore
583 only the specific bits of this instruction ignoring any adjacent stored
584 bits.
585
586 We use the original addressing with the low nibble in the range <0..2> which
587 gets incorrectly interpreted by generic non-ia64 breakpoint_restore_shadows
588 as the direct byte offset of SHADOW_CONTENTS. We store BUNDLE_LEN bytes
589 minus the possibly skipped leading bytes, so as not to run into the
590 next bundle.
591
592 If we wanted to store the whole bundle in SHADOW_CONTENTS, we would have
593 to store the base address (`address & ~0x0f') in PLACED_ADDRESS.
594 In that case there would be no place left to store
595 SLOTNUM (`address & 0x0f', a value in the range <0..2>), and we need to know
596 SLOTNUM in ia64_memory_remove_breakpoint.
597
598 There is one special case where we need to be extra careful:
599 L-X instructions, which are instructions that occupy 2 slots
600 (The L part is always in slot 1, and the X part is always in
601 slot 2). We must refuse to insert breakpoints for an address
602 that points at slot 2 of a bundle where an L-X instruction is
603 present, since there is logically no instruction at that address.
604 However, to make things more interesting, the opcode of L-X
605 instructions is located in slot 2. This means that, to insert
606 a breakpoint at an address that points to slot 1, we actually
607 need to write the breakpoint in slot 2! Slot 1 is actually
608 the extended operand, so writing the breakpoint there would not
609 have the desired effect. Another side-effect of this issue
610 is that we need to make sure that the shadow contents buffer
611 does save byte 15 of our instruction bundle (this is the tail
612 end of slot 2, which wouldn't be saved if we were to insert
613 the breakpoint in slot 1).
614
615 ia64 16-byte bundle layout:
616 | 5 bits | slot 0 with 41 bits | slot 1 with 41 bits | slot 2 with 41 bits |
617
618 The current addressing used by the code below:
619 original PC placed_address placed_size required covered
620 == bp_tgt->shadow_len reqd \subset covered
621 0xABCDE0 0xABCDE0 0x10 <0x0...0x5> <0x0..0xF>
622 0xABCDE1 0xABCDE1 0xF <0x5...0xA> <0x1..0xF>
623 0xABCDE2 0xABCDE2 0xE <0xA...0xF> <0x2..0xF>
624
625 L-X instructions are treated a little specially, as explained above:
626 0xABCDE1 0xABCDE1 0xF <0xA...0xF> <0x1..0xF>
627
628 `objdump -d' and some other tools show somewhat unjustified offsets:
629 original PC byte where the instruction starts objdump offset
630 0xABCDE0 0xABCDE0 0xABCDE0
631 0xABCDE1 0xABCDE5 0xABCDE6
632 0xABCDE2 0xABCDEA 0xABCDEC
633 */
634
635 #define IA64_BREAKPOINT 0x00003333300LL
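/* Illustrative sketch (kept out of the build): the shadow placement
   described in the table above, as computed by
   ia64_memory_insert_breakpoint below.  The helper name is
   hypothetical.  */
#if 0
static void
example_shadow_placement (CORE_ADDR requested_pc)
{
  int slotnum = (int) (requested_pc & 0x0f) / SLOT_MULTIPLIER;
  CORE_ADDR placed_address = requested_pc;	/* Slot nibble is kept.  */
  int shadow_len = BUNDLE_LEN - slotnum;	/* Always covers byte 15.  */

  /* E.g. requested_pc == 0xABCDE1 gives placed_address == 0xABCDE1 and
     shadow_len == 0xF, matching the table in the comment above.  */
  (void) placed_address;
  (void) shadow_len;
}
#endif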
636
637 static int
638 ia64_memory_insert_breakpoint (struct gdbarch *gdbarch,
639 struct bp_target_info *bp_tgt)
640 {
641 CORE_ADDR addr = bp_tgt->placed_address = bp_tgt->reqstd_address;
642 gdb_byte bundle[BUNDLE_LEN];
643 int slotnum = (int) (addr & 0x0f) / SLOT_MULTIPLIER, shadow_slotnum;
644 long long instr_breakpoint;
645 int val;
646 int templ;
647
648 if (slotnum > 2)
649 error (_("Can't insert breakpoint for slot numbers greater than 2."));
650
651 addr &= ~0x0f;
652
653 /* Enable the automatic memory restoration from breakpoints while
654 we read our instruction bundle for the purpose of SHADOW_CONTENTS.
655 Otherwise, we could possibly store into the shadow parts of the adjacent
656 placed breakpoints. It is due to our SHADOW_CONTENTS overlapping the real
657 breakpoint instruction bits region. */
658 scoped_restore restore_memory_0
659 = make_scoped_restore_show_memory_breakpoints (0);
660 val = target_read_memory (addr, bundle, BUNDLE_LEN);
661 if (val != 0)
662 return val;
663
664 /* SHADOW_SLOTNUM saves the original slot number as expected by the caller
665 for addressing the SHADOW_CONTENTS placement. */
666 shadow_slotnum = slotnum;
667
668 /* Always cover the last byte of the bundle in case we are inserting
669 a breakpoint on an L-X instruction. */
670 bp_tgt->shadow_len = BUNDLE_LEN - shadow_slotnum;
671
672 templ = extract_bit_field (bundle, 0, 5);
673 if (template_encoding_table[templ][slotnum] == X)
674 {
675 /* X unit types can only be used in slot 2, and are actually
676 part of a 2-slot L-X instruction. We cannot break at this
677 address, as this is the second half of an instruction that
678 lives in slot 1 of that bundle. */
679 gdb_assert (slotnum == 2);
680 error (_("Can't insert breakpoint for non-existing slot X"));
681 }
682 if (template_encoding_table[templ][slotnum] == L)
683 {
684 /* L unit types can only be used in slot 1. But the associated
685 opcode for that instruction is in slot 2, so bump the slot number
686 accordingly. */
687 gdb_assert (slotnum == 1);
688 slotnum = 2;
689 }
690
691 /* Store the whole bundle, except for the initial bytes skipped by the slot
692 number being interpreted as a byte offset in PLACED_ADDRESS. */
693 memcpy (bp_tgt->shadow_contents, bundle + shadow_slotnum,
694 bp_tgt->shadow_len);
695
696 /* Re-read the same bundle as above except that, this time, read it in order
697 to compute the new bundle inside which we will be inserting the
698 breakpoint. Therefore, disable the automatic memory restoration from
699 breakpoints while we read our instruction bundle. Otherwise, the general
700 restoration mechanism kicks in and we would possibly remove parts of the
701 adjacent placed breakpoints. It is due to our SHADOW_CONTENTS overlapping
702 the real breakpoint instruction bits region. */
703 scoped_restore restore_memory_1
704 = make_scoped_restore_show_memory_breakpoints (1);
705 val = target_read_memory (addr, bundle, BUNDLE_LEN);
706 if (val != 0)
707 return val;
708
709 /* Breakpoints already present in the code will get detected and not get
710 reinserted by bp_loc_is_permanent. Multiple breakpoints at the same
711 location cannot induce the internal error as they are optimized into
712 a single instance by update_global_location_list. */
713 instr_breakpoint = slotN_contents (bundle, slotnum);
714 if (instr_breakpoint == IA64_BREAKPOINT)
715 internal_error (__FILE__, __LINE__,
716 _("Address %s already contains a breakpoint."),
717 paddress (gdbarch, bp_tgt->placed_address));
718 replace_slotN_contents (bundle, IA64_BREAKPOINT, slotnum);
719
720 val = target_write_memory (addr + shadow_slotnum, bundle + shadow_slotnum,
721 bp_tgt->shadow_len);
722
723 return val;
724 }
725
726 static int
727 ia64_memory_remove_breakpoint (struct gdbarch *gdbarch,
728 struct bp_target_info *bp_tgt)
729 {
730 CORE_ADDR addr = bp_tgt->placed_address;
731 gdb_byte bundle_mem[BUNDLE_LEN], bundle_saved[BUNDLE_LEN];
732 int slotnum = (addr & 0x0f) / SLOT_MULTIPLIER, shadow_slotnum;
733 long long instr_breakpoint, instr_saved;
734 int val;
735 int templ;
736
737 addr &= ~0x0f;
738
739 /* Disable the automatic memory restoration from breakpoints while
740 we read our instruction bundle. Otherwise, the general restoration
741 mechanism kicks in and we would possibly remove parts of the adjacent
742 placed breakpoints. It is due to our SHADOW_CONTENTS overlapping the real
743 breakpoint instruction bits region. */
744 scoped_restore restore_memory_1
745 = make_scoped_restore_show_memory_breakpoints (1);
746 val = target_read_memory (addr, bundle_mem, BUNDLE_LEN);
747 if (val != 0)
748 return val;
749
750 /* SHADOW_SLOTNUM saves the original slot number as expected by the caller
751 for addressing the SHADOW_CONTENTS placement. */
752 shadow_slotnum = slotnum;
753
754 templ = extract_bit_field (bundle_mem, 0, 5);
755 if (template_encoding_table[templ][slotnum] == X)
756 {
757 /* X unit types can only be used in slot 2, and are actually
758 part of a 2-slot L-X instruction. We refuse to insert
759 breakpoints at this address, so there should be no reason
760 for us attempting to remove one there, except if the program's
761 code somehow got modified in memory. */
762 gdb_assert (slotnum == 2);
763 warning (_("Cannot remove breakpoint at address %s from non-existing "
764 "X-type slot, memory has changed underneath"),
765 paddress (gdbarch, bp_tgt->placed_address));
766 return -1;
767 }
768 if (template_encoding_table[templ][slotnum] == L)
769 {
770 /* L unit types can only be used in slot 1. But the breakpoint
771 was actually saved using slot 2, so update the slot number
772 accordingly. */
773 gdb_assert (slotnum == 1);
774 slotnum = 2;
775 }
776
777 gdb_assert (bp_tgt->shadow_len == BUNDLE_LEN - shadow_slotnum);
778
779 instr_breakpoint = slotN_contents (bundle_mem, slotnum);
780 if (instr_breakpoint != IA64_BREAKPOINT)
781 {
782 warning (_("Cannot remove breakpoint at address %s, "
783 "no break instruction at such address."),
784 paddress (gdbarch, bp_tgt->placed_address));
785 return -1;
786 }
787
788 /* Extract the original saved instruction from SLOTNUM normalizing its
789 bit-shift for INSTR_SAVED. */
790 memcpy (bundle_saved, bundle_mem, BUNDLE_LEN);
791 memcpy (bundle_saved + shadow_slotnum, bp_tgt->shadow_contents,
792 bp_tgt->shadow_len);
793 instr_saved = slotN_contents (bundle_saved, slotnum);
794
795 /* In BUNDLE_MEM, be careful to modify only the bits belonging to SLOTNUM
796 and not any of the other ones that are stored in SHADOW_CONTENTS. */
797 replace_slotN_contents (bundle_mem, instr_saved, slotnum);
798 val = target_write_raw_memory (addr, bundle_mem, BUNDLE_LEN);
799
800 return val;
801 }
802
803 /* Implement the breakpoint_kind_from_pc gdbarch method. */
804
805 static int
806 ia64_breakpoint_kind_from_pc (struct gdbarch *gdbarch, CORE_ADDR *pcptr)
807 {
808 /* A placeholder for the gdbarch method breakpoint_kind_from_pc. */
809 return 0;
810 }
811
812 /* As gdbarch_breakpoint_from_pc ranges have byte granularity while ia64
813 instruction slot ranges are bit-granular (41 bits), we have to provide an
814 extended range as described for ia64_memory_insert_breakpoint. We also take
815 care to preserve the `break' instruction's 21-bit (or 62-bit) parameter so
816 that permanent breakpoints match. */
817
818 static const gdb_byte *
819 ia64_breakpoint_from_pc (struct gdbarch *gdbarch,
820 CORE_ADDR *pcptr, int *lenptr)
821 {
822 CORE_ADDR addr = *pcptr;
823 static gdb_byte bundle[BUNDLE_LEN];
824 int slotnum = (int) (*pcptr & 0x0f) / SLOT_MULTIPLIER, shadow_slotnum;
825 long long instr_fetched;
826 int val;
827 int templ;
828
829 if (slotnum > 2)
830 error (_("Can't insert breakpoint for slot numbers greater than 2."));
831
832 addr &= ~0x0f;
833
834 /* Enable the automatic memory restoration from breakpoints while
835 we read our instruction bundle to match bp_loc_is_permanent. */
836 {
837 scoped_restore restore_memory_0
838 = make_scoped_restore_show_memory_breakpoints (0);
839 val = target_read_memory (addr, bundle, BUNDLE_LEN);
840 }
841
842 /* The memory might be unreachable. This can happen, for instance,
843 when the user inserts a breakpoint at an invalid address. */
844 if (val != 0)
845 return NULL;
846
847 /* SHADOW_SLOTNUM saves the original slot number as expected by the caller
848 for addressing the SHADOW_CONTENTS placement. */
849 shadow_slotnum = slotnum;
850
851 /* Always cover the last byte of the bundle for the L-X slot case. */
852 *lenptr = BUNDLE_LEN - shadow_slotnum;
853
854 /* Check for an L-type instruction in slot 1; if present, bump the slot
855 number up to slot 2. */
856 templ = extract_bit_field (bundle, 0, 5);
857 if (template_encoding_table[templ][slotnum] == X)
858 {
859 gdb_assert (slotnum == 2);
860 error (_("Can't insert breakpoint for non-existing slot X"));
861 }
862 if (template_encoding_table[templ][slotnum] == L)
863 {
864 gdb_assert (slotnum == 1);
865 slotnum = 2;
866 }
867
868 /* A break instruction has all its opcode bits cleared except for
869 the parameter value. For an L+X slot pair we are at the X slot (slot 2) so
870 we should not touch the L slot - the upper 41 bits of the parameter. */
871 instr_fetched = slotN_contents (bundle, slotnum);
872 instr_fetched &= 0x1003ffffc0LL;
873 replace_slotN_contents (bundle, instr_fetched, slotnum);
874
875 return bundle + shadow_slotnum;
876 }
877
878 static CORE_ADDR
879 ia64_read_pc (struct regcache *regcache)
880 {
881 ULONGEST psr_value, pc_value;
882 int slot_num;
883
884 regcache_cooked_read_unsigned (regcache, IA64_PSR_REGNUM, &psr_value);
885 regcache_cooked_read_unsigned (regcache, IA64_IP_REGNUM, &pc_value);
886 slot_num = (psr_value >> 41) & 3;
887
888 return pc_value | (slot_num * SLOT_MULTIPLIER);
889 }
890
891 void
892 ia64_write_pc (struct regcache *regcache, CORE_ADDR new_pc)
893 {
894 int slot_num = (int) (new_pc & 0xf) / SLOT_MULTIPLIER;
895 ULONGEST psr_value;
896
897 regcache_cooked_read_unsigned (regcache, IA64_PSR_REGNUM, &psr_value);
898 psr_value &= ~(3LL << 41);
899 psr_value |= (ULONGEST)(slot_num & 0x3) << 41;
900
901 new_pc &= ~0xfLL;
902
903 regcache_cooked_write_unsigned (regcache, IA64_PSR_REGNUM, psr_value);
904 regcache_cooked_write_unsigned (regcache, IA64_IP_REGNUM, new_pc);
905 }
906
907 #define IS_NaT_COLLECTION_ADDR(addr) ((((addr) >> 3) & 0x3f) == 0x3f)
908
909 /* Returns the address of the slot that's NSLOTS slots away from
910 the address ADDR. NSLOTS may be positive or negative. */
911 static CORE_ADDR
912 rse_address_add(CORE_ADDR addr, int nslots)
913 {
914 CORE_ADDR new_addr;
915 int mandatory_nat_slots = nslots / 63;
916 int direction = nslots < 0 ? -1 : 1;
917
918 new_addr = addr + 8 * (nslots + mandatory_nat_slots);
919
920 if ((new_addr >> 9) != ((addr + 8 * 64 * mandatory_nat_slots) >> 9))
921 new_addr += 8 * direction;
922
923 if (IS_NaT_COLLECTION_ADDR(new_addr))
924 new_addr += 8 * direction;
925
926 return new_addr;
927 }
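/* Illustrative sketch (kept out of the build): rse_address_add in action.
   Every 64th slot of the register backing store (the one whose address
   satisfies IS_NaT_COLLECTION_ADDR above) holds a NaT collection rather
   than a register, so stepping NSLOTS slots may skip extra 8-byte words.
   The starting address below is hypothetical.  */
#if 0
static void
example_rse_stepping (void)
{
  CORE_ADDR bsp = 0x6000000000001000ULL;	/* Hypothetical backing-store address.  */

  /* Stepping forward by 62 slots stays below the next NaT collection
     slot, so exactly 62 * 8 bytes are added ...  */
  CORE_ADDR a = rse_address_add (bsp, 62);	/* bsp + 0x1f0 */

  /* ... while stepping by 63 slots crosses it, and an extra 8 bytes are
     skipped for the NaT collection word.  */
  CORE_ADDR b = rse_address_add (bsp, 63);	/* bsp + 0x200 */

  (void) a;
  (void) b;
}
#endif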
928
929 static enum register_status
930 ia64_pseudo_register_read (struct gdbarch *gdbarch, struct regcache *regcache,
931 int regnum, gdb_byte *buf)
932 {
933 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
934 enum register_status status;
935
936 if (regnum >= V32_REGNUM && regnum <= V127_REGNUM)
937 {
938 #ifdef HAVE_LIBUNWIND_IA64_H
939 /* First try and use the libunwind special reg accessor,
940 otherwise fallback to standard logic. */
941 if (!libunwind_is_initialized ()
942 || libunwind_get_reg_special (gdbarch, regcache, regnum, buf) != 0)
943 #endif
944 {
945 /* The fallback position is to assume that r32-r127 are
946 found sequentially in memory starting at $bof. This
947 isn't always true, but without libunwind, this is the
948 best we can do. */
949 enum register_status status;
950 ULONGEST cfm;
951 ULONGEST bsp;
952 CORE_ADDR reg;
953
954 status = regcache_cooked_read_unsigned (regcache,
955 IA64_BSP_REGNUM, &bsp);
956 if (status != REG_VALID)
957 return status;
958
959 status = regcache_cooked_read_unsigned (regcache,
960 IA64_CFM_REGNUM, &cfm);
961 if (status != REG_VALID)
962 return status;
963
964 /* The bsp points at the end of the register frame so we
965 subtract the size of the frame from it to get the start of
966 the register frame. */
967 bsp = rse_address_add (bsp, -(cfm & 0x7f));
968
969 if ((cfm & 0x7f) > regnum - V32_REGNUM)
970 {
971 ULONGEST reg_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
972 reg = read_memory_integer ((CORE_ADDR)reg_addr, 8, byte_order);
973 store_unsigned_integer (buf, register_size (gdbarch, regnum),
974 byte_order, reg);
975 }
976 else
977 store_unsigned_integer (buf, register_size (gdbarch, regnum),
978 byte_order, 0);
979 }
980 }
981 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT31_REGNUM)
982 {
983 ULONGEST unatN_val;
984 ULONGEST unat;
985 status = regcache_cooked_read_unsigned (regcache, IA64_UNAT_REGNUM, &unat);
986 if (status != REG_VALID)
987 return status;
988 unatN_val = (unat & (1LL << (regnum - IA64_NAT0_REGNUM))) != 0;
989 store_unsigned_integer (buf, register_size (gdbarch, regnum),
990 byte_order, unatN_val);
991 }
992 else if (IA64_NAT32_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
993 {
994 ULONGEST natN_val = 0;
995 ULONGEST bsp;
996 ULONGEST cfm;
997 CORE_ADDR gr_addr = 0;
998 status = regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
999 if (status != REG_VALID)
1000 return status;
1001 status = regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1002 if (status != REG_VALID)
1003 return status;
1004
1005 /* The bsp points at the end of the register frame so we
1006 subtract the size of the frame from it to get the start of the register frame. */
1007 bsp = rse_address_add (bsp, -(cfm & 0x7f));
1008
1009 if ((cfm & 0x7f) > regnum - V32_REGNUM)
1010 gr_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
1011
1012 if (gr_addr != 0)
1013 {
1014 /* Compute address of nat collection bits. */
1015 CORE_ADDR nat_addr = gr_addr | 0x1f8;
1016 CORE_ADDR nat_collection;
1017 int nat_bit;
1018 /* If our nat collection address is bigger than bsp, we have to get
1019 the nat collection from rnat. Otherwise, we fetch the nat
1020 collection from the computed address. */
1021 if (nat_addr >= bsp)
1022 regcache_cooked_read_unsigned (regcache, IA64_RNAT_REGNUM,
1023 &nat_collection);
1024 else
1025 nat_collection = read_memory_integer (nat_addr, 8, byte_order);
1026 nat_bit = (gr_addr >> 3) & 0x3f;
1027 natN_val = (nat_collection >> nat_bit) & 1;
1028 }
1029
1030 store_unsigned_integer (buf, register_size (gdbarch, regnum),
1031 byte_order, natN_val);
1032 }
1033 else if (regnum == VBOF_REGNUM)
1034 {
1035 /* A virtual register frame start is provided for user convenience.
1036 It can be calculated as bsp - sof (size of frame). */
1037 ULONGEST bsp, vbsp;
1038 ULONGEST cfm;
1039 status = regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
1040 if (status != REG_VALID)
1041 return status;
1042 status = regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1043 if (status != REG_VALID)
1044 return status;
1045
1046 /* The bsp points at the end of the register frame so we
1047 subtract the size of the frame from it to get the beginning of the frame. */
1048 vbsp = rse_address_add (bsp, -(cfm & 0x7f));
1049 store_unsigned_integer (buf, register_size (gdbarch, regnum),
1050 byte_order, vbsp);
1051 }
1052 else if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
1053 {
1054 ULONGEST pr;
1055 ULONGEST cfm;
1056 ULONGEST prN_val;
1057 status = regcache_cooked_read_unsigned (regcache, IA64_PR_REGNUM, &pr);
1058 if (status != REG_VALID)
1059 return status;
1060 status = regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1061 if (status != REG_VALID)
1062 return status;
1063
1064 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
1065 {
1066 /* Fetch predicate register rename base from current frame
1067 marker for this frame. */
1068 int rrb_pr = (cfm >> 32) & 0x3f;
1069
1070 /* Adjust the register number to account for register rotation. */
1071 regnum = VP16_REGNUM
1072 + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
1073 }
1074 prN_val = (pr & (1LL << (regnum - VP0_REGNUM))) != 0;
1075 store_unsigned_integer (buf, register_size (gdbarch, regnum),
1076 byte_order, prN_val);
1077 }
1078 else
1079 memset (buf, 0, register_size (gdbarch, regnum));
1080
1081 return REG_VALID;
1082 }
1083
1084 static void
1085 ia64_pseudo_register_write (struct gdbarch *gdbarch, struct regcache *regcache,
1086 int regnum, const gdb_byte *buf)
1087 {
1088 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1089
1090 if (regnum >= V32_REGNUM && regnum <= V127_REGNUM)
1091 {
1092 ULONGEST bsp;
1093 ULONGEST cfm;
1094 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
1095 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1096
1097 bsp = rse_address_add (bsp, -(cfm & 0x7f));
1098
1099 if ((cfm & 0x7f) > regnum - V32_REGNUM)
1100 {
1101 ULONGEST reg_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
1102 write_memory (reg_addr, buf, 8);
1103 }
1104 }
1105 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT31_REGNUM)
1106 {
1107 ULONGEST unatN_val, unat, unatN_mask;
1108 regcache_cooked_read_unsigned (regcache, IA64_UNAT_REGNUM, &unat);
1109 unatN_val = extract_unsigned_integer (buf, register_size (gdbarch,
1110 regnum),
1111 byte_order);
1112 unatN_mask = (1LL << (regnum - IA64_NAT0_REGNUM));
1113 if (unatN_val == 0)
1114 unat &= ~unatN_mask;
1115 else if (unatN_val == 1)
1116 unat |= unatN_mask;
1117 regcache_cooked_write_unsigned (regcache, IA64_UNAT_REGNUM, unat);
1118 }
1119 else if (IA64_NAT32_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
1120 {
1121 ULONGEST natN_val;
1122 ULONGEST bsp;
1123 ULONGEST cfm;
1124 CORE_ADDR gr_addr = 0;
1125 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
1126 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1127
1128 /* The bsp points at the end of the register frame so we
1129 subtract the size of the frame from it to get the start of the register frame. */
1130 bsp = rse_address_add (bsp, -(cfm & 0x7f));
1131
1132 if ((cfm & 0x7f) > regnum - V32_REGNUM)
1133 gr_addr = rse_address_add (bsp, (regnum - V32_REGNUM));
1134
1135 natN_val = extract_unsigned_integer (buf, register_size (gdbarch,
1136 regnum),
1137 byte_order);
1138
1139 if (gr_addr != 0 && (natN_val == 0 || natN_val == 1))
1140 {
1141 /* Compute address of nat collection bits. */
1142 CORE_ADDR nat_addr = gr_addr | 0x1f8;
1143 CORE_ADDR nat_collection;
1144 int natN_bit = (gr_addr >> 3) & 0x3f;
1145 ULONGEST natN_mask = (1LL << natN_bit);
1146 /* If our nat collection address is bigger than bsp, we have to get
1147 the nat collection from rnat. Otherwise, we fetch the nat
1148 collection from the computed address. */
1149 if (nat_addr >= bsp)
1150 {
1151 regcache_cooked_read_unsigned (regcache,
1152 IA64_RNAT_REGNUM,
1153 &nat_collection);
1154 if (natN_val)
1155 nat_collection |= natN_mask;
1156 else
1157 nat_collection &= ~natN_mask;
1158 regcache_cooked_write_unsigned (regcache, IA64_RNAT_REGNUM,
1159 nat_collection);
1160 }
1161 else
1162 {
1163 gdb_byte nat_buf[8];
1164 nat_collection = read_memory_integer (nat_addr, 8, byte_order);
1165 if (natN_val)
1166 nat_collection |= natN_mask;
1167 else
1168 nat_collection &= ~natN_mask;
1169 store_unsigned_integer (nat_buf, register_size (gdbarch, regnum),
1170 byte_order, nat_collection);
1171 write_memory (nat_addr, nat_buf, 8);
1172 }
1173 }
1174 }
1175 else if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
1176 {
1177 ULONGEST pr;
1178 ULONGEST cfm;
1179 ULONGEST prN_val;
1180 ULONGEST prN_mask;
1181
1182 regcache_cooked_read_unsigned (regcache, IA64_PR_REGNUM, &pr);
1183 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
1184
1185 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
1186 {
1187 /* Fetch predicate register rename base from current frame
1188 marker for this frame. */
1189 int rrb_pr = (cfm >> 32) & 0x3f;
1190
1191 /* Adjust the register number to account for register rotation. */
1192 regnum = VP16_REGNUM
1193 + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
1194 }
1195 prN_val = extract_unsigned_integer (buf, register_size (gdbarch, regnum),
1196 byte_order);
1197 prN_mask = (1LL << (regnum - VP0_REGNUM));
1198 if (prN_val == 0)
1199 pr &= ~prN_mask;
1200 else if (prN_val == 1)
1201 pr |= prN_mask;
1202 regcache_cooked_write_unsigned (regcache, IA64_PR_REGNUM, pr);
1203 }
1204 }
1205
1206 /* The ia64 needs to convert between various ieee floating-point formats
1207 and the special ia64 floating point register format. */
1208
1209 static int
1210 ia64_convert_register_p (struct gdbarch *gdbarch, int regno, struct type *type)
1211 {
1212 return (regno >= IA64_FR0_REGNUM && regno <= IA64_FR127_REGNUM
1213 && TYPE_CODE (type) == TYPE_CODE_FLT
1214 && type != ia64_ext_type (gdbarch));
1215 }
1216
1217 static int
1218 ia64_register_to_value (struct frame_info *frame, int regnum,
1219 struct type *valtype, gdb_byte *out,
1220 int *optimizedp, int *unavailablep)
1221 {
1222 struct gdbarch *gdbarch = get_frame_arch (frame);
1223 gdb_byte in[IA64_FP_REGISTER_SIZE];
1224
1225 /* Convert to TYPE. */
1226 if (!get_frame_register_bytes (frame, regnum, 0,
1227 register_size (gdbarch, regnum),
1228 in, optimizedp, unavailablep))
1229 return 0;
1230
1231 target_float_convert (in, ia64_ext_type (gdbarch), out, valtype);
1232 *optimizedp = *unavailablep = 0;
1233 return 1;
1234 }
1235
1236 static void
1237 ia64_value_to_register (struct frame_info *frame, int regnum,
1238 struct type *valtype, const gdb_byte *in)
1239 {
1240 struct gdbarch *gdbarch = get_frame_arch (frame);
1241 gdb_byte out[IA64_FP_REGISTER_SIZE];
1242 target_float_convert (in, valtype, out, ia64_ext_type (gdbarch));
1243 put_frame_register (frame, regnum, out);
1244 }
1245
1246
1247 /* Limit the number of skipped non-prologue instructions since examining
1248 the prologue is expensive. */
1249 static int max_skip_non_prologue_insns = 40;
1250
1251 /* Given PC representing the starting address of a function, and
1252 LIM_PC which is the (sloppy) limit to which to scan when looking
1253 for a prologue, attempt to further refine this limit by using
1254 the line data in the symbol table. If successful, a better guess
1255 on where the prologue ends is returned, otherwise the previous
1256 value of lim_pc is returned. TRUST_LIMIT is a pointer to a flag
1257 which will be set to indicate whether the returned limit may be
1258 used with no further scanning in the event that the function is
1259 frameless. */
1260
1261 /* FIXME: cagney/2004-02-14: This function and logic have largely been
1262 superseded by skip_prologue_using_sal. */
1263
1264 static CORE_ADDR
1265 refine_prologue_limit (CORE_ADDR pc, CORE_ADDR lim_pc, int *trust_limit)
1266 {
1267 struct symtab_and_line prologue_sal;
1268 CORE_ADDR start_pc = pc;
1269 CORE_ADDR end_pc;
1270
1271 /* The prologue can not possibly go past the function end itself,
1272 so we can already adjust LIM_PC accordingly. */
1273 if (find_pc_partial_function (pc, NULL, NULL, &end_pc) && end_pc < lim_pc)
1274 lim_pc = end_pc;
1275
1276 /* Start off not trusting the limit. */
1277 *trust_limit = 0;
1278
1279 prologue_sal = find_pc_line (pc, 0);
1280 if (prologue_sal.line != 0)
1281 {
1282 int i;
1283 CORE_ADDR addr = prologue_sal.end;
1284
1285 /* Handle the case in which the compiler's optimizer/scheduler
1286 has moved instructions into the prologue. We scan ahead
1287 in the function looking for address ranges whose corresponding
1288 line number is less than or equal to the first one that we
1289 found for the function. (It can be less than when the
1290 scheduler puts a body instruction before the first prologue
1291 instruction.) */
1292 for (i = 2 * max_skip_non_prologue_insns;
1293 i > 0 && (lim_pc == 0 || addr < lim_pc);
1294 i--)
1295 {
1296 struct symtab_and_line sal;
1297
1298 sal = find_pc_line (addr, 0);
1299 if (sal.line == 0)
1300 break;
1301 if (sal.line <= prologue_sal.line
1302 && sal.symtab == prologue_sal.symtab)
1303 {
1304 prologue_sal = sal;
1305 }
1306 addr = sal.end;
1307 }
1308
1309 if (lim_pc == 0 || prologue_sal.end < lim_pc)
1310 {
1311 lim_pc = prologue_sal.end;
1312 if (start_pc == get_pc_function_start (lim_pc))
1313 *trust_limit = 1;
1314 }
1315 }
1316 return lim_pc;
1317 }
1318
1319 #define isScratch(_regnum_) ((_regnum_) == 2 || (_regnum_) == 3 \
1320 || (8 <= (_regnum_) && (_regnum_) <= 11) \
1321 || (14 <= (_regnum_) && (_regnum_) <= 31))
1322 #define imm9(_instr_) \
1323 ( ((((_instr_) & 0x01000000000LL) ? -1 : 0) << 8) \
1324 | (((_instr_) & 0x00008000000LL) >> 20) \
1325 | (((_instr_) & 0x00000001fc0LL) >> 6))
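/* Illustrative sketch (kept out of the build): decoding the 9-bit signed
   immediate the imm9 macro above extracts.  Bit 36 of the 41-bit
   instruction supplies the sign, bit 27 becomes bit 7 of the immediate,
   and bits 6..12 become bits 0..6.  The instruction word below is
   hypothetical.  */
#if 0
static void
example_imm9 (void)
{
  /* Sign bit (36) set, bit 27 clear, bits 6..12 == 16.  */
  long long instr = 0x01000000000LL | (16LL << 6);
  int imm = imm9 (instr);	/* -256 | 16 == -240.  */

  (void) imm;
}
#endif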
1326
1327 /* Allocate and initialize a frame cache. */
1328
1329 static struct ia64_frame_cache *
1330 ia64_alloc_frame_cache (void)
1331 {
1332 struct ia64_frame_cache *cache;
1333 int i;
1334
1335 cache = FRAME_OBSTACK_ZALLOC (struct ia64_frame_cache);
1336
1337 /* Base address. */
1338 cache->base = 0;
1339 cache->pc = 0;
1340 cache->cfm = 0;
1341 cache->prev_cfm = 0;
1342 cache->sof = 0;
1343 cache->sol = 0;
1344 cache->sor = 0;
1345 cache->bsp = 0;
1346 cache->fp_reg = 0;
1347 cache->frameless = 1;
1348
1349 for (i = 0; i < NUM_IA64_RAW_REGS; i++)
1350 cache->saved_regs[i] = 0;
1351
1352 return cache;
1353 }
1354
1355 static CORE_ADDR
1356 examine_prologue (CORE_ADDR pc, CORE_ADDR lim_pc,
1357 struct frame_info *this_frame,
1358 struct ia64_frame_cache *cache)
1359 {
1360 CORE_ADDR next_pc;
1361 CORE_ADDR last_prologue_pc = pc;
1362 instruction_type it;
1363 long long instr;
1364 int cfm_reg = 0;
1365 int ret_reg = 0;
1366 int fp_reg = 0;
1367 int unat_save_reg = 0;
1368 int pr_save_reg = 0;
1369 int mem_stack_frame_size = 0;
1370 int spill_reg = 0;
1371 CORE_ADDR spill_addr = 0;
1372 char instores[8];
1373 char infpstores[8];
1374 char reg_contents[256];
1375 int trust_limit;
1376 int frameless = 1;
1377 int i;
1378 CORE_ADDR addr;
1379 gdb_byte buf[8];
1380 CORE_ADDR bof, sor, sol, sof, cfm, rrb_gr;
1381
1382 memset (instores, 0, sizeof instores);
1383 memset (infpstores, 0, sizeof infpstores);
1384 memset (reg_contents, 0, sizeof reg_contents);
1385
1386 if (cache->after_prologue != 0
1387 && cache->after_prologue <= lim_pc)
1388 return cache->after_prologue;
1389
1390 lim_pc = refine_prologue_limit (pc, lim_pc, &trust_limit);
1391 next_pc = fetch_instruction (pc, &it, &instr);
1392
1393 /* We want to check if we have a recognizable function start before we
1394 look ahead for a prologue. */
1395 if (pc < lim_pc && next_pc
1396 && it == M && ((instr & 0x1ee0000003fLL) == 0x02c00000000LL))
1397 {
1398 /* alloc - start of a regular function. */
1399 int sol = (int) ((instr & 0x00007f00000LL) >> 20);
1400 int sof = (int) ((instr & 0x000000fe000LL) >> 13);
1401 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1402
1403 /* Verify that the current cfm matches what we think is the
1404 function start. If we have somehow jumped within a function,
1405 we do not want to interpret the prologue and calculate the
1406 addresses of various registers such as the return address.
1407 We will instead treat the frame as frameless. */
1408 if (!this_frame ||
1409 (sof == (cache->cfm & 0x7f) &&
1410 sol == ((cache->cfm >> 7) & 0x7f)))
1411 frameless = 0;
1412
1413 cfm_reg = rN;
1414 last_prologue_pc = next_pc;
1415 pc = next_pc;
1416 }
1417 else
1418 {
1419 /* Look for a leaf routine. */
1420 if (pc < lim_pc && next_pc
1421 && (it == I || it == M)
1422 && ((instr & 0x1ee00000000LL) == 0x10800000000LL))
1423 {
1424 /* adds rN = imm14, rM (or mov rN, rM when imm14 is 0) */
1425 int imm = (int) ((((instr & 0x01000000000LL) ? -1 : 0) << 13)
1426 | ((instr & 0x001f8000000LL) >> 20)
1427 | ((instr & 0x000000fe000LL) >> 13));
1428 int rM = (int) ((instr & 0x00007f00000LL) >> 20);
1429 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1430 int qp = (int) (instr & 0x0000000003fLL);
1431 if (qp == 0 && rN == 2 && imm == 0 && rM == 12 && fp_reg == 0)
1432 {
1433 /* mov r2, r12 - beginning of leaf routine. */
1434 fp_reg = rN;
1435 last_prologue_pc = next_pc;
1436 }
1437 }
1438
1439 /* If we don't recognize a regular function or leaf routine, we are
1440 done. */
1441 if (!fp_reg)
1442 {
1443 pc = lim_pc;
1444 if (trust_limit)
1445 last_prologue_pc = lim_pc;
1446 }
1447 }
1448
1449 /* Loop, looking for prologue instructions, keeping track of
1450 where preserved registers were spilled. */
1451 while (pc < lim_pc)
1452 {
1453 next_pc = fetch_instruction (pc, &it, &instr);
1454 if (next_pc == 0)
1455 break;
1456
1457 if (it == B && ((instr & 0x1e1f800003fLL) != 0x04000000000LL))
1458 {
1459 /* Exit loop upon hitting a non-nop branch instruction. */
1460 if (trust_limit)
1461 lim_pc = pc;
1462 break;
1463 }
1464 else if (((instr & 0x3fLL) != 0LL) &&
1465 (frameless || ret_reg != 0))
1466 {
1467 /* Exit loop upon hitting a predicated instruction if
1468 we already have the return register or if we are frameless. */
1469 if (trust_limit)
1470 lim_pc = pc;
1471 break;
1472 }
1473 else if (it == I && ((instr & 0x1eff8000000LL) == 0x00188000000LL))
1474 {
1475 /* Move from BR */
1476 int b2 = (int) ((instr & 0x0000000e000LL) >> 13);
1477 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1478 int qp = (int) (instr & 0x0000000003f);
1479
1480 if (qp == 0 && b2 == 0 && rN >= 32 && ret_reg == 0)
1481 {
1482 ret_reg = rN;
1483 last_prologue_pc = next_pc;
1484 }
1485 }
1486 else if ((it == I || it == M)
1487 && ((instr & 0x1ee00000000LL) == 0x10800000000LL))
1488 {
1489 /* adds rN = imm14, rM (or mov rN, rM when imm14 is 0) */
1490 int imm = (int) ((((instr & 0x01000000000LL) ? -1 : 0) << 13)
1491 | ((instr & 0x001f8000000LL) >> 20)
1492 | ((instr & 0x000000fe000LL) >> 13));
1493 int rM = (int) ((instr & 0x00007f00000LL) >> 20);
1494 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1495 int qp = (int) (instr & 0x0000000003fLL);
1496
1497 if (qp == 0 && rN >= 32 && imm == 0 && rM == 12 && fp_reg == 0)
1498 {
1499 /* mov rN, r12 */
1500 fp_reg = rN;
1501 last_prologue_pc = next_pc;
1502 }
1503 else if (qp == 0 && rN == 12 && rM == 12)
1504 {
1505 /* adds r12, -mem_stack_frame_size, r12 */
1506 mem_stack_frame_size -= imm;
1507 last_prologue_pc = next_pc;
1508 }
1509 else if (qp == 0 && rN == 2
1510 && ((rM == fp_reg && fp_reg != 0) || rM == 12))
1511 {
1512 CORE_ADDR saved_sp = 0;
1513 /* adds r2, spilloffset, rFramePointer
1514 or
1515 adds r2, spilloffset, r12
1516
1517 Get ready for stf.spill or st8.spill instructions.
1518 The address to start spilling at is loaded into r2.
1519 FIXME: Why r2? That's what gcc currently uses; it
1520 could well be different for other compilers. */
1521
1522 /* Hmm... whether or not this will work will depend on
1523 where the pc is. If it's still early in the prologue
1524 this'll be wrong. FIXME */
1525 if (this_frame)
1526 {
1527 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1528 saved_sp = get_frame_register_unsigned (this_frame,
1529 sp_regnum);
1530 }
1531 spill_addr = saved_sp
1532 + (rM == 12 ? 0 : mem_stack_frame_size)
1533 + imm;
1534 spill_reg = rN;
1535 last_prologue_pc = next_pc;
1536 }
1537 else if (qp == 0 && rM >= 32 && rM < 40 && !instores[rM-32] &&
1538 rN < 256 && imm == 0)
1539 {
1540 /* mov rN, rM where rM is an input register. */
1541 reg_contents[rN] = rM;
1542 last_prologue_pc = next_pc;
1543 }
1544 else if (frameless && qp == 0 && rN == fp_reg && imm == 0 &&
1545 rM == 2)
1546 {
1547 /* mov r12, r2 */
1548 last_prologue_pc = next_pc;
1549 break;
1550 }
1551 }
1552 else if (it == M
1553 && ( ((instr & 0x1efc0000000LL) == 0x0eec0000000LL)
1554 || ((instr & 0x1ffc8000000LL) == 0x0cec0000000LL) ))
1555 {
1556 /* stf.spill [rN] = fM, imm9
1557 or
1558 stf.spill [rN] = fM */
1559
1560 int imm = imm9(instr);
1561 int rN = (int) ((instr & 0x00007f00000LL) >> 20);
1562 int fM = (int) ((instr & 0x000000fe000LL) >> 13);
1563 int qp = (int) (instr & 0x0000000003fLL);
1564 if (qp == 0 && rN == spill_reg && spill_addr != 0
1565 && ((2 <= fM && fM <= 5) || (16 <= fM && fM <= 31)))
1566 {
1567 cache->saved_regs[IA64_FR0_REGNUM + fM] = spill_addr;
1568
1569 if ((instr & 0x1efc0000000LL) == 0x0eec0000000LL)
1570 spill_addr += imm;
1571 else
1572 spill_addr = 0; /* last one; must be done. */
1573 last_prologue_pc = next_pc;
1574 }
1575 }
1576 else if ((it == M && ((instr & 0x1eff8000000LL) == 0x02110000000LL))
1577 || (it == I && ((instr & 0x1eff8000000LL) == 0x00050000000LL)) )
1578 {
1579 /* mov.m rN = arM
1580 or
1581 mov.i rN = arM */
1582
1583 int arM = (int) ((instr & 0x00007f00000LL) >> 20);
1584 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1585 int qp = (int) (instr & 0x0000000003fLL);
1586 if (qp == 0 && isScratch (rN) && arM == 36 /* ar.unat */)
1587 {
1588 /* We have something like "mov.m r3 = ar.unat". Remember the
1589 r3 (or whatever) and watch for a store of this register... */
1590 unat_save_reg = rN;
1591 last_prologue_pc = next_pc;
1592 }
1593 }
1594 else if (it == I && ((instr & 0x1eff8000000LL) == 0x00198000000LL))
1595 {
1596 /* mov rN = pr */
1597 int rN = (int) ((instr & 0x00000001fc0LL) >> 6);
1598 int qp = (int) (instr & 0x0000000003fLL);
1599 if (qp == 0 && isScratch (rN))
1600 {
1601 pr_save_reg = rN;
1602 last_prologue_pc = next_pc;
1603 }
1604 }
1605 else if (it == M
1606 && ( ((instr & 0x1ffc8000000LL) == 0x08cc0000000LL)
1607 || ((instr & 0x1efc0000000LL) == 0x0acc0000000LL)))
1608 {
1609 /* st8 [rN] = rM
1610 or
1611 st8 [rN] = rM, imm9 */
1612 int rN = (int) ((instr & 0x00007f00000LL) >> 20);
1613 int rM = (int) ((instr & 0x000000fe000LL) >> 13);
1614 int qp = (int) (instr & 0x0000000003fLL);
1615 int indirect = rM < 256 ? reg_contents[rM] : 0;
1616 if (qp == 0 && rN == spill_reg && spill_addr != 0
1617 && (rM == unat_save_reg || rM == pr_save_reg))
1618 {
1619 /* We've found a spill of either the UNAT register or the PR
1620 register. (Well, not exactly; what we've actually found is
1621 a spill of the register that UNAT or PR was moved to).
1622 Record that fact and move on... */
1623 if (rM == unat_save_reg)
1624 {
1625 /* Track UNAT register. */
1626 cache->saved_regs[IA64_UNAT_REGNUM] = spill_addr;
1627 unat_save_reg = 0;
1628 }
1629 else
1630 {
1631 /* Track PR register. */
1632 cache->saved_regs[IA64_PR_REGNUM] = spill_addr;
1633 pr_save_reg = 0;
1634 }
1635 if ((instr & 0x1efc0000000LL) == 0x0acc0000000LL)
1636 /* st8 [rN] = rM, imm9 */
1637 spill_addr += imm9(instr);
1638 else
1639 spill_addr = 0; /* Must be done spilling. */
1640 last_prologue_pc = next_pc;
1641 }
1642 else if (qp == 0 && 32 <= rM && rM < 40 && !instores[rM-32])
1643 {
1644 /* Allow up to one store of each input register. */
1645 instores[rM-32] = 1;
1646 last_prologue_pc = next_pc;
1647 }
1648 else if (qp == 0 && 32 <= indirect && indirect < 40 &&
1649 !instores[indirect-32])
1650 {
1651 /* Allow an indirect store of an input register. */
1652 instores[indirect-32] = 1;
1653 last_prologue_pc = next_pc;
1654 }
1655 }
1656 else if (it == M && ((instr & 0x1ff08000000LL) == 0x08c00000000LL))
1657 {
1658 /* One of
1659 st1 [rN] = rM
1660 st2 [rN] = rM
1661 st4 [rN] = rM
1662 st8 [rN] = rM
1663 Note that the st8 case is handled in the clause above.
1664
1665 Advance over stores of input registers. One store per input
1666 register is permitted. */
1667 int rM = (int) ((instr & 0x000000fe000LL) >> 13);
1668 int qp = (int) (instr & 0x0000000003fLL);
1669 int indirect = rM < 256 ? reg_contents[rM] : 0;
1670 if (qp == 0 && 32 <= rM && rM < 40 && !instores[rM-32])
1671 {
1672 instores[rM-32] = 1;
1673 last_prologue_pc = next_pc;
1674 }
1675 else if (qp == 0 && 32 <= indirect && indirect < 40 &&
1676 !instores[indirect-32])
1677 {
1678 /* Allow an indirect store of an input register. */
1679 instores[indirect-32] = 1;
1680 last_prologue_pc = next_pc;
1681 }
1682 }
1683 else if (it == M && ((instr & 0x1ff88000000LL) == 0x0cc80000000LL))
1684 {
1685 /* Either
1686 stfs [rN] = fM
1687 or
1688 stfd [rN] = fM
1689
1690 Advance over stores of floating point input registers. Again
1691 one store per register is permitted. */
1692 int fM = (int) ((instr & 0x000000fe000LL) >> 13);
1693 int qp = (int) (instr & 0x0000000003fLL);
1694 if (qp == 0 && 8 <= fM && fM < 16 && !infpstores[fM - 8])
1695 {
1696 infpstores[fM-8] = 1;
1697 last_prologue_pc = next_pc;
1698 }
1699 }
1700 else if (it == M
1701 && ( ((instr & 0x1ffc8000000LL) == 0x08ec0000000LL)
1702 || ((instr & 0x1efc0000000LL) == 0x0aec0000000LL)))
1703 {
1704 /* st8.spill [rN] = rM
1705 or
1706 st8.spill [rN] = rM, imm9 */
1707 int rN = (int) ((instr & 0x00007f00000LL) >> 20);
1708 int rM = (int) ((instr & 0x000000fe000LL) >> 13);
1709 int qp = (int) (instr & 0x0000000003fLL);
1710 if (qp == 0 && rN == spill_reg && 4 <= rM && rM <= 7)
1711 {
1712 /* We've found a spill of one of the preserved general purpose
1713 regs. Record the spill address and advance the spill
1714 register if appropriate. */
1715 cache->saved_regs[IA64_GR0_REGNUM + rM] = spill_addr;
1716 if ((instr & 0x1efc0000000LL) == 0x0aec0000000LL)
1717 /* st8.spill [rN] = rM, imm9 */
1718 spill_addr += imm9(instr);
1719 else
1720 spill_addr = 0; /* Done spilling. */
1721 last_prologue_pc = next_pc;
1722 }
1723 }
1724
1725 pc = next_pc;
1726 }
1727
1728 /* If not frameless and we aren't called by skip_prologue, then we need
1729 to calculate registers for the previous frame which will be needed
1730 later. */
1731
1732 if (!frameless && this_frame)
1733 {
1734 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1735 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1736
1737 /* Extract the size of the rotating portion of the stack
1738 frame and the register rename base from the current
1739 frame marker. */
1740 cfm = cache->cfm;
1741 sor = cache->sor;
1742 sof = cache->sof;
1743 sol = cache->sol;
1744 rrb_gr = (cfm >> 18) & 0x7f;
1745
1746 /* Find the bof (beginning of frame). */
1747 bof = rse_address_add (cache->bsp, -sof);
1748
1749 for (i = 0, addr = bof;
1750 i < sof;
1751 i++, addr += 8)
1752 {
1753 if (IS_NaT_COLLECTION_ADDR (addr))
1754 {
1755 addr += 8;
1756 }
1757 if (i+32 == cfm_reg)
1758 cache->saved_regs[IA64_CFM_REGNUM] = addr;
1759 if (i+32 == ret_reg)
1760 cache->saved_regs[IA64_VRAP_REGNUM] = addr;
1761 if (i+32 == fp_reg)
1762 cache->saved_regs[IA64_VFP_REGNUM] = addr;
1763 }
1764
1765 /* For the previous argument registers we require the previous bof.
1766 If we can't find the previous cfm, then we can do nothing. */
1767 cfm = 0;
1768 if (cache->saved_regs[IA64_CFM_REGNUM] != 0)
1769 {
1770 cfm = read_memory_integer (cache->saved_regs[IA64_CFM_REGNUM],
1771 8, byte_order);
1772 }
1773 else if (cfm_reg != 0)
1774 {
1775 get_frame_register (this_frame, cfm_reg, buf);
1776 cfm = extract_unsigned_integer (buf, 8, byte_order);
1777 }
1778 cache->prev_cfm = cfm;
1779
1780 if (cfm != 0)
1781 {
1782 sor = ((cfm >> 14) & 0xf) * 8;
1783 sof = (cfm & 0x7f);
1784 sol = (cfm >> 7) & 0x7f;
1785 rrb_gr = (cfm >> 18) & 0x7f;
1786
1787 /* The previous bof only requires subtraction of the sol (size of
1788 locals) due to the overlap between output and input of
1789 subsequent frames. */
1790 bof = rse_address_add (bof, -sol);
1791
1792 for (i = 0, addr = bof;
1793 i < sof;
1794 i++, addr += 8)
1795 {
1796 if (IS_NaT_COLLECTION_ADDR (addr))
1797 {
1798 addr += 8;
1799 }
1800 if (i < sor)
1801 cache->saved_regs[IA64_GR32_REGNUM
1802 + ((i + (sor - rrb_gr)) % sor)]
1803 = addr;
1804 else
1805 cache->saved_regs[IA64_GR32_REGNUM + i] = addr;
1806 }
1807
1808 }
1809 }
1810
1811 /* Try to trust the lim_pc value whenever possible. */
1812 if (trust_limit && lim_pc >= last_prologue_pc)
1813 last_prologue_pc = lim_pc;
1814
1815 cache->frameless = frameless;
1816 cache->after_prologue = last_prologue_pc;
1817 cache->mem_stack_frame_size = mem_stack_frame_size;
1818 cache->fp_reg = fp_reg;
1819
1820 return last_prologue_pc;
1821 }
1822
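/* Illustrative sketch, not part of the original file and unreferenced by it:
   the current frame marker (CFM) bit layout that the shifts and masks in
   examine_prologue, ia64_frame_cache and ia64_frame_prev_register decode.
   The struct and helper names below are hypothetical; only <stdint.h> is
   assumed.  */

struct cfm_fields_sketch
{
  unsigned sof;     /* Size of frame, bits 0..6.  */
  unsigned sol;     /* Size of locals, bits 7..13.  */
  unsigned sor;     /* Size of rotating region, bits 14..17, in groups of 8.  */
  unsigned rrb_gr;  /* General register rename base, bits 18..24.  */
  unsigned rrb_fr;  /* Floating-point rename base, bits 25..31.  */
  unsigned rrb_pr;  /* Predicate rename base, bits 32..37.  */
};

static struct cfm_fields_sketch
decode_cfm_sketch (uint64_t cfm)
{
  struct cfm_fields_sketch f;

  f.sof = cfm & 0x7f;
  f.sol = (cfm >> 7) & 0x7f;
  f.sor = ((cfm >> 14) & 0xf) * 8;
  f.rrb_gr = (cfm >> 18) & 0x7f;
  f.rrb_fr = (cfm >> 25) & 0x7f;
  f.rrb_pr = (cfm >> 32) & 0x3f;
  return f;
}
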
1823 CORE_ADDR
1824 ia64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
1825 {
1826 struct ia64_frame_cache cache;
1827 cache.base = 0;
1828 cache.after_prologue = 0;
1829 cache.cfm = 0;
1830 cache.bsp = 0;
1831
1832 /* Call examine_prologue with 0 as its third (this_frame) argument,
1833 since we don't have a frame to pass. */
1834 return examine_prologue (pc, pc+1024, 0, &cache);
1835 }
1836
1837
1838 /* Normal frames. */
1839
1840 static struct ia64_frame_cache *
1841 ia64_frame_cache (struct frame_info *this_frame, void **this_cache)
1842 {
1843 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1844 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1845 struct ia64_frame_cache *cache;
1846 gdb_byte buf[8];
1847 CORE_ADDR cfm;
1848
1849 if (*this_cache)
1850 return (struct ia64_frame_cache *) *this_cache;
1851
1852 cache = ia64_alloc_frame_cache ();
1853 *this_cache = cache;
1854
1855 get_frame_register (this_frame, sp_regnum, buf);
1856 cache->saved_sp = extract_unsigned_integer (buf, 8, byte_order);
1857
1858 /* We always want the bsp to point to the end of frame.
1859 This way, we can always get the beginning of frame (bof)
1860 by subtracting frame size. */
1861 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
1862 cache->bsp = extract_unsigned_integer (buf, 8, byte_order);
1863
1864 get_frame_register (this_frame, IA64_PSR_REGNUM, buf);
1865
1866 get_frame_register (this_frame, IA64_CFM_REGNUM, buf);
1867 cfm = extract_unsigned_integer (buf, 8, byte_order);
1868
1869 cache->sof = (cfm & 0x7f);
1870 cache->sol = (cfm >> 7) & 0x7f;
1871 cache->sor = ((cfm >> 14) & 0xf) * 8;
1872
1873 cache->cfm = cfm;
1874
1875 cache->pc = get_frame_func (this_frame);
1876
1877 if (cache->pc != 0)
1878 examine_prologue (cache->pc, get_frame_pc (this_frame), this_frame, cache);
1879
1880 cache->base = cache->saved_sp + cache->mem_stack_frame_size;
1881
1882 return cache;
1883 }
1884
1885 static void
1886 ia64_frame_this_id (struct frame_info *this_frame, void **this_cache,
1887 struct frame_id *this_id)
1888 {
1889 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1890 struct ia64_frame_cache *cache =
1891 ia64_frame_cache (this_frame, this_cache);
1892
1893 /* If outermost frame, mark with null frame id. */
1894 if (cache->base != 0)
1895 (*this_id) = frame_id_build_special (cache->base, cache->pc, cache->bsp);
1896 if (gdbarch_debug >= 1)
1897 fprintf_unfiltered (gdb_stdlog,
1898 "regular frame id: code %s, stack %s, "
1899 "special %s, this_frame %s\n",
1900 paddress (gdbarch, this_id->code_addr),
1901 paddress (gdbarch, this_id->stack_addr),
1902 paddress (gdbarch, cache->bsp),
1903 host_address_to_string (this_frame));
1904 }
1905
1906 static struct value *
1907 ia64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
1908 int regnum)
1909 {
1910 struct gdbarch *gdbarch = get_frame_arch (this_frame);
1911 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1912 struct ia64_frame_cache *cache = ia64_frame_cache (this_frame, this_cache);
1913 gdb_byte buf[8];
1914
1915 gdb_assert (regnum >= 0);
1916
1917 if (!target_has_registers)
1918 error (_("No registers."));
1919
1920 if (regnum == gdbarch_sp_regnum (gdbarch))
1921 return frame_unwind_got_constant (this_frame, regnum, cache->base);
1922
1923 else if (regnum == IA64_BSP_REGNUM)
1924 {
1925 struct value *val;
1926 CORE_ADDR prev_cfm, bsp, prev_bsp;
1927
1928 /* We want to calculate the previous bsp as the end of the previous
1929 register stack frame. This corresponds to what the hardware bsp
1930 register will be if we pop the frame back which is why we might
1931 have been called. We know the beginning of the current frame is
1932 cache->bsp - cache->sof. This value in the previous frame points
1933 to the start of the output registers. We can calculate the end of
1934 that frame by adding the size of output:
1935 (sof (size of frame) - sol (size of locals)). */
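      /* For example, if the previous CFM yields sof = 10 and sol = 4, the
         previous frame's output region is 6 registers, so the previous bsp
         sits 6 register slots (plus any intervening NaT collection slot,
         which rse_address_add skips) above this frame's bof.  */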
1936 val = ia64_frame_prev_register (this_frame, this_cache, IA64_CFM_REGNUM);
1937 prev_cfm = extract_unsigned_integer (value_contents_all (val),
1938 8, byte_order);
1939 bsp = rse_address_add (cache->bsp, -(cache->sof));
1940 prev_bsp =
1941 rse_address_add (bsp, (prev_cfm & 0x7f) - ((prev_cfm >> 7) & 0x7f));
1942
1943 return frame_unwind_got_constant (this_frame, regnum, prev_bsp);
1944 }
1945
1946 else if (regnum == IA64_CFM_REGNUM)
1947 {
1948 CORE_ADDR addr = cache->saved_regs[IA64_CFM_REGNUM];
1949
1950 if (addr != 0)
1951 return frame_unwind_got_memory (this_frame, regnum, addr);
1952
1953 if (cache->prev_cfm)
1954 return frame_unwind_got_constant (this_frame, regnum, cache->prev_cfm);
1955
1956 if (cache->frameless)
1957 return frame_unwind_got_register (this_frame, IA64_PFS_REGNUM,
1958 IA64_PFS_REGNUM);
1959 return frame_unwind_got_register (this_frame, regnum, 0);
1960 }
1961
1962 else if (regnum == IA64_VFP_REGNUM)
1963 {
1964 /* If the function in question uses an automatic register (r32-r127)
1965 for the frame pointer, it'll be found by ia64_find_saved_register()
1966 above. If the function lacks one of these frame pointers, we can
1967 still provide a value since we know the size of the frame. */
1968 return frame_unwind_got_constant (this_frame, regnum, cache->base);
1969 }
1970
1971 else if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
1972 {
1973 struct value *pr_val;
1974 ULONGEST prN;
1975
1976 pr_val = ia64_frame_prev_register (this_frame, this_cache,
1977 IA64_PR_REGNUM);
1978 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
1979 {
1980 /* Fetch predicate register rename base from current frame
1981 marker for this frame. */
1982 int rrb_pr = (cache->cfm >> 32) & 0x3f;
1983
1984 /* Adjust the register number to account for register rotation. */
1985 regnum = VP16_REGNUM + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
1986 }
1987 prN = extract_bit_field (value_contents_all (pr_val),
1988 regnum - VP0_REGNUM, 1);
1989 return frame_unwind_got_constant (this_frame, regnum, prN);
1990 }
1991
1992 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT31_REGNUM)
1993 {
1994 struct value *unat_val;
1995 ULONGEST unatN;
1996 unat_val = ia64_frame_prev_register (this_frame, this_cache,
1997 IA64_UNAT_REGNUM);
1998 unatN = extract_bit_field (value_contents_all (unat_val),
1999 regnum - IA64_NAT0_REGNUM, 1);
2000 return frame_unwind_got_constant (this_frame, regnum, unatN);
2001 }
2002
2003 else if (IA64_NAT32_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
2004 {
2005 int natval = 0;
2006 /* Find address of general register corresponding to nat bit we're
2007 interested in. */
2008 CORE_ADDR gr_addr;
2009
2010 gr_addr = cache->saved_regs[regnum - IA64_NAT0_REGNUM + IA64_GR0_REGNUM];
2011
2012 if (gr_addr != 0)
2013 {
2014 /* Compute address of nat collection bits. */
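          /* Every 64-slot (512-byte) region of the backing store reserves
             its last 8-byte slot -- the one whose address has bits 3..8 all
             set -- as the NaT collection for that region; ORing with 0x1f8
             yields the collection covering GR_ADDR, and GR_ADDR's slot
             number within the region selects the bit.  */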
2015 CORE_ADDR nat_addr = gr_addr | 0x1f8;
2016 CORE_ADDR bsp;
2017 CORE_ADDR nat_collection;
2018 int nat_bit;
2019
2020 /* If our nat collection address is bigger than bsp, we have to get
2021 the nat collection from rnat. Otherwise, we fetch the nat
2022 collection from the computed address. */
2023 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2024 bsp = extract_unsigned_integer (buf, 8, byte_order);
2025 if (nat_addr >= bsp)
2026 {
2027 get_frame_register (this_frame, IA64_RNAT_REGNUM, buf);
2028 nat_collection = extract_unsigned_integer (buf, 8, byte_order);
2029 }
2030 else
2031 nat_collection = read_memory_integer (nat_addr, 8, byte_order);
2032 nat_bit = (gr_addr >> 3) & 0x3f;
2033 natval = (nat_collection >> nat_bit) & 1;
2034 }
2035
2036 return frame_unwind_got_constant (this_frame, regnum, natval);
2037 }
2038
2039 else if (regnum == IA64_IP_REGNUM)
2040 {
2041 CORE_ADDR pc = 0;
2042 CORE_ADDR addr = cache->saved_regs[IA64_VRAP_REGNUM];
2043
2044 if (addr != 0)
2045 {
2046 read_memory (addr, buf, register_size (gdbarch, IA64_IP_REGNUM));
2047 pc = extract_unsigned_integer (buf, 8, byte_order);
2048 }
2049 else if (cache->frameless)
2050 {
2051 get_frame_register (this_frame, IA64_BR0_REGNUM, buf);
2052 pc = extract_unsigned_integer (buf, 8, byte_order);
2053 }
2054 pc &= ~0xf;
2055 return frame_unwind_got_constant (this_frame, regnum, pc);
2056 }
2057
2058 else if (regnum == IA64_PSR_REGNUM)
2059 {
2060 /* We don't know how to get the complete previous PSR, but we need it
2061 for the slot information when we unwind the pc (pc is formed of IP
2062 register plus slot information from PSR). To get the previous
2063 slot information, we mask it off the return address. */
2064 ULONGEST slot_num = 0;
2065 CORE_ADDR pc = 0;
2066 CORE_ADDR psr = 0;
2067 CORE_ADDR addr = cache->saved_regs[IA64_VRAP_REGNUM];
2068
2069 get_frame_register (this_frame, IA64_PSR_REGNUM, buf);
2070 psr = extract_unsigned_integer (buf, 8, byte_order);
2071
2072 if (addr != 0)
2073 {
2074 read_memory (addr, buf, register_size (gdbarch, IA64_IP_REGNUM));
2075 pc = extract_unsigned_integer (buf, 8, byte_order);
2076 }
2077 else if (cache->frameless)
2078 {
2079 get_frame_register (this_frame, IA64_BR0_REGNUM, buf);
2080 pc = extract_unsigned_integer (buf, 8, byte_order);
2081 }
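      /* Bits 41-42 of the PSR form the ri (restart instruction slot) field;
         clear it, then splice in the slot number carried in the low bits of
         the return address computed above.  */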
2082 psr &= ~(3LL << 41);
2083 slot_num = pc & 0x3LL;
2084 psr |= (CORE_ADDR)slot_num << 41;
2085 return frame_unwind_got_constant (this_frame, regnum, psr);
2086 }
2087
2088 else if (regnum == IA64_BR0_REGNUM)
2089 {
2090 CORE_ADDR addr = cache->saved_regs[IA64_BR0_REGNUM];
2091
2092 if (addr != 0)
2093 return frame_unwind_got_memory (this_frame, regnum, addr);
2094
2095 return frame_unwind_got_constant (this_frame, regnum, 0);
2096 }
2097
2098 else if ((regnum >= IA64_GR32_REGNUM && regnum <= IA64_GR127_REGNUM)
2099 || (regnum >= V32_REGNUM && regnum <= V127_REGNUM))
2100 {
2101 CORE_ADDR addr = 0;
2102
2103 if (regnum >= V32_REGNUM)
2104 regnum = IA64_GR32_REGNUM + (regnum - V32_REGNUM);
2105 addr = cache->saved_regs[regnum];
2106 if (addr != 0)
2107 return frame_unwind_got_memory (this_frame, regnum, addr);
2108
2109 if (cache->frameless)
2110 {
2111 struct value *reg_val;
2112 CORE_ADDR prev_cfm, prev_bsp, prev_bof;
2113
2114 /* FIXME: brobecker/2008-05-01: Doesn't this seem redundant
2115 with the same code above? */
2116 if (regnum >= V32_REGNUM)
2117 regnum = IA64_GR32_REGNUM + (regnum - V32_REGNUM);
2118 reg_val = ia64_frame_prev_register (this_frame, this_cache,
2119 IA64_CFM_REGNUM);
2120 prev_cfm = extract_unsigned_integer (value_contents_all (reg_val),
2121 8, byte_order);
2122 reg_val = ia64_frame_prev_register (this_frame, this_cache,
2123 IA64_BSP_REGNUM);
2124 prev_bsp = extract_unsigned_integer (value_contents_all (reg_val),
2125 8, byte_order);
2126 prev_bof = rse_address_add (prev_bsp, -(prev_cfm & 0x7f));
2127
2128 addr = rse_address_add (prev_bof, (regnum - IA64_GR32_REGNUM));
2129 return frame_unwind_got_memory (this_frame, regnum, addr);
2130 }
2131
2132 return frame_unwind_got_constant (this_frame, regnum, 0);
2133 }
2134
2135 else /* All other registers. */
2136 {
2137 CORE_ADDR addr = 0;
2138
2139 if (IA64_FR32_REGNUM <= regnum && regnum <= IA64_FR127_REGNUM)
2140 {
2141 /* Fetch floating point register rename base from current
2142 frame marker for this frame. */
2143 int rrb_fr = (cache->cfm >> 25) & 0x7f;
2144
2145 /* Adjust the floating point register number to account for
2146 register rotation. */
2147 regnum = IA64_FR32_REGNUM
2148 + ((regnum - IA64_FR32_REGNUM) + rrb_fr) % 96;
2149 }
2150
2151 /* If we have stored a memory address, access the register. */
2152 addr = cache->saved_regs[regnum];
2153 if (addr != 0)
2154 return frame_unwind_got_memory (this_frame, regnum, addr);
2155 /* Otherwise, punt and get the current value of the register. */
2156 else
2157 return frame_unwind_got_register (this_frame, regnum, regnum);
2158 }
2159 }
2160
2161 static const struct frame_unwind ia64_frame_unwind =
2162 {
2163 NORMAL_FRAME,
2164 default_frame_unwind_stop_reason,
2165 &ia64_frame_this_id,
2166 &ia64_frame_prev_register,
2167 NULL,
2168 default_frame_sniffer
2169 };
2170
2171 /* Signal trampolines. */
2172
2173 static void
2174 ia64_sigtramp_frame_init_saved_regs (struct frame_info *this_frame,
2175 struct ia64_frame_cache *cache)
2176 {
2177 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2178 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2179
2180 if (tdep->sigcontext_register_address)
2181 {
2182 int regno;
2183
2184 cache->saved_regs[IA64_VRAP_REGNUM]
2185 = tdep->sigcontext_register_address (gdbarch, cache->base,
2186 IA64_IP_REGNUM);
2187 cache->saved_regs[IA64_CFM_REGNUM]
2188 = tdep->sigcontext_register_address (gdbarch, cache->base,
2189 IA64_CFM_REGNUM);
2190 cache->saved_regs[IA64_PSR_REGNUM]
2191 = tdep->sigcontext_register_address (gdbarch, cache->base,
2192 IA64_PSR_REGNUM);
2193 cache->saved_regs[IA64_BSP_REGNUM]
2194 = tdep->sigcontext_register_address (gdbarch, cache->base,
2195 IA64_BSP_REGNUM);
2196 cache->saved_regs[IA64_RNAT_REGNUM]
2197 = tdep->sigcontext_register_address (gdbarch, cache->base,
2198 IA64_RNAT_REGNUM);
2199 cache->saved_regs[IA64_CCV_REGNUM]
2200 = tdep->sigcontext_register_address (gdbarch, cache->base,
2201 IA64_CCV_REGNUM);
2202 cache->saved_regs[IA64_UNAT_REGNUM]
2203 = tdep->sigcontext_register_address (gdbarch, cache->base,
2204 IA64_UNAT_REGNUM);
2205 cache->saved_regs[IA64_FPSR_REGNUM]
2206 = tdep->sigcontext_register_address (gdbarch, cache->base,
2207 IA64_FPSR_REGNUM);
2208 cache->saved_regs[IA64_PFS_REGNUM]
2209 = tdep->sigcontext_register_address (gdbarch, cache->base,
2210 IA64_PFS_REGNUM);
2211 cache->saved_regs[IA64_LC_REGNUM]
2212 = tdep->sigcontext_register_address (gdbarch, cache->base,
2213 IA64_LC_REGNUM);
2214
2215 for (regno = IA64_GR1_REGNUM; regno <= IA64_GR31_REGNUM; regno++)
2216 cache->saved_regs[regno] =
2217 tdep->sigcontext_register_address (gdbarch, cache->base, regno);
2218 for (regno = IA64_BR0_REGNUM; regno <= IA64_BR7_REGNUM; regno++)
2219 cache->saved_regs[regno] =
2220 tdep->sigcontext_register_address (gdbarch, cache->base, regno);
2221 for (regno = IA64_FR2_REGNUM; regno <= IA64_FR31_REGNUM; regno++)
2222 cache->saved_regs[regno] =
2223 tdep->sigcontext_register_address (gdbarch, cache->base, regno);
2224 }
2225 }
2226
2227 static struct ia64_frame_cache *
2228 ia64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
2229 {
2230 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2231 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2232 struct ia64_frame_cache *cache;
2233 gdb_byte buf[8];
2234
2235 if (*this_cache)
2236 return (struct ia64_frame_cache *) *this_cache;
2237
2238 cache = ia64_alloc_frame_cache ();
2239
2240 get_frame_register (this_frame, sp_regnum, buf);
2241 /* Note that frame size is hard-coded below. We cannot calculate it
2242 via prologue examination. */
2243 cache->base = extract_unsigned_integer (buf, 8, byte_order) + 16;
2244
2245 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2246 cache->bsp = extract_unsigned_integer (buf, 8, byte_order);
2247
2248 get_frame_register (this_frame, IA64_CFM_REGNUM, buf);
2249 cache->cfm = extract_unsigned_integer (buf, 8, byte_order);
2250 cache->sof = cache->cfm & 0x7f;
2251
2252 ia64_sigtramp_frame_init_saved_regs (this_frame, cache);
2253
2254 *this_cache = cache;
2255 return cache;
2256 }
2257
2258 static void
2259 ia64_sigtramp_frame_this_id (struct frame_info *this_frame,
2260 void **this_cache, struct frame_id *this_id)
2261 {
2262 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2263 struct ia64_frame_cache *cache =
2264 ia64_sigtramp_frame_cache (this_frame, this_cache);
2265
2266 (*this_id) = frame_id_build_special (cache->base,
2267 get_frame_pc (this_frame),
2268 cache->bsp);
2269 if (gdbarch_debug >= 1)
2270 fprintf_unfiltered (gdb_stdlog,
2271 "sigtramp frame id: code %s, stack %s, "
2272 "special %s, this_frame %s\n",
2273 paddress (gdbarch, this_id->code_addr),
2274 paddress (gdbarch, this_id->stack_addr),
2275 paddress (gdbarch, cache->bsp),
2276 host_address_to_string (this_frame));
2277 }
2278
2279 static struct value *
2280 ia64_sigtramp_frame_prev_register (struct frame_info *this_frame,
2281 void **this_cache, int regnum)
2282 {
2283 struct ia64_frame_cache *cache =
2284 ia64_sigtramp_frame_cache (this_frame, this_cache);
2285
2286 gdb_assert (regnum >= 0);
2287
2288 if (!target_has_registers)
2289 error (_("No registers."));
2290
2291 if (regnum == IA64_IP_REGNUM)
2292 {
2293 CORE_ADDR pc = 0;
2294 CORE_ADDR addr = cache->saved_regs[IA64_VRAP_REGNUM];
2295
2296 if (addr != 0)
2297 {
2298 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2299 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2300 pc = read_memory_unsigned_integer (addr, 8, byte_order);
2301 }
2302 pc &= ~0xf;
2303 return frame_unwind_got_constant (this_frame, regnum, pc);
2304 }
2305
2306 else if ((regnum >= IA64_GR32_REGNUM && regnum <= IA64_GR127_REGNUM)
2307 || (regnum >= V32_REGNUM && regnum <= V127_REGNUM))
2308 {
2309 CORE_ADDR addr = 0;
2310
2311 if (regnum >= V32_REGNUM)
2312 regnum = IA64_GR32_REGNUM + (regnum - V32_REGNUM);
2313 addr = cache->saved_regs[regnum];
2314 if (addr != 0)
2315 return frame_unwind_got_memory (this_frame, regnum, addr);
2316
2317 return frame_unwind_got_constant (this_frame, regnum, 0);
2318 }
2319
2320 else /* All other registers not listed above. */
2321 {
2322 CORE_ADDR addr = cache->saved_regs[regnum];
2323
2324 if (addr != 0)
2325 return frame_unwind_got_memory (this_frame, regnum, addr);
2326
2327 return frame_unwind_got_constant (this_frame, regnum, 0);
2328 }
2329 }
2330
2331 static int
2332 ia64_sigtramp_frame_sniffer (const struct frame_unwind *self,
2333 struct frame_info *this_frame,
2334 void **this_cache)
2335 {
2336 struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
2337 if (tdep->pc_in_sigtramp)
2338 {
2339 CORE_ADDR pc = get_frame_pc (this_frame);
2340
2341 if (tdep->pc_in_sigtramp (pc))
2342 return 1;
2343 }
2344
2345 return 0;
2346 }
2347
2348 static const struct frame_unwind ia64_sigtramp_frame_unwind =
2349 {
2350 SIGTRAMP_FRAME,
2351 default_frame_unwind_stop_reason,
2352 ia64_sigtramp_frame_this_id,
2353 ia64_sigtramp_frame_prev_register,
2354 NULL,
2355 ia64_sigtramp_frame_sniffer
2356 };
2357
2358 \f
2359
2360 static CORE_ADDR
2361 ia64_frame_base_address (struct frame_info *this_frame, void **this_cache)
2362 {
2363 struct ia64_frame_cache *cache = ia64_frame_cache (this_frame, this_cache);
2364
2365 return cache->base;
2366 }
2367
2368 static const struct frame_base ia64_frame_base =
2369 {
2370 &ia64_frame_unwind,
2371 ia64_frame_base_address,
2372 ia64_frame_base_address,
2373 ia64_frame_base_address
2374 };
2375
2376 #ifdef HAVE_LIBUNWIND_IA64_H
2377
2378 struct ia64_unwind_table_entry
2379 {
2380 unw_word_t start_offset;
2381 unw_word_t end_offset;
2382 unw_word_t info_offset;
2383 };
2384
2385 static __inline__ uint64_t
2386 ia64_rse_slot_num (uint64_t addr)
2387 {
2388 return (addr >> 3) & 0x3f;
2389 }
2390
2391 /* Skip over a designated number of registers in the backing
2392 store, remembering that every 64th position holds a NaT collection. */
2393 static __inline__ uint64_t
2394 ia64_rse_skip_regs (uint64_t addr, long num_regs)
2395 {
2396 long delta = ia64_rse_slot_num(addr) + num_regs;
2397
2398 if (num_regs < 0)
2399 delta -= 0x3e;
2400 return addr + ((num_regs + delta/0x3f) << 3);
2401 }
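
/* Worked example (illustrative, not part of the original file): starting
   from a backing-store address whose slot number is 0, skipping 10
   registers advances 10 slots (addr + 80), but skipping 70 registers
   advances 71 slots (addr + 568) because slot 63 holds the NaT collection
   rather than a register.  The 0x3e adjustment handles the symmetric case
   when stepping backwards.  */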
2402
2403 /* Gdb ia64-libunwind-tdep callback function to convert from an ia64 gdb
2404 register number to a libunwind register number. */
2405 static int
2406 ia64_gdb2uw_regnum (int regnum)
2407 {
2408 if (regnum == sp_regnum)
2409 return UNW_IA64_SP;
2410 else if (regnum == IA64_BSP_REGNUM)
2411 return UNW_IA64_BSP;
2412 else if ((unsigned) (regnum - IA64_GR0_REGNUM) < 128)
2413 return UNW_IA64_GR + (regnum - IA64_GR0_REGNUM);
2414 else if ((unsigned) (regnum - V32_REGNUM) < 95)
2415 return UNW_IA64_GR + 32 + (regnum - V32_REGNUM);
2416 else if ((unsigned) (regnum - IA64_FR0_REGNUM) < 128)
2417 return UNW_IA64_FR + (regnum - IA64_FR0_REGNUM);
2418 else if ((unsigned) (regnum - IA64_PR0_REGNUM) < 64)
2419 return -1;
2420 else if ((unsigned) (regnum - IA64_BR0_REGNUM) < 8)
2421 return UNW_IA64_BR + (regnum - IA64_BR0_REGNUM);
2422 else if (regnum == IA64_PR_REGNUM)
2423 return UNW_IA64_PR;
2424 else if (regnum == IA64_IP_REGNUM)
2425 return UNW_REG_IP;
2426 else if (regnum == IA64_CFM_REGNUM)
2427 return UNW_IA64_CFM;
2428 else if ((unsigned) (regnum - IA64_AR0_REGNUM) < 128)
2429 return UNW_IA64_AR + (regnum - IA64_AR0_REGNUM);
2430 else if ((unsigned) (regnum - IA64_NAT0_REGNUM) < 128)
2431 return UNW_IA64_NAT + (regnum - IA64_NAT0_REGNUM);
2432 else
2433 return -1;
2434 }
2435
2436 /* Gdb ia64-libunwind-tdep callback function to convert from a libunwind
2437 register number to an ia64 gdb register number. */
2438 static int
2439 ia64_uw2gdb_regnum (int uw_regnum)
2440 {
2441 if (uw_regnum == UNW_IA64_SP)
2442 return sp_regnum;
2443 else if (uw_regnum == UNW_IA64_BSP)
2444 return IA64_BSP_REGNUM;
2445 else if ((unsigned) (uw_regnum - UNW_IA64_GR) < 32)
2446 return IA64_GR0_REGNUM + (uw_regnum - UNW_IA64_GR);
2447 else if ((unsigned) (uw_regnum - UNW_IA64_GR) < 128)
2448 return V32_REGNUM + (uw_regnum - (IA64_GR0_REGNUM + 32));
2449 else if ((unsigned) (uw_regnum - UNW_IA64_FR) < 128)
2450 return IA64_FR0_REGNUM + (uw_regnum - UNW_IA64_FR);
2451 else if ((unsigned) (uw_regnum - UNW_IA64_BR) < 8)
2452 return IA64_BR0_REGNUM + (uw_regnum - UNW_IA64_BR);
2453 else if (uw_regnum == UNW_IA64_PR)
2454 return IA64_PR_REGNUM;
2455 else if (uw_regnum == UNW_REG_IP)
2456 return IA64_IP_REGNUM;
2457 else if (uw_regnum == UNW_IA64_CFM)
2458 return IA64_CFM_REGNUM;
2459 else if ((unsigned) (uw_regnum - UNW_IA64_AR) < 128)
2460 return IA64_AR0_REGNUM + (uw_regnum - UNW_IA64_AR);
2461 else if ((unsigned) (uw_regnum - UNW_IA64_NAT) < 128)
2462 return IA64_NAT0_REGNUM + (uw_regnum - UNW_IA64_NAT);
2463 else
2464 return -1;
2465 }
2466
2467 /* Gdb ia64-libunwind-tdep callback function to report whether a register
2468 is a floating-point register. */
2469 static int
2470 ia64_is_fpreg (int uw_regnum)
2471 {
2472 return unw_is_fpreg (uw_regnum);
2473 }
2474
2475 /* Libunwind callback accessor function for general registers. */
2476 static int
2477 ia64_access_reg (unw_addr_space_t as, unw_regnum_t uw_regnum, unw_word_t *val,
2478 int write, void *arg)
2479 {
2480 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2481 unw_word_t bsp, sof, cfm, psr, ip;
2482 struct frame_info *this_frame = (struct frame_info *) arg;
2483 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2484 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2485 long new_sof, old_sof;
2486
2487 /* We never call any libunwind routines that need to write registers. */
2488 gdb_assert (!write);
2489
2490 switch (uw_regnum)
2491 {
2492 case UNW_REG_IP:
2493 /* Libunwind expects to see the pc value which means the slot number
2494 from the psr must be merged with the ip word address. */
2495 ip = get_frame_register_unsigned (this_frame, IA64_IP_REGNUM);
2496 psr = get_frame_register_unsigned (this_frame, IA64_PSR_REGNUM);
2497 *val = ip | ((psr >> 41) & 0x3);
2498 break;
2499
2500 case UNW_IA64_AR_BSP:
2501 /* Libunwind expects to see the beginning of the current
2502 register frame so we must account for the fact that
2503 ptrace() will return a value for bsp that points *after*
2504 the current register frame. */
2505 bsp = get_frame_register_unsigned (this_frame, IA64_BSP_REGNUM);
2506 cfm = get_frame_register_unsigned (this_frame, IA64_CFM_REGNUM);
2507 sof = gdbarch_tdep (gdbarch)->size_of_register_frame (this_frame, cfm);
2508 *val = ia64_rse_skip_regs (bsp, -sof);
2509 break;
2510
2511 case UNW_IA64_AR_BSPSTORE:
2512 /* Libunwind wants bspstore to be after the current register frame.
2513 This is what ptrace() and gdb treat as the regular bsp value. */
2514 *val = get_frame_register_unsigned (this_frame, IA64_BSP_REGNUM);
2515 break;
2516
2517 default:
2518 /* For all other registers, just unwind the value directly. */
2519 *val = get_frame_register_unsigned (this_frame, regnum);
2520 break;
2521 }
2522
2523 if (gdbarch_debug >= 1)
2524 fprintf_unfiltered (gdb_stdlog,
2525 " access_reg: from cache: %4s=%s\n",
2526 (((unsigned) regnum <= IA64_NAT127_REGNUM)
2527 ? ia64_register_names[regnum] : "r??"),
2528 paddress (gdbarch, *val));
2529 return 0;
2530 }
2531
2532 /* Libunwind callback accessor function for floating-point registers. */
2533 static int
2534 ia64_access_fpreg (unw_addr_space_t as, unw_regnum_t uw_regnum,
2535 unw_fpreg_t *val, int write, void *arg)
2536 {
2537 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2538 struct frame_info *this_frame = (struct frame_info *) arg;
2539
2540 /* We never call any libunwind routines that need to write registers. */
2541 gdb_assert (!write);
2542
2543 get_frame_register (this_frame, regnum, (gdb_byte *) val);
2544
2545 return 0;
2546 }
2547
2548 /* Libunwind callback accessor function for top-level rse registers. */
2549 static int
2550 ia64_access_rse_reg (unw_addr_space_t as, unw_regnum_t uw_regnum,
2551 unw_word_t *val, int write, void *arg)
2552 {
2553 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2554 unw_word_t bsp, sof, cfm, psr, ip;
2555 struct regcache *regcache = (struct regcache *) arg;
2556 struct gdbarch *gdbarch = regcache->arch ();
2557 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2558 long new_sof, old_sof;
2559
2560 /* We never call any libunwind routines that need to write registers. */
2561 gdb_assert (!write);
2562
2563 switch (uw_regnum)
2564 {
2565 case UNW_REG_IP:
2566 /* Libunwind expects to see the pc value which means the slot number
2567 from the psr must be merged with the ip word address. */
2568 regcache_cooked_read_unsigned (regcache, IA64_IP_REGNUM, &ip);
2569 regcache_cooked_read_unsigned (regcache, IA64_PSR_REGNUM, &psr);
2570 *val = ip | ((psr >> 41) & 0x3);
2571 break;
2572
2573 case UNW_IA64_AR_BSP:
2574 /* Libunwind expects to see the beginning of the current
2575 register frame so we must account for the fact that
2576 ptrace() will return a value for bsp that points *after*
2577 the current register frame. */
2578 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
2579 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
2580 sof = (cfm & 0x7f);
2581 *val = ia64_rse_skip_regs (bsp, -sof);
2582 break;
2583
2584 case UNW_IA64_AR_BSPSTORE:
2585 /* Libunwind wants bspstore to be after the current register frame.
2586 This is what ptrace() and gdb treat as the regular bsp value. */
2587 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, val);
2588 break;
2589
2590 default:
2591 /* For all other registers, just unwind the value directly. */
2592 regcache_cooked_read_unsigned (regcache, regnum, val);
2593 break;
2594 }
2595
2596 if (gdbarch_debug >= 1)
2597 fprintf_unfiltered (gdb_stdlog,
2598 " access_rse_reg: from cache: %4s=%s\n",
2599 (((unsigned) regnum <= IA64_NAT127_REGNUM)
2600 ? ia64_register_names[regnum] : "r??"),
2601 paddress (gdbarch, *val));
2602
2603 return 0;
2604 }
2605
2606 /* Libunwind callback accessor function for top-level fp registers. */
2607 static int
2608 ia64_access_rse_fpreg (unw_addr_space_t as, unw_regnum_t uw_regnum,
2609 unw_fpreg_t *val, int write, void *arg)
2610 {
2611 int regnum = ia64_uw2gdb_regnum (uw_regnum);
2612 struct regcache *regcache = (struct regcache *) arg;
2613
2614 /* We never call any libunwind routines that need to write registers. */
2615 gdb_assert (!write);
2616
2617 regcache_cooked_read (regcache, regnum, (gdb_byte *) val);
2618
2619 return 0;
2620 }
2621
2622 /* Libunwind callback accessor function for accessing memory. */
2623 static int
2624 ia64_access_mem (unw_addr_space_t as,
2625 unw_word_t addr, unw_word_t *val,
2626 int write, void *arg)
2627 {
2628 if (addr - KERNEL_START < ktab_size)
2629 {
2630 unw_word_t *laddr = (unw_word_t*) ((char *) ktab
2631 + (addr - KERNEL_START));
2632
2633 if (write)
2634 *laddr = *val;
2635 else
2636 *val = *laddr;
2637 return 0;
2638 }
2639
2640 /* XXX do we need to normalize byte-order here? */
2641 if (write)
2642 return target_write_memory (addr, (gdb_byte *) val, sizeof (unw_word_t));
2643 else
2644 return target_read_memory (addr, (gdb_byte *) val, sizeof (unw_word_t));
2645 }
2646
2647 /* Call low-level function to access the kernel unwind table. */
2648 static LONGEST
2649 getunwind_table (gdb_byte **buf_p)
2650 {
2651 LONGEST x;
2652
2653 /* FIXME drow/2005-09-10: This code used to call
2654 ia64_linux_xfer_unwind_table directly to fetch the unwind table
2655 for the currently running ia64-linux kernel. That data should
2656 come from the core file and be accessed via the auxv vector; if
2657 we want to preserve the fallback to the running kernel's table, then
2658 we should find a way to override the corefile layer's
2659 xfer_partial method. */
2660
2661 x = target_read_alloc (&current_target, TARGET_OBJECT_UNWIND_TABLE,
2662 NULL, buf_p);
2663
2664 return x;
2665 }
2666
2667 /* Get the kernel unwind table. */
2668 static int
2669 get_kernel_table (unw_word_t ip, unw_dyn_info_t *di)
2670 {
2671 static struct ia64_table_entry *etab;
2672
2673 if (!ktab)
2674 {
2675 gdb_byte *ktab_buf;
2676 LONGEST size;
2677
2678 size = getunwind_table (&ktab_buf);
2679 if (size <= 0)
2680 return -UNW_ENOINFO;
2681
2682 ktab = (struct ia64_table_entry *) ktab_buf;
2683 ktab_size = size;
2684
2685 for (etab = ktab; etab->start_offset; ++etab)
2686 etab->info_offset += KERNEL_START;
2687 }
2688
2689 if (ip < ktab[0].start_offset || ip >= etab[-1].end_offset)
2690 return -UNW_ENOINFO;
2691
2692 di->format = UNW_INFO_FORMAT_TABLE;
2693 di->gp = 0;
2694 di->start_ip = ktab[0].start_offset;
2695 di->end_ip = etab[-1].end_offset;
2696 di->u.ti.name_ptr = (unw_word_t) "<kernel>";
2697 di->u.ti.segbase = 0;
2698 di->u.ti.table_len = ((char *) etab - (char *) ktab) / sizeof (unw_word_t);
2699 di->u.ti.table_data = (unw_word_t *) ktab;
2700
2701 if (gdbarch_debug >= 1)
2702 fprintf_unfiltered (gdb_stdlog, "get_kernel_table: found table `%s': "
2703 "segbase=%s, length=%s, gp=%s\n",
2704 (char *) di->u.ti.name_ptr,
2705 hex_string (di->u.ti.segbase),
2706 pulongest (di->u.ti.table_len),
2707 hex_string (di->gp));
2708 return 0;
2709 }
2710
2711 /* Find the unwind table entry for a specified address. */
2712 static int
2713 ia64_find_unwind_table (struct objfile *objfile, unw_word_t ip,
2714 unw_dyn_info_t *dip, void **buf)
2715 {
2716 Elf_Internal_Phdr *phdr, *p_text = NULL, *p_unwind = NULL;
2717 Elf_Internal_Ehdr *ehdr;
2718 unw_word_t segbase = 0;
2719 CORE_ADDR load_base;
2720 bfd *bfd;
2721 int i;
2722
2723 bfd = objfile->obfd;
2724
2725 ehdr = elf_tdata (bfd)->elf_header;
2726 phdr = elf_tdata (bfd)->phdr;
2727
2728 load_base = ANOFFSET (objfile->section_offsets, SECT_OFF_TEXT (objfile));
2729
2730 for (i = 0; i < ehdr->e_phnum; ++i)
2731 {
2732 switch (phdr[i].p_type)
2733 {
2734 case PT_LOAD:
2735 if ((unw_word_t) (ip - load_base - phdr[i].p_vaddr)
2736 < phdr[i].p_memsz)
2737 p_text = phdr + i;
2738 break;
2739
2740 case PT_IA_64_UNWIND:
2741 p_unwind = phdr + i;
2742 break;
2743
2744 default:
2745 break;
2746 }
2747 }
2748
2749 if (!p_text || !p_unwind)
2750 return -UNW_ENOINFO;
2751
2752 /* Verify that the segment that contains the IP also contains
2753 the static unwind table. If not, we may be in the Linux kernel's
2754 DSO gate page, in which case the unwind table is in another segment.
2755 Otherwise, we are dealing with runtime-generated code, for which we
2756 have no info here. */
2757 segbase = p_text->p_vaddr + load_base;
2758
2759 if ((p_unwind->p_vaddr - p_text->p_vaddr) >= p_text->p_memsz)
2760 {
2761 int ok = 0;
2762 for (i = 0; i < ehdr->e_phnum; ++i)
2763 {
2764 if (phdr[i].p_type == PT_LOAD
2765 && (p_unwind->p_vaddr - phdr[i].p_vaddr) < phdr[i].p_memsz)
2766 {
2767 ok = 1;
2768 /* Get the segbase from the section containing the
2769 libunwind table. */
2770 segbase = phdr[i].p_vaddr + load_base;
2771 }
2772 }
2773 if (!ok)
2774 return -UNW_ENOINFO;
2775 }
2776
2777 dip->start_ip = p_text->p_vaddr + load_base;
2778 dip->end_ip = dip->start_ip + p_text->p_memsz;
2779 dip->gp = ia64_find_global_pointer (get_objfile_arch (objfile), ip);
2780 dip->format = UNW_INFO_FORMAT_REMOTE_TABLE;
2781 dip->u.rti.name_ptr = (unw_word_t) bfd_get_filename (bfd);
2782 dip->u.rti.segbase = segbase;
2783 dip->u.rti.table_len = p_unwind->p_memsz / sizeof (unw_word_t);
2784 dip->u.rti.table_data = p_unwind->p_vaddr + load_base;
2785
2786 return 0;
2787 }
2788
2789 /* Libunwind callback accessor function to acquire procedure unwind-info. */
2790 static int
2791 ia64_find_proc_info_x (unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
2792 int need_unwind_info, void *arg)
2793 {
2794 struct obj_section *sec = find_pc_section (ip);
2795 unw_dyn_info_t di;
2796 int ret;
2797 void *buf = NULL;
2798
2799 if (!sec)
2800 {
2801 /* XXX This only works if the host and the target architecture are
2802 both ia64 and if they have (more or less) the same kernel
2803 version. */
2804 if (get_kernel_table (ip, &di) < 0)
2805 return -UNW_ENOINFO;
2806
2807 if (gdbarch_debug >= 1)
2808 fprintf_unfiltered (gdb_stdlog, "ia64_find_proc_info_x: %s -> "
2809 "(name=`%s',segbase=%s,start=%s,end=%s,gp=%s,"
2810 "length=%s,data=%s)\n",
2811 hex_string (ip), (char *)di.u.ti.name_ptr,
2812 hex_string (di.u.ti.segbase),
2813 hex_string (di.start_ip), hex_string (di.end_ip),
2814 hex_string (di.gp),
2815 pulongest (di.u.ti.table_len),
2816 hex_string ((CORE_ADDR)di.u.ti.table_data));
2817 }
2818 else
2819 {
2820 ret = ia64_find_unwind_table (sec->objfile, ip, &di, &buf);
2821 if (ret < 0)
2822 return ret;
2823
2824 if (gdbarch_debug >= 1)
2825 fprintf_unfiltered (gdb_stdlog, "ia64_find_proc_info_x: %s -> "
2826 "(name=`%s',segbase=%s,start=%s,end=%s,gp=%s,"
2827 "length=%s,data=%s)\n",
2828 hex_string (ip), (char *)di.u.rti.name_ptr,
2829 hex_string (di.u.rti.segbase),
2830 hex_string (di.start_ip), hex_string (di.end_ip),
2831 hex_string (di.gp),
2832 pulongest (di.u.rti.table_len),
2833 hex_string (di.u.rti.table_data));
2834 }
2835
2836 ret = libunwind_search_unwind_table (&as, ip, &di, pi, need_unwind_info,
2837 arg);
2838
2839 /* We no longer need the dyn info storage so free it. */
2840 xfree (buf);
2841
2842 return ret;
2843 }
2844
2845 /* Libunwind callback accessor function for cleanup. */
2846 static void
2847 ia64_put_unwind_info (unw_addr_space_t as,
2848 unw_proc_info_t *pip, void *arg)
2849 {
2850 /* Nothing required for now. */
2851 }
2852
2853 /* Libunwind callback accessor function to get head of the dynamic
2854 unwind-info registration list. */
2855 static int
2856 ia64_get_dyn_info_list (unw_addr_space_t as,
2857 unw_word_t *dilap, void *arg)
2858 {
2859 struct obj_section *text_sec;
2860 struct objfile *objfile;
2861 unw_word_t ip, addr;
2862 unw_dyn_info_t di;
2863 int ret;
2864
2865 if (!libunwind_is_initialized ())
2866 return -UNW_ENOINFO;
2867
2868 for (objfile = object_files; objfile; objfile = objfile->next)
2869 {
2870 void *buf = NULL;
2871
2872 text_sec = objfile->sections + SECT_OFF_TEXT (objfile);
2873 ip = obj_section_addr (text_sec);
2874 ret = ia64_find_unwind_table (objfile, ip, &di, &buf);
2875 if (ret >= 0)
2876 {
2877 addr = libunwind_find_dyn_list (as, &di, arg);
2878 /* We no longer need the dyn info storage so free it. */
2879 xfree (buf);
2880
2881 if (addr)
2882 {
2883 if (gdbarch_debug >= 1)
2884 fprintf_unfiltered (gdb_stdlog,
2885 "dynamic unwind table in objfile %s "
2886 "at %s (gp=%s)\n",
2887 bfd_get_filename (objfile->obfd),
2888 hex_string (addr), hex_string (di.gp));
2889 *dilap = addr;
2890 return 0;
2891 }
2892 }
2893 }
2894 return -UNW_ENOINFO;
2895 }
2896
2897
2898 /* Frame interface functions for libunwind. */
2899
2900 static void
2901 ia64_libunwind_frame_this_id (struct frame_info *this_frame, void **this_cache,
2902 struct frame_id *this_id)
2903 {
2904 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2905 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2906 struct frame_id id = outer_frame_id;
2907 gdb_byte buf[8];
2908 CORE_ADDR bsp;
2909
2910 libunwind_frame_this_id (this_frame, this_cache, &id);
2911 if (frame_id_eq (id, outer_frame_id))
2912 {
2913 (*this_id) = outer_frame_id;
2914 return;
2915 }
2916
2917 /* We must add the bsp as the special address for frame comparison
2918 purposes. */
2919 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
2920 bsp = extract_unsigned_integer (buf, 8, byte_order);
2921
2922 (*this_id) = frame_id_build_special (id.stack_addr, id.code_addr, bsp);
2923
2924 if (gdbarch_debug >= 1)
2925 fprintf_unfiltered (gdb_stdlog,
2926 "libunwind frame id: code %s, stack %s, "
2927 "special %s, this_frame %s\n",
2928 paddress (gdbarch, id.code_addr),
2929 paddress (gdbarch, id.stack_addr),
2930 paddress (gdbarch, bsp),
2931 host_address_to_string (this_frame));
2932 }
2933
2934 static struct value *
2935 ia64_libunwind_frame_prev_register (struct frame_info *this_frame,
2936 void **this_cache, int regnum)
2937 {
2938 int reg = regnum;
2939 struct gdbarch *gdbarch = get_frame_arch (this_frame);
2940 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2941 struct value *val;
2942
2943 if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
2944 reg = IA64_PR_REGNUM;
2945 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
2946 reg = IA64_UNAT_REGNUM;
2947
2948 /* Let libunwind do most of the work. */
2949 val = libunwind_frame_prev_register (this_frame, this_cache, reg);
2950
2951 if (VP0_REGNUM <= regnum && regnum <= VP63_REGNUM)
2952 {
2953 ULONGEST prN_val;
2954
2955 if (VP16_REGNUM <= regnum && regnum <= VP63_REGNUM)
2956 {
2957 int rrb_pr = 0;
2958 ULONGEST cfm;
2959
2960 /* Fetch predicate register rename base from current frame
2961 marker for this frame. */
2962 cfm = get_frame_register_unsigned (this_frame, IA64_CFM_REGNUM);
2963 rrb_pr = (cfm >> 32) & 0x3f;
2964
2965 /* Adjust the register number to account for register rotation. */
2966 regnum = VP16_REGNUM + ((regnum - VP16_REGNUM) + rrb_pr) % 48;
2967 }
2968 prN_val = extract_bit_field (value_contents_all (val),
2969 regnum - VP0_REGNUM, 1);
2970 return frame_unwind_got_constant (this_frame, regnum, prN_val);
2971 }
2972
2973 else if (IA64_NAT0_REGNUM <= regnum && regnum <= IA64_NAT127_REGNUM)
2974 {
2975 ULONGEST unatN_val;
2976
2977 unatN_val = extract_bit_field (value_contents_all (val),
2978 regnum - IA64_NAT0_REGNUM, 1);
2979 return frame_unwind_got_constant (this_frame, regnum, unatN_val);
2980 }
2981
2982 else if (regnum == IA64_BSP_REGNUM)
2983 {
2984 struct value *cfm_val;
2985 CORE_ADDR prev_bsp, prev_cfm;
2986
2987 /* We want to calculate the previous bsp as the end of the previous
2988 register stack frame. This corresponds to what the hardware bsp
2989 register will be if we pop the frame back which is why we might
2990 have been called. We know that libunwind will pass us back the
2991 beginning of the current frame so we should just add sof to it. */
2992 prev_bsp = extract_unsigned_integer (value_contents_all (val),
2993 8, byte_order);
2994 cfm_val = libunwind_frame_prev_register (this_frame, this_cache,
2995 IA64_CFM_REGNUM);
2996 prev_cfm = extract_unsigned_integer (value_contents_all (cfm_val),
2997 8, byte_order);
2998 prev_bsp = rse_address_add (prev_bsp, (prev_cfm & 0x7f));
2999
3000 return frame_unwind_got_constant (this_frame, regnum, prev_bsp);
3001 }
3002 else
3003 return val;
3004 }
3005
3006 static int
3007 ia64_libunwind_frame_sniffer (const struct frame_unwind *self,
3008 struct frame_info *this_frame,
3009 void **this_cache)
3010 {
3011 if (libunwind_is_initialized ()
3012 && libunwind_frame_sniffer (self, this_frame, this_cache))
3013 return 1;
3014
3015 return 0;
3016 }
3017
3018 static const struct frame_unwind ia64_libunwind_frame_unwind =
3019 {
3020 NORMAL_FRAME,
3021 default_frame_unwind_stop_reason,
3022 ia64_libunwind_frame_this_id,
3023 ia64_libunwind_frame_prev_register,
3024 NULL,
3025 ia64_libunwind_frame_sniffer,
3026 libunwind_frame_dealloc_cache
3027 };
3028
3029 static void
3030 ia64_libunwind_sigtramp_frame_this_id (struct frame_info *this_frame,
3031 void **this_cache,
3032 struct frame_id *this_id)
3033 {
3034 struct gdbarch *gdbarch = get_frame_arch (this_frame);
3035 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3036 gdb_byte buf[8];
3037 CORE_ADDR bsp;
3038 struct frame_id id = outer_frame_id;
3039 CORE_ADDR prev_ip;
3040
3041 libunwind_frame_this_id (this_frame, this_cache, &id);
3042 if (frame_id_eq (id, outer_frame_id))
3043 {
3044 (*this_id) = outer_frame_id;
3045 return;
3046 }
3047
3048 /* We must add the bsp as the special address for frame comparison
3049 purposes. */
3050 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
3051 bsp = extract_unsigned_integer (buf, 8, byte_order);
3052
3053 /* For a sigtramp frame, we don't make the check for previous ip being 0. */
3054 (*this_id) = frame_id_build_special (id.stack_addr, id.code_addr, bsp);
3055
3056 if (gdbarch_debug >= 1)
3057 fprintf_unfiltered (gdb_stdlog,
3058 "libunwind sigtramp frame id: code %s, "
3059 "stack %s, special %s, this_frame %s\n",
3060 paddress (gdbarch, id.code_addr),
3061 paddress (gdbarch, id.stack_addr),
3062 paddress (gdbarch, bsp),
3063 host_address_to_string (this_frame));
3064 }
3065
3066 static struct value *
3067 ia64_libunwind_sigtramp_frame_prev_register (struct frame_info *this_frame,
3068 void **this_cache, int regnum)
3069 {
3070 struct gdbarch *gdbarch = get_frame_arch (this_frame);
3071 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3072 struct value *prev_ip_val;
3073 CORE_ADDR prev_ip;
3074
3075 /* If the previous frame pc value is 0, then we want to use the SIGCONTEXT
3076 method of getting previous registers. */
3077 prev_ip_val = libunwind_frame_prev_register (this_frame, this_cache,
3078 IA64_IP_REGNUM);
3079 prev_ip = extract_unsigned_integer (value_contents_all (prev_ip_val),
3080 8, byte_order);
3081
3082 if (prev_ip == 0)
3083 {
3084 void *tmp_cache = NULL;
3085 return ia64_sigtramp_frame_prev_register (this_frame, &tmp_cache,
3086 regnum);
3087 }
3088 else
3089 return ia64_libunwind_frame_prev_register (this_frame, this_cache, regnum);
3090 }
3091
3092 static int
3093 ia64_libunwind_sigtramp_frame_sniffer (const struct frame_unwind *self,
3094 struct frame_info *this_frame,
3095 void **this_cache)
3096 {
3097 if (libunwind_is_initialized ())
3098 {
3099 if (libunwind_sigtramp_frame_sniffer (self, this_frame, this_cache))
3100 return 1;
3101 return 0;
3102 }
3103 else
3104 return ia64_sigtramp_frame_sniffer (self, this_frame, this_cache);
3105 }
3106
3107 static const struct frame_unwind ia64_libunwind_sigtramp_frame_unwind =
3108 {
3109 SIGTRAMP_FRAME,
3110 default_frame_unwind_stop_reason,
3111 ia64_libunwind_sigtramp_frame_this_id,
3112 ia64_libunwind_sigtramp_frame_prev_register,
3113 NULL,
3114 ia64_libunwind_sigtramp_frame_sniffer
3115 };
3116
3117 /* Set of libunwind callback accessor functions. */
3118 unw_accessors_t ia64_unw_accessors =
3119 {
3120 ia64_find_proc_info_x,
3121 ia64_put_unwind_info,
3122 ia64_get_dyn_info_list,
3123 ia64_access_mem,
3124 ia64_access_reg,
3125 ia64_access_fpreg,
3126 /* resume */
3127 /* get_proc_name */
3128 };
3129
3130 /* Set of special libunwind callback accessor functions for accessing
3131 the rse registers. At the top of the stack, we want libunwind to figure out
3132 how to read r32 - r127. Though usually they are found sequentially in
3133 memory starting from $bof, this is not always true. */
3134 unw_accessors_t ia64_unw_rse_accessors =
3135 {
3136 ia64_find_proc_info_x,
3137 ia64_put_unwind_info,
3138 ia64_get_dyn_info_list,
3139 ia64_access_mem,
3140 ia64_access_rse_reg,
3141 ia64_access_rse_fpreg,
3142 /* resume */
3143 /* get_proc_name */
3144 };
3145
3146 /* Set of ia64-libunwind-tdep gdb callbacks and data for generic
3147 ia64-libunwind-tdep code to use. */
3148 struct libunwind_descr ia64_libunwind_descr =
3149 {
3150 ia64_gdb2uw_regnum,
3151 ia64_uw2gdb_regnum,
3152 ia64_is_fpreg,
3153 &ia64_unw_accessors,
3154 &ia64_unw_rse_accessors,
3155 };
3156
3157 #endif /* HAVE_LIBUNWIND_IA64_H */
3158
3159 static int
3160 ia64_use_struct_convention (struct type *type)
3161 {
3162 struct type *float_elt_type;
3163
3164 /* Don't use the struct convention for anything but structure,
3165 union, or array types. */
3166 if (!(TYPE_CODE (type) == TYPE_CODE_STRUCT
3167 || TYPE_CODE (type) == TYPE_CODE_UNION
3168 || TYPE_CODE (type) == TYPE_CODE_ARRAY))
3169 return 0;
3170
3171 /* HFAs are structures (or arrays) consisting entirely of floating
3172 point values of the same length. Up to 8 of these are returned
3173 in registers. Don't use the struct convention when this is the
3174 case. */
3175 float_elt_type = is_float_or_hfa_type (type);
3176 if (float_elt_type != NULL
3177 && TYPE_LENGTH (type) / TYPE_LENGTH (float_elt_type) <= 8)
3178 return 0;
3179
3180 /* Other structs of length 32 or less are returned in r8-r11.
3181 Don't use the struct convention for those either. */
3182 return TYPE_LENGTH (type) > 32;
3183 }
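
/* For example: a struct of four doubles is an HFA and is returned in
   floating-point registers starting at f8; a 24-byte struct of integers
   fits within 32 bytes and comes back in r8-r10; a 40-byte struct exceeds
   32 bytes, so the struct-return convention applies.  */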
3184
3185 /* Return non-zero if TYPE is a structure or union type. */
3186
3187 static int
3188 ia64_struct_type_p (const struct type *type)
3189 {
3190 return (TYPE_CODE (type) == TYPE_CODE_STRUCT
3191 || TYPE_CODE (type) == TYPE_CODE_UNION);
3192 }
3193
3194 static void
3195 ia64_extract_return_value (struct type *type, struct regcache *regcache,
3196 gdb_byte *valbuf)
3197 {
3198 struct gdbarch *gdbarch = regcache->arch ();
3199 struct type *float_elt_type;
3200
3201 float_elt_type = is_float_or_hfa_type (type);
3202 if (float_elt_type != NULL)
3203 {
3204 gdb_byte from[IA64_FP_REGISTER_SIZE];
3205 int offset = 0;
3206 int regnum = IA64_FR8_REGNUM;
3207 int n = TYPE_LENGTH (type) / TYPE_LENGTH (float_elt_type);
3208
3209 while (n-- > 0)
3210 {
3211 regcache_cooked_read (regcache, regnum, from);
3212 target_float_convert (from, ia64_ext_type (gdbarch),
3213 valbuf + offset, float_elt_type);
3214 offset += TYPE_LENGTH (float_elt_type);
3215 regnum++;
3216 }
3217 }
3218 else if (!ia64_struct_type_p (type) && TYPE_LENGTH (type) < 8)
3219 {
3220 /* This is an integral value, and its size is less than 8 bytes.
3221 These values are LSB-aligned, so extract the relevant bytes,
3222 and copy them into VALBUF. */
3223 /* brobecker/2005-12-30: Actually, all integral values are LSB aligned,
3224 so I suppose we should also add handling here for integral values
3225 whose size is greater than 8. But I wasn't able to create such
3226 a type, neither in C nor in Ada, so not worrying about these yet. */
3227 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3228 ULONGEST val;
3229
3230 regcache_cooked_read_unsigned (regcache, IA64_GR8_REGNUM, &val);
3231 store_unsigned_integer (valbuf, TYPE_LENGTH (type), byte_order, val);
3232 }
3233 else
3234 {
3235 ULONGEST val;
3236 int offset = 0;
3237 int regnum = IA64_GR8_REGNUM;
3238 int reglen = TYPE_LENGTH (register_type (gdbarch, IA64_GR8_REGNUM));
3239 int n = TYPE_LENGTH (type) / reglen;
3240 int m = TYPE_LENGTH (type) % reglen;
3241
3242 while (n-- > 0)
3243 {
3244 ULONGEST val;
3245 regcache_cooked_read_unsigned (regcache, regnum, &val);
3246 memcpy ((char *)valbuf + offset, &val, reglen);
3247 offset += reglen;
3248 regnum++;
3249 }
3250
3251 if (m)
3252 {
3253 regcache_cooked_read_unsigned (regcache, regnum, &val);
3254 memcpy ((char *)valbuf + offset, &val, m);
3255 }
3256 }
3257 }
3258
3259 static void
3260 ia64_store_return_value (struct type *type, struct regcache *regcache,
3261 const gdb_byte *valbuf)
3262 {
3263 struct gdbarch *gdbarch = regcache->arch ();
3264 struct type *float_elt_type;
3265
3266 float_elt_type = is_float_or_hfa_type (type);
3267 if (float_elt_type != NULL)
3268 {
3269 gdb_byte to[IA64_FP_REGISTER_SIZE];
3270 int offset = 0;
3271 int regnum = IA64_FR8_REGNUM;
3272 int n = TYPE_LENGTH (type) / TYPE_LENGTH (float_elt_type);
3273
3274 while (n-- > 0)
3275 {
3276 target_float_convert (valbuf + offset, float_elt_type,
3277 to, ia64_ext_type (gdbarch));
3278 regcache_cooked_write (regcache, regnum, to);
3279 offset += TYPE_LENGTH (float_elt_type);
3280 regnum++;
3281 }
3282 }
3283 else
3284 {
3285 ULONGEST val;
3286 int offset = 0;
3287 int regnum = IA64_GR8_REGNUM;
3288 int reglen = TYPE_LENGTH (register_type (gdbarch, IA64_GR8_REGNUM));
3289 int n = TYPE_LENGTH (type) / reglen;
3290 int m = TYPE_LENGTH (type) % reglen;
3291
3292 while (n-- > 0)
3293 {
3294 ULONGEST val;
3295 memcpy (&val, (char *)valbuf + offset, reglen);
3296 regcache_cooked_write_unsigned (regcache, regnum, val);
3297 offset += reglen;
3298 regnum++;
3299 }
3300
3301 if (m)
3302 {
3303 memcpy (&val, (char *)valbuf + offset, m);
3304 regcache_cooked_write_unsigned (regcache, regnum, val);
3305 }
3306 }
3307 }
3308
3309 static enum return_value_convention
3310 ia64_return_value (struct gdbarch *gdbarch, struct value *function,
3311 struct type *valtype, struct regcache *regcache,
3312 gdb_byte *readbuf, const gdb_byte *writebuf)
3313 {
3314 int struct_return = ia64_use_struct_convention (valtype);
3315
3316 if (writebuf != NULL)
3317 {
3318 gdb_assert (!struct_return);
3319 ia64_store_return_value (valtype, regcache, writebuf);
3320 }
3321
3322 if (readbuf != NULL)
3323 {
3324 gdb_assert (!struct_return);
3325 ia64_extract_return_value (valtype, regcache, readbuf);
3326 }
3327
3328 if (struct_return)
3329 return RETURN_VALUE_STRUCT_CONVENTION;
3330 else
3331 return RETURN_VALUE_REGISTER_CONVENTION;
3332 }
3333
3334 static int
3335 is_float_or_hfa_type_recurse (struct type *t, struct type **etp)
3336 {
3337 switch (TYPE_CODE (t))
3338 {
3339 case TYPE_CODE_FLT:
3340 if (*etp)
3341 return TYPE_LENGTH (*etp) == TYPE_LENGTH (t);
3342 else
3343 {
3344 *etp = t;
3345 return 1;
3346 }
3347 break;
3348 case TYPE_CODE_ARRAY:
3349 return
3350 is_float_or_hfa_type_recurse (check_typedef (TYPE_TARGET_TYPE (t)),
3351 etp);
3352 break;
3353 case TYPE_CODE_STRUCT:
3354 {
3355 int i;
3356
3357 for (i = 0; i < TYPE_NFIELDS (t); i++)
3358 if (!is_float_or_hfa_type_recurse
3359 (check_typedef (TYPE_FIELD_TYPE (t, i)), etp))
3360 return 0;
3361 return 1;
3362 }
3363 break;
3364 default:
3365 return 0;
3366 break;
3367 }
3368 }
3369
3370 /* Determine if the given type is one of the floating point types or
3371 an HFA (which is a struct, array, or combination thereof whose
3372 bottom-most elements are all of the same floating point type). */
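/* For example, "struct { double x, y; }" and "float v[4]" are HFAs
 (of double and float respectively), while "struct { float f; int i; }"
 is not, since its bottom-most elements are not all the same floating
 point type. */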
3373
3374 static struct type *
3375 is_float_or_hfa_type (struct type *t)
3376 {
3377 struct type *et = 0;
3378
3379 return is_float_or_hfa_type_recurse (t, &et) ? et : 0;
3380 }
3381
3382
3383 /* Return 1 if the alignment of T is such that the next even slot
3384 should be used. Return 0 if the next available slot should
3385 be used. (See section 8.5.1 of the IA-64 Software Conventions
3386 and Runtime manual). */
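/* For example, a 16-byte "long double" argument, or a struct or array
 containing one, starts in the next even-numbered argument slot;
 8-byte and smaller scalars simply take the next free slot. */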
3387
3388 static int
3389 slot_alignment_is_next_even (struct type *t)
3390 {
3391 switch (TYPE_CODE (t))
3392 {
3393 case TYPE_CODE_INT:
3394 case TYPE_CODE_FLT:
3395 if (TYPE_LENGTH (t) > 8)
3396 return 1;
3397 else
3398 return 0;
3399 case TYPE_CODE_ARRAY:
3400 return
3401 slot_alignment_is_next_even (check_typedef (TYPE_TARGET_TYPE (t)));
3402 case TYPE_CODE_STRUCT:
3403 {
3404 int i;
3405
3406 for (i = 0; i < TYPE_NFIELDS (t); i++)
3407 if (slot_alignment_is_next_even
3408 (check_typedef (TYPE_FIELD_TYPE (t, i))))
3409 return 1;
3410 return 0;
3411 }
3412 default:
3413 return 0;
3414 }
3415 }
3416
3417 /* Attempt to find (and return) the global pointer for the given
3418 function.
3419
3420 This is a rather nasty bit of code that searches for the .dynamic
3421 section in the objfile corresponding to the pc of the function we're
3422 trying to call. Once it finds the address range at which the .dynamic
3423 section lives in the child process, it scans the Elf64_Dyn entries for
3424 a DT_PLTGOT tag. If it finds one of these, the corresponding
3425 d_un.d_ptr value is the global pointer. */
3426
3427 static CORE_ADDR
3428 ia64_find_global_pointer_from_dynamic_section (struct gdbarch *gdbarch,
3429 CORE_ADDR faddr)
3430 {
3431 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3432 struct obj_section *faddr_sect;
3433
3434 faddr_sect = find_pc_section (faddr);
3435 if (faddr_sect != NULL)
3436 {
3437 struct obj_section *osect;
3438
3439 ALL_OBJFILE_OSECTIONS (faddr_sect->objfile, osect)
3440 {
3441 if (strcmp (osect->the_bfd_section->name, ".dynamic") == 0)
3442 break;
3443 }
3444
3445 if (osect < faddr_sect->objfile->sections_end)
3446 {
3447 CORE_ADDR addr, endaddr;
3448
3449 addr = obj_section_addr (osect);
3450 endaddr = obj_section_endaddr (osect);
3451
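 /* Each Elf64_Dyn entry is 16 bytes: an 8-byte d_tag followed by an
 8-byte d_un value, hence the read at ADDR + 8 below and the
 16-byte stride through the section. */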
3452 while (addr < endaddr)
3453 {
3454 int status;
3455 LONGEST tag;
3456 gdb_byte buf[8];
3457
3458 status = target_read_memory (addr, buf, sizeof (buf));
3459 if (status != 0)
3460 break;
3461 tag = extract_signed_integer (buf, sizeof (buf), byte_order);
3462
3463 if (tag == DT_PLTGOT)
3464 {
3465 CORE_ADDR global_pointer;
3466
3467 status = target_read_memory (addr + 8, buf, sizeof (buf));
3468 if (status != 0)
3469 break;
3470 global_pointer = extract_unsigned_integer (buf, sizeof (buf),
3471 byte_order);
3472
3473 /* The payoff... */
3474 return global_pointer;
3475 }
3476
3477 if (tag == DT_NULL)
3478 break;
3479
3480 addr += 16;
3481 }
3482 }
3483 }
3484 return 0;
3485 }
3486
3487 /* Attempt to find (and return) the global pointer for the given
3488 function. We first try the find_global_pointer_from_solib routine
3489 from the gdbarch tdep vector, if provided. If that does not work,
3490 we fall back to ia64_find_global_pointer_from_dynamic_section. */
3491
3492 static CORE_ADDR
3493 ia64_find_global_pointer (struct gdbarch *gdbarch, CORE_ADDR faddr)
3494 {
3495 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3496 CORE_ADDR addr = 0;
3497
3498 if (tdep->find_global_pointer_from_solib)
3499 addr = tdep->find_global_pointer_from_solib (gdbarch, faddr);
3500 if (addr == 0)
3501 addr = ia64_find_global_pointer_from_dynamic_section (gdbarch, faddr);
3502 return addr;
3503 }
3504
3505 /* Given a function's address, attempt to find (and return) the
3506 corresponding (canonical) function descriptor. Return 0 if
3507 not found. */
3508 static CORE_ADDR
3509 find_extant_func_descr (struct gdbarch *gdbarch, CORE_ADDR faddr)
3510 {
3511 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3512 struct obj_section *faddr_sect;
3513
3514 /* Return early if faddr is already a function descriptor. */
3515 faddr_sect = find_pc_section (faddr);
3516 if (faddr_sect && strcmp (faddr_sect->the_bfd_section->name, ".opd") == 0)
3517 return faddr;
3518
3519 if (faddr_sect != NULL)
3520 {
3521 struct obj_section *osect;
3522 ALL_OBJFILE_OSECTIONS (faddr_sect->objfile, osect)
3523 {
3524 if (strcmp (osect->the_bfd_section->name, ".opd") == 0)
3525 break;
3526 }
3527
3528 if (osect < faddr_sect->objfile->sections_end)
3529 {
3530 CORE_ADDR addr, endaddr;
3531
3532 addr = obj_section_addr (osect);
3533 endaddr = obj_section_endaddr (osect);
3534
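 /* Scan the .opd section. Each function descriptor there is 16
 bytes, beginning with the function's entry address, so compare
 the first doubleword of each entry against FADDR. */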
3535 while (addr < endaddr)
3536 {
3537 int status;
3538 LONGEST faddr2;
3539 gdb_byte buf[8];
3540
3541 status = target_read_memory (addr, buf, sizeof (buf));
3542 if (status != 0)
3543 break;
3544 faddr2 = extract_signed_integer (buf, sizeof (buf), byte_order);
3545
3546 if (faddr == faddr2)
3547 return addr;
3548
3549 addr += 16;
3550 }
3551 }
3552 }
3553 return 0;
3554 }
3555
3556 /* Attempt to find a function descriptor corresponding to the
3557 given address. If none is found, construct one in the stack
3558 space pointed to by *FDAPTR, and advance *FDAPTR past it. */
3559
3560 static CORE_ADDR
3561 find_func_descr (struct regcache *regcache, CORE_ADDR faddr, CORE_ADDR *fdaptr)
3562 {
3563 struct gdbarch *gdbarch = regcache->arch ();
3564 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3565 CORE_ADDR fdesc;
3566
3567 fdesc = find_extant_func_descr (gdbarch, faddr);
3568
3569 if (fdesc == 0)
3570 {
3571 ULONGEST global_pointer;
3572 gdb_byte buf[16];
3573
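 /* Carve a 16-byte descriptor out of the reserved stack space: the
 function's entry address goes in the first doubleword and its
 global pointer in the second, mirroring the layout of .opd
 entries. */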
3574 fdesc = *fdaptr;
3575 *fdaptr += 16;
3576
3577 global_pointer = ia64_find_global_pointer (gdbarch, faddr);
3578
3579 if (global_pointer == 0)
3580 regcache_cooked_read_unsigned (regcache,
3581 IA64_GR1_REGNUM, &global_pointer);
3582
3583 store_unsigned_integer (buf, 8, byte_order, faddr);
3584 store_unsigned_integer (buf + 8, 8, byte_order, global_pointer);
3585
3586 write_memory (fdesc, buf, 16);
3587 }
3588
3589 return fdesc;
3590 }
3591
3592 /* Use the following routine when printing out function pointers
3593 so the user can see the function address rather than just the
3594 function descriptor. */
3595 static CORE_ADDR
3596 ia64_convert_from_func_ptr_addr (struct gdbarch *gdbarch, CORE_ADDR addr,
3597 struct target_ops *targ)
3598 {
3599 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3600 struct obj_section *s;
3601 gdb_byte buf[8];
3602
3603 s = find_pc_section (addr);
3604
3605 /* Check whether ADDR points to a function descriptor. */
3606 if (s && strcmp (s->the_bfd_section->name, ".opd") == 0)
3607 return read_memory_unsigned_integer (addr, 8, byte_order);
3608
3609 /* Normally, functions live inside a section that is executable.
3610 So, if ADDR points to a non-executable section, treat it as a
3611 function descriptor and return the target address, but only if
3612 that target address itself points to an executable section.
3613 First check that the whole 8 bytes of memory are readable. */
3614 if (s && (s->the_bfd_section->flags & SEC_CODE) == 0
3615 && target_read_memory (addr, buf, 8) == 0)
3616 {
3617 CORE_ADDR pc = extract_unsigned_integer (buf, 8, byte_order);
3618 struct obj_section *pc_section = find_pc_section (pc);
3619
3620 if (pc_section && (pc_section->the_bfd_section->flags & SEC_CODE))
3621 return pc;
3622 }
3623
3624 /* There are also descriptors embedded in vtables. */
3625 if (s)
3626 {
3627 struct bound_minimal_symbol minsym;
3628
3629 minsym = lookup_minimal_symbol_by_pc (addr);
3630
3631 if (minsym.minsym
3632 && is_vtable_name (MSYMBOL_LINKAGE_NAME (minsym.minsym)))
3633 return read_memory_unsigned_integer (addr, 8, byte_order);
3634 }
3635
3636 return addr;
3637 }
3638
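/* gdbarch frame_align routine: the memory stack pointer must stay
 16-byte aligned, so clear its low four bits. */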
3639 static CORE_ADDR
3640 ia64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
3641 {
3642 return sp & ~0xfLL;
3643 }
3644
3645 /* The default "allocate_new_rse_frame" ia64_infcall_ops routine for ia64. */
3646
3647 static void
3648 ia64_allocate_new_rse_frame (struct regcache *regcache, ULONGEST bsp, int sof)
3649 {
3650 ULONGEST cfm, pfs, new_bsp;
3651
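 /* Advance BSP past SOF new register-stack slots (rse_address_add
 accounts for any intervening NaT-collection slots), save the
 previous frame marker in the low bits of AR.PFS, and rewrite
 CFM's size-of-frame field to describe the new SOF-register
 frame. */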
3652 regcache_cooked_read_unsigned (regcache, IA64_CFM_REGNUM, &cfm);
3653
3654 new_bsp = rse_address_add (bsp, sof);
3655 regcache_cooked_write_unsigned (regcache, IA64_BSP_REGNUM, new_bsp);
3656
3657 regcache_cooked_read_unsigned (regcache, IA64_PFS_REGNUM, &pfs);
3658 pfs &= 0xc000000000000000LL;
3659 pfs |= (cfm & 0xffffffffffffLL);
3660 regcache_cooked_write_unsigned (regcache, IA64_PFS_REGNUM, pfs);
3661
3662 cfm &= 0xc000000000000000LL;
3663 cfm |= sof;
3664 regcache_cooked_write_unsigned (regcache, IA64_CFM_REGNUM, cfm);
3665 }
3666
3667 /* The default "store_argument_in_slot" ia64_infcall_ops routine for
3668 ia64. */
3669
3670 static void
3671 ia64_store_argument_in_slot (struct regcache *regcache, CORE_ADDR bsp,
3672 int slotnum, gdb_byte *buf)
3673 {
3674 write_memory (rse_address_add (bsp, slotnum), buf, 8);
3675 }
3676
3677 /* The default "set_function_addr" ia64_infcall_ops routine for ia64. */
3678
3679 static void
3680 ia64_set_function_addr (struct regcache *regcache, CORE_ADDR func_addr)
3681 {
3682 /* Nothing needed. */
3683 }
3684
3685 static CORE_ADDR
3686 ia64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
3687 struct regcache *regcache, CORE_ADDR bp_addr,
3688 int nargs, struct value **args, CORE_ADDR sp,
3689 int struct_return, CORE_ADDR struct_addr)
3690 {
3691 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
3692 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3693 int argno;
3694 struct value *arg;
3695 struct type *type;
3696 int len, argoffset;
3697 int nslots, rseslots, memslots, slotnum, nfuncargs;
3698 int floatreg;
3699 ULONGEST bsp;
3700 CORE_ADDR funcdescaddr, global_pointer;
3701 CORE_ADDR func_addr = find_function_addr (function, NULL);
3702
3703 nslots = 0;
3704 nfuncargs = 0;
3705 /* Count the number of slots needed for the arguments. */
3706 for (argno = 0; argno < nargs; argno++)
3707 {
3708 arg = args[argno];
3709 type = check_typedef (value_type (arg));
3710 len = TYPE_LENGTH (type);
3711
3712 if ((nslots & 1) && slot_alignment_is_next_even (type))
3713 nslots++;
3714
3715 if (TYPE_CODE (type) == TYPE_CODE_FUNC)
3716 nfuncargs++;
3717
3718 nslots += (len + 7) / 8;
3719 }
3720
3721 /* Divvy up the slots between the RSE and the memory stack. */
3722 rseslots = (nslots > 8) ? 8 : nslots;
3723 memslots = nslots - rseslots;
3724
3725 /* Allocate a new RSE frame. */
3726 regcache_cooked_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
3727 tdep->infcall_ops.allocate_new_rse_frame (regcache, bsp, rseslots);
3728
3729 /* We will attempt to find function descriptors in the .opd section,
3730 but if we can't we'll construct them ourselves. That being the
3731 case, we'll need to reserve space on the stack for them. */
3732 funcdescaddr = sp - nfuncargs * 16;
3733 funcdescaddr &= ~0xfLL;
3734
3735 /* Adjust the stack pointer to its new value. The calling conventions
3736 require us to have 16 bytes of scratch, plus whatever space is
3737 necessary for the memory slots and our function descriptors. */
3738 sp = sp - 16 - (memslots + nfuncargs) * 8;
3739 sp &= ~0xfLL; /* Maintain 16 byte alignment. */
3740
3741 /* Place the arguments where they belong. The arguments will be
3742 either placed in the RSE backing store or on the memory stack.
3743 In addition, floating point arguments or HFAs are placed in
3744 floating point registers. */
3745 slotnum = 0;
3746 floatreg = IA64_FR8_REGNUM;
3747 for (argno = 0; argno < nargs; argno++)
3748 {
3749 struct type *float_elt_type;
3750
3751 arg = args[argno];
3752 type = check_typedef (value_type (arg));
3753 len = TYPE_LENGTH (type);
3754
3755 /* Special handling for pointer-to-function parameters: pass a function descriptor, not a raw code address. */
3756 if (len == 8
3757 && TYPE_CODE (type) == TYPE_CODE_PTR
3758 && TYPE_CODE (TYPE_TARGET_TYPE (type)) == TYPE_CODE_FUNC)
3759 {
3760 gdb_byte val_buf[8];
3761 ULONGEST faddr = extract_unsigned_integer (value_contents (arg),
3762 8, byte_order);
3763 store_unsigned_integer (val_buf, 8, byte_order,
3764 find_func_descr (regcache, faddr,
3765 &funcdescaddr));
3766 if (slotnum < rseslots)
3767 tdep->infcall_ops.store_argument_in_slot (regcache, bsp,
3768 slotnum, val_buf);
3769 else
3770 write_memory (sp + 16 + 8 * (slotnum - rseslots), val_buf, 8);
3771 slotnum++;
3772 continue;
3773 }
3774
3775 /* Normal slots. */
3776
3777 /* Skip odd slot if necessary... */
3778 if ((slotnum & 1) && slot_alignment_is_next_even (type))
3779 slotnum++;
3780
3781 argoffset = 0;
3782 while (len > 0)
3783 {
3784 gdb_byte val_buf[8];
3785
3786 memset (val_buf, 0, 8);
3787 if (!ia64_struct_type_p (type) && len < 8)
3788 {
3789 /* Integral types are LSB-aligned, so we have to be careful
3790 to insert the argument on the correct side of the buffer.
3791 This is why we use store_unsigned_integer. */
3792 store_unsigned_integer
3793 (val_buf, 8, byte_order,
3794 extract_unsigned_integer (value_contents (arg), len,
3795 byte_order));
3796 }
3797 else
3798 {
3799 /* This is either an 8-byte integral type, or an aggregate.
3800 For an 8-byte integral type, there is no problem, we just
3801 copy the value over.
3802
3803 For aggregates, the only potentially tricky portion
3804 is to write the last block if it is less than 8 bytes.
3805 In this case, the data is Byte0-aligned. Happy news,
3806 this means that we don't need to differentiate the
3807 handling of 8-byte blocks and less-than-8-byte blocks. */
3808 memcpy (val_buf, value_contents (arg) + argoffset,
3809 (len > 8) ? 8 : len);
3810 }
3811
3812 if (slotnum < rseslots)
3813 tdep->infcall_ops.store_argument_in_slot (regcache, bsp,
3814 slotnum, val_buf);
3815 else
3816 write_memory (sp + 16 + 8 * (slotnum - rseslots), val_buf, 8);
3817
3818 argoffset += 8;
3819 len -= 8;
3820 slotnum++;
3821 }
3822
3823 /* Handle floating point types (including HFAs). */
3824 float_elt_type = is_float_or_hfa_type (type);
3825 if (float_elt_type != NULL)
3826 {
3827 argoffset = 0;
3828 len = TYPE_LENGTH (type);
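 /* Per the calling conventions, at most eight FP argument registers
 (f8-f15) are used; HFA elements beyond those remain only in the
 general slots already stored above. */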
3829 while (len > 0 && floatreg < IA64_FR16_REGNUM)
3830 {
3831 gdb_byte to[IA64_FP_REGISTER_SIZE];
3832 target_float_convert (value_contents (arg) + argoffset,
3833 float_elt_type, to,
3834 ia64_ext_type (gdbarch));
3835 regcache_cooked_write (regcache, floatreg, to);
3836 floatreg++;
3837 argoffset += TYPE_LENGTH (float_elt_type);
3838 len -= TYPE_LENGTH (float_elt_type);
3839 }
3840 }
3841 }
3842
3843 /* Store the struct return value in r8 if necessary. */
3844 if (struct_return)
3845 {
3846 regcache_cooked_write_unsigned (regcache, IA64_GR8_REGNUM,
3847 (ULONGEST) struct_addr);
3848 }
3849
3850 global_pointer = ia64_find_global_pointer (gdbarch, func_addr);
3851
3852 if (global_pointer != 0)
3853 regcache_cooked_write_unsigned (regcache, IA64_GR1_REGNUM, global_pointer);
3854
3855 /* The following is not necessary on HP-UX, because we're using
3856 a dummy code sequence pushed on the stack to make the call, and
3857 this sequence doesn't need b0 to be set in order for our dummy
3858 breakpoint to be hit. Nonetheless, this doesn't interfere, and
3859 it's needed for other OSes, so we do this unconditionally. */
3860 regcache_cooked_write_unsigned (regcache, IA64_BR0_REGNUM, bp_addr);
3861
3862 regcache_cooked_write_unsigned (regcache, sp_regnum, sp);
3863
3864 tdep->infcall_ops.set_function_addr (regcache, func_addr);
3865
3866 return sp;
3867 }
3868
3869 static const struct ia64_infcall_ops ia64_infcall_ops =
3870 {
3871 ia64_allocate_new_rse_frame,
3872 ia64_store_argument_in_slot,
3873 ia64_set_function_addr
3874 };
3875
3876 static struct frame_id
3877 ia64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
3878 {
3879 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3880 gdb_byte buf[8];
3881 CORE_ADDR sp, bsp;
3882
3883 get_frame_register (this_frame, sp_regnum, buf);
3884 sp = extract_unsigned_integer (buf, 8, byte_order);
3885
3886 get_frame_register (this_frame, IA64_BSP_REGNUM, buf);
3887 bsp = extract_unsigned_integer (buf, 8, byte_order);
3888
3889 if (gdbarch_debug >= 1)
3890 fprintf_unfiltered (gdb_stdlog,
3891 "dummy frame id: code %s, stack %s, special %s\n",
3892 paddress (gdbarch, get_frame_pc (this_frame)),
3893 paddress (gdbarch, sp), paddress (gdbarch, bsp));
3894
3895 return frame_id_build_special (sp, get_frame_pc (this_frame), bsp);
3896 }
3897
3898 static CORE_ADDR
3899 ia64_unwind_pc (struct gdbarch *gdbarch, struct frame_info *next_frame)
3900 {
3901 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3902 gdb_byte buf[8];
3903 CORE_ADDR ip, psr, pc;
3904
3905 frame_unwind_register (next_frame, IA64_IP_REGNUM, buf);
3906 ip = extract_unsigned_integer (buf, 8, byte_order);
3907 frame_unwind_register (next_frame, IA64_PSR_REGNUM, buf);
3908 psr = extract_unsigned_integer (buf, 8, byte_order);
3909
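 /* PSR.ri (bits 41-42) holds the slot number of the next instruction
 to execute; fold it into the low bits of the bundle address to
 form GDB's slot-encoded PC. */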
3910 pc = (ip & ~0xf) | ((psr >> 41) & 3);
3911 return pc;
3912 }
3913
3914 static int
3915 ia64_print_insn (bfd_vma memaddr, struct disassemble_info *info)
3916 {
3917 info->bytes_per_line = SLOT_MULTIPLIER;
3918 return default_print_insn (memaddr, info);
3919 }
3920
3921 /* The default "size_of_register_frame" gdbarch_tdep routine for ia64. */
3922
3923 static int
3924 ia64_size_of_register_frame (struct frame_info *this_frame, ULONGEST cfm)
3925 {
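 /* CFM.sof (bits 0..6) is the total size of the current register
 frame. */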
3926 return (cfm & 0x7f);
3927 }
3928
3929 static struct gdbarch *
3930 ia64_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
3931 {
3932 struct gdbarch *gdbarch;
3933 struct gdbarch_tdep *tdep;
3934
3935 /* If there is already a candidate, use it. */
3936 arches = gdbarch_list_lookup_by_info (arches, &info);
3937 if (arches != NULL)
3938 return arches->gdbarch;
3939
3940 tdep = XCNEW (struct gdbarch_tdep);
3941 gdbarch = gdbarch_alloc (&info, tdep);
3942
3943 tdep->size_of_register_frame = ia64_size_of_register_frame;
3944
3945 /* According to the ia64 specs, instructions that store long double
3946 floats in memory use a long-double format different from the one
3947 used in the floating-point registers. The memory format matches
3948 the x86 extended float format, which is 80 bits. An OS may choose
3949 to use this format (e.g. GNU/Linux) or to use a different format
3950 for storing long doubles (e.g. HP-UX). In the latter case, the
3951 setting of the format may be moved/overridden in an OS-specific
3952 tdep file. */
3953 set_gdbarch_long_double_format (gdbarch, floatformats_i387_ext);
3954
3955 set_gdbarch_short_bit (gdbarch, 16);
3956 set_gdbarch_int_bit (gdbarch, 32);
3957 set_gdbarch_long_bit (gdbarch, 64);
3958 set_gdbarch_long_long_bit (gdbarch, 64);
3959 set_gdbarch_float_bit (gdbarch, 32);
3960 set_gdbarch_double_bit (gdbarch, 64);
3961 set_gdbarch_long_double_bit (gdbarch, 128);
3962 set_gdbarch_ptr_bit (gdbarch, 64);
3963
3964 set_gdbarch_num_regs (gdbarch, NUM_IA64_RAW_REGS);
3965 set_gdbarch_num_pseudo_regs (gdbarch,
3966 LAST_PSEUDO_REGNUM - FIRST_PSEUDO_REGNUM);
3967 set_gdbarch_sp_regnum (gdbarch, sp_regnum);
3968 set_gdbarch_fp0_regnum (gdbarch, IA64_FR0_REGNUM);
3969
3970 set_gdbarch_register_name (gdbarch, ia64_register_name);
3971 set_gdbarch_register_type (gdbarch, ia64_register_type);
3972
3973 set_gdbarch_pseudo_register_read (gdbarch, ia64_pseudo_register_read);
3974 set_gdbarch_pseudo_register_write (gdbarch, ia64_pseudo_register_write);
3975 set_gdbarch_dwarf2_reg_to_regnum (gdbarch, ia64_dwarf_reg_to_regnum);
3976 set_gdbarch_register_reggroup_p (gdbarch, ia64_register_reggroup_p);
3977 set_gdbarch_convert_register_p (gdbarch, ia64_convert_register_p);
3978 set_gdbarch_register_to_value (gdbarch, ia64_register_to_value);
3979 set_gdbarch_value_to_register (gdbarch, ia64_value_to_register);
3980
3981 set_gdbarch_skip_prologue (gdbarch, ia64_skip_prologue);
3982
3983 set_gdbarch_return_value (gdbarch, ia64_return_value);
3984
3985 set_gdbarch_memory_insert_breakpoint (gdbarch,
3986 ia64_memory_insert_breakpoint);
3987 set_gdbarch_memory_remove_breakpoint (gdbarch,
3988 ia64_memory_remove_breakpoint);
3989 set_gdbarch_breakpoint_from_pc (gdbarch, ia64_breakpoint_from_pc);
3990 set_gdbarch_breakpoint_kind_from_pc (gdbarch, ia64_breakpoint_kind_from_pc);
3991 set_gdbarch_read_pc (gdbarch, ia64_read_pc);
3992 set_gdbarch_write_pc (gdbarch, ia64_write_pc);
3993
3994 /* Settings for calling functions in the inferior. */
3995 set_gdbarch_push_dummy_call (gdbarch, ia64_push_dummy_call);
3996 tdep->infcall_ops = ia64_infcall_ops;
3997 set_gdbarch_frame_align (gdbarch, ia64_frame_align);
3998 set_gdbarch_dummy_id (gdbarch, ia64_dummy_id);
3999
4000 set_gdbarch_unwind_pc (gdbarch, ia64_unwind_pc);
4001 #ifdef HAVE_LIBUNWIND_IA64_H
4002 frame_unwind_append_unwinder (gdbarch,
4003 &ia64_libunwind_sigtramp_frame_unwind);
4004 frame_unwind_append_unwinder (gdbarch, &ia64_libunwind_frame_unwind);
4005 frame_unwind_append_unwinder (gdbarch, &ia64_sigtramp_frame_unwind);
4006 libunwind_frame_set_descr (gdbarch, &ia64_libunwind_descr);
4007 #else
4008 frame_unwind_append_unwinder (gdbarch, &ia64_sigtramp_frame_unwind);
4009 #endif
4010 frame_unwind_append_unwinder (gdbarch, &ia64_frame_unwind);
4011 frame_base_set_default (gdbarch, &ia64_frame_base);
4012
4013 /* Settings that should be unnecessary. */
4014 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
4015
4016 set_gdbarch_print_insn (gdbarch, ia64_print_insn);
4017 set_gdbarch_convert_from_func_ptr_addr (gdbarch,
4018 ia64_convert_from_func_ptr_addr);
4019
4020 /* The virtual table contains 16-byte descriptors, not pointers to
4021 descriptors. */
4022 set_gdbarch_vtable_function_descriptors (gdbarch, 1);
4023
4024 /* Hook in ABI-specific overrides, if they have been registered. */
4025 gdbarch_init_osabi (info, gdbarch);
4026
4027 return gdbarch;
4028 }
4029
4030 void
4031 _initialize_ia64_tdep (void)
4032 {
4033 gdbarch_register (bfd_arch_ia64, ia64_gdbarch_init, NULL);
4034 }