1 /* SPU target-dependent code for GDB, the GNU debugger.
2 Copyright (C) 2006, 2007, 2008 Free Software Foundation, Inc.
3
4 Contributed by Ulrich Weigand <uweigand@de.ibm.com>.
5 Based on a port by Sid Manning <sid@us.ibm.com>.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "arch-utils.h"
24 #include "gdbtypes.h"
25 #include "gdbcmd.h"
26 #include "gdbcore.h"
27 #include "gdb_string.h"
28 #include "gdb_assert.h"
29 #include "frame.h"
30 #include "frame-unwind.h"
31 #include "frame-base.h"
32 #include "trad-frame.h"
33 #include "symtab.h"
34 #include "symfile.h"
35 #include "value.h"
36 #include "inferior.h"
37 #include "dis-asm.h"
38 #include "objfiles.h"
39 #include "language.h"
40 #include "regcache.h"
41 #include "reggroups.h"
42 #include "floatformat.h"
43 #include "observer.h"
44
45 #include "spu-tdep.h"
46
47
48 /* The tdep structure. */
49 struct gdbarch_tdep
50 {
51 /* SPU-specific vector type. */
52 struct type *spu_builtin_type_vec128;
53 };
54
55
56 /* SPU-specific vector type. */
57 static struct type *
58 spu_builtin_type_vec128 (struct gdbarch *gdbarch)
59 {
60 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
61
62 if (!tdep->spu_builtin_type_vec128)
63 {
64 struct type *t;
65
66 t = init_composite_type ("__spu_builtin_type_vec128", TYPE_CODE_UNION);
67 append_composite_type_field (t, "uint128", builtin_type_int128);
68 append_composite_type_field (t, "v2_int64",
69 init_vector_type (builtin_type_int64, 2));
70 append_composite_type_field (t, "v4_int32",
71 init_vector_type (builtin_type_int32, 4));
72 append_composite_type_field (t, "v8_int16",
73 init_vector_type (builtin_type_int16, 8));
74 append_composite_type_field (t, "v16_int8",
75 init_vector_type (builtin_type_int8, 16));
76 append_composite_type_field (t, "v2_double",
77 init_vector_type (builtin_type (gdbarch)
78 ->builtin_double, 2));
79 append_composite_type_field (t, "v4_float",
80 init_vector_type (builtin_type (gdbarch)
81 ->builtin_float, 4));
82
83 TYPE_VECTOR (t) = 1;
84 TYPE_NAME (t) = "spu_builtin_type_vec128";
85
86 tdep->spu_builtin_type_vec128 = t;
87 }
88
89 return tdep->spu_builtin_type_vec128;
90 }
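
/* Illustrative note (added; not part of the original source): with the
   union type above installed as the GPR type, each 128-bit register can
   be inspected through any of its members from the GDB prompt, e.g.

     (gdb) print $r80.v4_int32
     $1 = {1, 2, 3, 4}

   The register number and element values are made up; only the member
   names (uint128, v4_int32, v4_float, ...) come from the code above.  */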
91
92
93 /* The list of available "info spu " commands. */
94 static struct cmd_list_element *infospucmdlist = NULL;
95
96 /* Registers. */
97
98 static const char *
99 spu_register_name (struct gdbarch *gdbarch, int reg_nr)
100 {
101 static char *register_names[] =
102 {
103 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
104 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
105 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
106 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
107 "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
108 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
109 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
110 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
111 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
112 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
113 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
114 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
115 "r96", "r97", "r98", "r99", "r100", "r101", "r102", "r103",
116 "r104", "r105", "r106", "r107", "r108", "r109", "r110", "r111",
117 "r112", "r113", "r114", "r115", "r116", "r117", "r118", "r119",
118 "r120", "r121", "r122", "r123", "r124", "r125", "r126", "r127",
119 "id", "pc", "sp", "fpscr", "srr0", "lslr", "decr", "decr_status"
120 };
121
122 if (reg_nr < 0)
123 return NULL;
124 if (reg_nr >= sizeof register_names / sizeof *register_names)
125 return NULL;
126
127 return register_names[reg_nr];
128 }
129
130 static struct type *
131 spu_register_type (struct gdbarch *gdbarch, int reg_nr)
132 {
133 if (reg_nr < SPU_NUM_GPRS)
134 return spu_builtin_type_vec128 (gdbarch);
135
136 switch (reg_nr)
137 {
138 case SPU_ID_REGNUM:
139 return builtin_type_uint32;
140
141 case SPU_PC_REGNUM:
142 return builtin_type (gdbarch)->builtin_func_ptr;
143
144 case SPU_SP_REGNUM:
145 return builtin_type (gdbarch)->builtin_data_ptr;
146
147 case SPU_FPSCR_REGNUM:
148 return builtin_type_uint128;
149
150 case SPU_SRR0_REGNUM:
151 return builtin_type_uint32;
152
153 case SPU_LSLR_REGNUM:
154 return builtin_type_uint32;
155
156 case SPU_DECR_REGNUM:
157 return builtin_type_uint32;
158
159 case SPU_DECR_STATUS_REGNUM:
160 return builtin_type_uint32;
161
162 default:
163           internal_error (__FILE__, __LINE__, _("invalid regnum"));
164 }
165 }
166
167 /* Pseudo registers for preferred slots - stack pointer. */
168
169 static void
170 spu_pseudo_register_read_spu (struct regcache *regcache, const char *regname,
171 gdb_byte *buf)
172 {
173 gdb_byte reg[32];
174 char annex[32];
175 ULONGEST id;
176
177 regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
178 xsnprintf (annex, sizeof annex, "%d/%s", (int) id, regname);
179 memset (reg, 0, sizeof reg);
180 target_read (&current_target, TARGET_OBJECT_SPU, annex,
181 reg, 0, sizeof reg);
182
183 store_unsigned_integer (buf, 4, strtoulst (reg, NULL, 16));
184 }
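
/* Example (illustrative only): for an SPU context whose ID register reads
   as 12345, fetching the "srr0" pseudo register builds the annex
   "12345/srr0".  The corresponding target object is expected to hold an
   ASCII hex string such as "0x2a8", which strtoulst converts to the value
   0x2a8 before it is stored into the 4-byte register buffer.  The concrete
   ID and value here are made-up examples.  */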
185
186 static void
187 spu_pseudo_register_read (struct gdbarch *gdbarch, struct regcache *regcache,
188 int regnum, gdb_byte *buf)
189 {
190 gdb_byte reg[16];
191 char annex[32];
192 ULONGEST id;
193
194 switch (regnum)
195 {
196 case SPU_SP_REGNUM:
197 regcache_raw_read (regcache, SPU_RAW_SP_REGNUM, reg);
198 memcpy (buf, reg, 4);
199 break;
200
201 case SPU_FPSCR_REGNUM:
202 regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
203 xsnprintf (annex, sizeof annex, "%d/fpcr", (int) id);
204 target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 16);
205 break;
206
207 case SPU_SRR0_REGNUM:
208 spu_pseudo_register_read_spu (regcache, "srr0", buf);
209 break;
210
211 case SPU_LSLR_REGNUM:
212 spu_pseudo_register_read_spu (regcache, "lslr", buf);
213 break;
214
215 case SPU_DECR_REGNUM:
216 spu_pseudo_register_read_spu (regcache, "decr", buf);
217 break;
218
219 case SPU_DECR_STATUS_REGNUM:
220 spu_pseudo_register_read_spu (regcache, "decr_status", buf);
221 break;
222
223 default:
224 internal_error (__FILE__, __LINE__, _("invalid regnum"));
225 }
226 }
227
228 static void
229 spu_pseudo_register_write_spu (struct regcache *regcache, const char *regname,
230 const gdb_byte *buf)
231 {
232 gdb_byte reg[32];
233 char annex[32];
234 ULONGEST id;
235
236 regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
237 xsnprintf (annex, sizeof annex, "%d/%s", (int) id, regname);
238 xsnprintf (reg, sizeof reg, "0x%s",
239 phex_nz (extract_unsigned_integer (buf, 4), 4));
240 target_write (&current_target, TARGET_OBJECT_SPU, annex,
241 reg, 0, strlen (reg));
242 }
243
244 static void
245 spu_pseudo_register_write (struct gdbarch *gdbarch, struct regcache *regcache,
246 int regnum, const gdb_byte *buf)
247 {
248 gdb_byte reg[16];
249 char annex[32];
250 ULONGEST id;
251
252 switch (regnum)
253 {
254 case SPU_SP_REGNUM:
255 regcache_raw_read (regcache, SPU_RAW_SP_REGNUM, reg);
256 memcpy (reg, buf, 4);
257 regcache_raw_write (regcache, SPU_RAW_SP_REGNUM, reg);
258 break;
259
260 case SPU_FPSCR_REGNUM:
261 regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
262 xsnprintf (annex, sizeof annex, "%d/fpcr", (int) id);
263 target_write (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 16);
264 break;
265
266 case SPU_SRR0_REGNUM:
267 spu_pseudo_register_write_spu (regcache, "srr0", buf);
268 break;
269
270 case SPU_LSLR_REGNUM:
271 spu_pseudo_register_write_spu (regcache, "lslr", buf);
272 break;
273
274 case SPU_DECR_REGNUM:
275 spu_pseudo_register_write_spu (regcache, "decr", buf);
276 break;
277
278 case SPU_DECR_STATUS_REGNUM:
279 spu_pseudo_register_write_spu (regcache, "decr_status", buf);
280 break;
281
282 default:
283 internal_error (__FILE__, __LINE__, _("invalid regnum"));
284 }
285 }
286
287 /* Value conversion -- access scalar values at the preferred slot. */
288
289 static struct value *
290 spu_value_from_register (struct type *type, int regnum,
291 struct frame_info *frame)
292 {
293 struct value *value = default_value_from_register (type, regnum, frame);
294 int len = TYPE_LENGTH (type);
295
296 if (regnum < SPU_NUM_GPRS && len < 16)
297 {
298 int preferred_slot = len < 4 ? 4 - len : 0;
299 set_value_offset (value, preferred_slot);
300 }
301
302 return value;
303 }
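
/* Worked example (illustrative): a 2-byte 'short' held in a GPR is read
   from bytes 2..3 of the 128-bit register, since the preferred slot of a
   sub-word scalar is right-aligned within the first word: len = 2, so
   preferred_slot = 4 - 2 = 2.  A 4-byte 'int' or an 8-byte 'long long'
   starts at offset 0.  */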
304
305 /* Register groups. */
306
307 static int
308 spu_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
309 struct reggroup *group)
310 {
311 /* Registers displayed via 'info regs'. */
312 if (group == general_reggroup)
313 return 1;
314
315 /* Registers displayed via 'info float'. */
316 if (group == float_reggroup)
317 return 0;
318
319 /* Registers that need to be saved/restored in order to
320 push or pop frames. */
321 if (group == save_reggroup || group == restore_reggroup)
322 return 1;
323
324 return default_register_reggroup_p (gdbarch, regnum, group);
325 }
326
327 /* Address conversion. */
328
329 static CORE_ADDR
330 spu_pointer_to_address (struct type *type, const gdb_byte *buf)
331 {
332 ULONGEST addr = extract_unsigned_integer (buf, TYPE_LENGTH (type));
333 ULONGEST lslr = SPU_LS_SIZE - 1; /* Hard-wired LS size. */
334
335 if (target_has_registers && target_has_stack && target_has_memory)
336 lslr = get_frame_register_unsigned (get_selected_frame (NULL),
337 SPU_LSLR_REGNUM);
338
339 return addr & lslr;
340 }
341
342 static CORE_ADDR
343 spu_integer_to_address (struct gdbarch *gdbarch,
344 struct type *type, const gdb_byte *buf)
345 {
346 ULONGEST addr = unpack_long (type, buf);
347 ULONGEST lslr = SPU_LS_SIZE - 1; /* Hard-wired LS size. */
348
349 if (target_has_registers && target_has_stack && target_has_memory)
350 lslr = get_frame_register_unsigned (get_selected_frame (NULL),
351 SPU_LSLR_REGNUM);
352
353 return addr & lslr;
354 }
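
/* Worked example (illustrative): local-store addresses wrap modulo the
   local store size.  Assuming a 256 KB local store (SPU_LS_SIZE == 0x40000,
   giving the default mask 0x3ffff), the pointer value 0x12345678 maps to
   local-store address 0x05678.  When registers are available, the live
   LSLR value from the selected frame is used as the mask instead.  */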
355
356
357 /* Decoding SPU instructions. */
358
359 enum
360 {
361 op_lqd = 0x34,
362 op_lqx = 0x3c4,
363 op_lqa = 0x61,
364 op_lqr = 0x67,
365 op_stqd = 0x24,
366 op_stqx = 0x144,
367 op_stqa = 0x41,
368 op_stqr = 0x47,
369
370 op_il = 0x081,
371 op_ila = 0x21,
372 op_a = 0x0c0,
373 op_ai = 0x1c,
374
375 op_selb = 0x4,
376
377 op_br = 0x64,
378 op_bra = 0x60,
379 op_brsl = 0x66,
380 op_brasl = 0x62,
381 op_brnz = 0x42,
382 op_brz = 0x40,
383 op_brhnz = 0x46,
384 op_brhz = 0x44,
385 op_bi = 0x1a8,
386 op_bisl = 0x1a9,
387 op_biz = 0x128,
388 op_binz = 0x129,
389 op_bihz = 0x12a,
390 op_bihnz = 0x12b,
391 };
392
393 static int
394 is_rr (unsigned int insn, int op, int *rt, int *ra, int *rb)
395 {
396 if ((insn >> 21) == op)
397 {
398 *rt = insn & 127;
399 *ra = (insn >> 7) & 127;
400 *rb = (insn >> 14) & 127;
401 return 1;
402 }
403
404 return 0;
405 }
406
407 static int
408 is_rrr (unsigned int insn, int op, int *rt, int *ra, int *rb, int *rc)
409 {
410 if ((insn >> 28) == op)
411 {
412 *rt = (insn >> 21) & 127;
413 *ra = (insn >> 7) & 127;
414 *rb = (insn >> 14) & 127;
415 *rc = insn & 127;
416 return 1;
417 }
418
419 return 0;
420 }
421
422 static int
423 is_ri7 (unsigned int insn, int op, int *rt, int *ra, int *i7)
424 {
425 if ((insn >> 21) == op)
426 {
427 *rt = insn & 127;
428 *ra = (insn >> 7) & 127;
429 *i7 = (((insn >> 14) & 127) ^ 0x40) - 0x40;
430 return 1;
431 }
432
433 return 0;
434 }
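
/* Note on the sign-extension idiom above (added for clarity): for an N-bit
   immediate field X with sign bit SIGN, the expression (X ^ SIGN) - SIGN
   sign-extends X to a full int.  For the 7-bit I7 field, 0x7f becomes
   (0x7f ^ 0x40) - 0x40 = 0x3f - 0x40 = -1, while 0x01 stays
   (0x01 ^ 0x40) - 0x40 = 1.  The same trick is used for the 10-, 16- and
   18-bit fields below.  */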
435
436 static int
437 is_ri10 (unsigned int insn, int op, int *rt, int *ra, int *i10)
438 {
439 if ((insn >> 24) == op)
440 {
441 *rt = insn & 127;
442 *ra = (insn >> 7) & 127;
443 *i10 = (((insn >> 14) & 0x3ff) ^ 0x200) - 0x200;
444 return 1;
445 }
446
447 return 0;
448 }
449
450 static int
451 is_ri16 (unsigned int insn, int op, int *rt, int *i16)
452 {
453 if ((insn >> 23) == op)
454 {
455 *rt = insn & 127;
456 *i16 = (((insn >> 7) & 0xffff) ^ 0x8000) - 0x8000;
457 return 1;
458 }
459
460 return 0;
461 }
462
463 static int
464 is_ri18 (unsigned int insn, int op, int *rt, int *i18)
465 {
466 if ((insn >> 25) == op)
467 {
468 *rt = insn & 127;
469 *i18 = (((insn >> 7) & 0x3ffff) ^ 0x20000) - 0x20000;
470 return 1;
471 }
472
473 return 0;
474 }
475
476 static int
477 is_branch (unsigned int insn, int *offset, int *reg)
478 {
479 int rt, i7, i16;
480
481 if (is_ri16 (insn, op_br, &rt, &i16)
482 || is_ri16 (insn, op_brsl, &rt, &i16)
483 || is_ri16 (insn, op_brnz, &rt, &i16)
484 || is_ri16 (insn, op_brz, &rt, &i16)
485 || is_ri16 (insn, op_brhnz, &rt, &i16)
486 || is_ri16 (insn, op_brhz, &rt, &i16))
487 {
488 *reg = SPU_PC_REGNUM;
489 *offset = i16 << 2;
490 return 1;
491 }
492
493 if (is_ri16 (insn, op_bra, &rt, &i16)
494 || is_ri16 (insn, op_brasl, &rt, &i16))
495 {
496 *reg = -1;
497 *offset = i16 << 2;
498 return 1;
499 }
500
501 if (is_ri7 (insn, op_bi, &rt, reg, &i7)
502 || is_ri7 (insn, op_bisl, &rt, reg, &i7)
503 || is_ri7 (insn, op_biz, &rt, reg, &i7)
504 || is_ri7 (insn, op_binz, &rt, reg, &i7)
505 || is_ri7 (insn, op_bihz, &rt, reg, &i7)
506 || is_ri7 (insn, op_bihnz, &rt, reg, &i7))
507 {
508 *offset = 0;
509 return 1;
510 }
511
512 return 0;
513 }
514
515
516 /* Prolog parsing. */
517
518 struct spu_prologue_data
519 {
520 /* Stack frame size. -1 if analysis was unsuccessful. */
521 int size;
522
523 /* How to find the CFA. The CFA is equal to SP at function entry. */
524 int cfa_reg;
525 int cfa_offset;
526
527 /* Offset relative to CFA where a register is saved. -1 if invalid. */
528 int reg_offset[SPU_NUM_GPRS];
529 };
530
531 static CORE_ADDR
532 spu_analyze_prologue (CORE_ADDR start_pc, CORE_ADDR end_pc,
533 struct spu_prologue_data *data)
534 {
535 int found_sp = 0;
536 int found_fp = 0;
537 int found_lr = 0;
538 int reg_immed[SPU_NUM_GPRS];
539 gdb_byte buf[16];
540 CORE_ADDR prolog_pc = start_pc;
541 CORE_ADDR pc;
542 int i;
543
544
545 /* Initialize DATA to default values. */
546 data->size = -1;
547
548 data->cfa_reg = SPU_RAW_SP_REGNUM;
549 data->cfa_offset = 0;
550
551 for (i = 0; i < SPU_NUM_GPRS; i++)
552 data->reg_offset[i] = -1;
553
554 /* Set up REG_IMMED array. This is non-zero for a register if we know its
555 preferred slot currently holds this immediate value. */
556 for (i = 0; i < SPU_NUM_GPRS; i++)
557 reg_immed[i] = 0;
558
559 /* Scan instructions until the first branch.
560
561 The following instructions are important prolog components:
562
563 - The first instruction to set up the stack pointer.
564 - The first instruction to set up the frame pointer.
565 - The first instruction to save the link register.
566
567 We return the instruction after the latest of these three,
568 or the incoming PC if none is found. The first instruction
569 to set up the stack pointer also defines the frame size.
570
571 Note that instructions saving incoming arguments to their stack
572 slots are not counted as important, because they are hard to
573 identify with certainty. This should not matter much, because
574 arguments are relevant only in code compiled with debug data,
575 and in such code the GDB core will advance until the first source
576 line anyway, using SAL data.
577
578 For purposes of stack unwinding, we analyze the following types
579 of instructions in addition:
580
581 - Any instruction adding to the current frame pointer.
582 - Any instruction loading an immediate constant into a register.
583 - Any instruction storing a register onto the stack.
584
585 These are used to compute the CFA and REG_OFFSET output. */
586
587 for (pc = start_pc; pc < end_pc; pc += 4)
588 {
589 unsigned int insn;
590 int rt, ra, rb, rc, immed;
591
592 if (target_read_memory (pc, buf, 4))
593 break;
594 insn = extract_unsigned_integer (buf, 4);
595
596 /* AI is the typical instruction to set up a stack frame.
597 It is also used to initialize the frame pointer. */
598 if (is_ri10 (insn, op_ai, &rt, &ra, &immed))
599 {
600 if (rt == data->cfa_reg && ra == data->cfa_reg)
601 data->cfa_offset -= immed;
602
603 if (rt == SPU_RAW_SP_REGNUM && ra == SPU_RAW_SP_REGNUM
604 && !found_sp)
605 {
606 found_sp = 1;
607 prolog_pc = pc + 4;
608
609 data->size = -immed;
610 }
611 else if (rt == SPU_FP_REGNUM && ra == SPU_RAW_SP_REGNUM
612 && !found_fp)
613 {
614 found_fp = 1;
615 prolog_pc = pc + 4;
616
617 data->cfa_reg = SPU_FP_REGNUM;
618 data->cfa_offset -= immed;
619 }
620 }
621
622 /* A is used to set up stack frames of size >= 512 bytes.
623 If we have tracked the contents of the addend register,
624 we can handle this as well. */
625 else if (is_rr (insn, op_a, &rt, &ra, &rb))
626 {
627 if (rt == data->cfa_reg && ra == data->cfa_reg)
628 {
629 if (reg_immed[rb] != 0)
630 data->cfa_offset -= reg_immed[rb];
631 else
632 data->cfa_reg = -1; /* We don't know the CFA any more. */
633 }
634
635 if (rt == SPU_RAW_SP_REGNUM && ra == SPU_RAW_SP_REGNUM
636 && !found_sp)
637 {
638 found_sp = 1;
639 prolog_pc = pc + 4;
640
641 if (reg_immed[rb] != 0)
642 data->size = -reg_immed[rb];
643 }
644 }
645
646 /* We need to track IL and ILA used to load immediate constants
647 in case they are later used as input to an A instruction. */
648 else if (is_ri16 (insn, op_il, &rt, &immed))
649 {
650 reg_immed[rt] = immed;
651
652 if (rt == SPU_RAW_SP_REGNUM && !found_sp)
653 found_sp = 1;
654 }
655
656 else if (is_ri18 (insn, op_ila, &rt, &immed))
657 {
658 reg_immed[rt] = immed & 0x3ffff;
659
660 if (rt == SPU_RAW_SP_REGNUM && !found_sp)
661 found_sp = 1;
662 }
663
664 /* STQD is used to save registers to the stack. */
665 else if (is_ri10 (insn, op_stqd, &rt, &ra, &immed))
666 {
667 if (ra == data->cfa_reg)
668 data->reg_offset[rt] = data->cfa_offset - (immed << 4);
669
670 if (ra == data->cfa_reg && rt == SPU_LR_REGNUM
671 && !found_lr)
672 {
673 found_lr = 1;
674 prolog_pc = pc + 4;
675 }
676 }
677
678 /* _start uses SELB to set up the stack pointer. */
679 else if (is_rrr (insn, op_selb, &rt, &ra, &rb, &rc))
680 {
681 if (rt == SPU_RAW_SP_REGNUM && !found_sp)
682 found_sp = 1;
683 }
684
685 /* We terminate if we find a branch. */
686 else if (is_branch (insn, &immed, &ra))
687 break;
688 }
689
690
691 /* If we successfully parsed until here, and didn't find any instruction
692 modifying SP, we assume we have a frameless function. */
693 if (!found_sp)
694 data->size = 0;
695
696 /* Return cooked instead of raw SP. */
697 if (data->cfa_reg == SPU_RAW_SP_REGNUM)
698 data->cfa_reg = SPU_SP_REGNUM;
699
700 return prolog_pc;
701 }
702
703 /* Return the first instruction after the prologue starting at PC. */
704 static CORE_ADDR
705 spu_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
706 {
707 struct spu_prologue_data data;
708 return spu_analyze_prologue (pc, (CORE_ADDR)-1, &data);
709 }
710
711 /* Return the frame pointer in use at address PC. */
712 static void
713 spu_virtual_frame_pointer (struct gdbarch *gdbarch, CORE_ADDR pc,
714 int *reg, LONGEST *offset)
715 {
716 struct spu_prologue_data data;
717 spu_analyze_prologue (pc, (CORE_ADDR)-1, &data);
718
719 if (data.size != -1 && data.cfa_reg != -1)
720 {
721 /* The 'frame pointer' address is CFA minus frame size. */
722 *reg = data.cfa_reg;
723 *offset = data.cfa_offset - data.size;
724 }
725 else
726 {
727 /* ??? We don't really know ... */
728 *reg = SPU_SP_REGNUM;
729 *offset = 0;
730 }
731 }
732
733 /* Return true if we are in the function's epilogue, i.e. after the
734 instruction that destroyed the function's stack frame.
735
736 1) scan forward from the point of execution:
737 a) If you find an instruction that modifies the stack pointer
738 or transfers control (except a return), execution is not in
739 an epilogue, return.
740 b) Stop scanning if you find a return instruction or reach the
741 end of the function or reach the hard limit for the size of
742 an epilogue.
743 2) scan backward from the point of execution:
744 a) If you find an instruction that modifies the stack pointer,
745 execution *is* in an epilogue, return.
746 b) Stop scanning if you reach an instruction that transfers
747 control or the beginning of the function or reach the hard
748 limit for the size of an epilogue. */
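
/* Illustrative example (not from the original source): in an epilogue such
   as

     ai  $sp, $sp, 48   # pop the frame
     lqd $lr, 16($sp)   # reload the link register
     bi  $lr            # return

   a PC sitting on the LQD or the BI is classified as being in the
   epilogue: the forward scan reaches the 'bi $lr' before any other branch
   or SP modification, and the backward scan finds the AI that adjusts the
   stack pointer.  The instruction sequence is illustrative only.  */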
749
750 static int
751 spu_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
752 {
753 CORE_ADDR scan_pc, func_start, func_end, epilogue_start, epilogue_end;
754 bfd_byte buf[4];
755 unsigned int insn;
756 int rt, ra, rb, rc, immed;
757
758 /* Find the search limits based on function boundaries and hard limit.
759 We assume the epilogue can be up to 64 instructions long. */
760
761 const int spu_max_epilogue_size = 64 * 4;
762
763 if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
764 return 0;
765
766 if (pc - func_start < spu_max_epilogue_size)
767 epilogue_start = func_start;
768 else
769 epilogue_start = pc - spu_max_epilogue_size;
770
771 if (func_end - pc < spu_max_epilogue_size)
772 epilogue_end = func_end;
773 else
774 epilogue_end = pc + spu_max_epilogue_size;
775
776 /* Scan forward until next 'bi $0'. */
777
778 for (scan_pc = pc; scan_pc < epilogue_end; scan_pc += 4)
779 {
780 if (target_read_memory (scan_pc, buf, 4))
781 return 0;
782 insn = extract_unsigned_integer (buf, 4);
783
784 if (is_branch (insn, &immed, &ra))
785 {
786 if (immed == 0 && ra == SPU_LR_REGNUM)
787 break;
788
789 return 0;
790 }
791
792 if (is_ri10 (insn, op_ai, &rt, &ra, &immed)
793 || is_rr (insn, op_a, &rt, &ra, &rb)
794 || is_ri10 (insn, op_lqd, &rt, &ra, &immed))
795 {
796 if (rt == SPU_RAW_SP_REGNUM)
797 return 0;
798 }
799 }
800
801 if (scan_pc >= epilogue_end)
802 return 0;
803
804 /* Scan backward until adjustment to stack pointer (R1). */
805
806 for (scan_pc = pc - 4; scan_pc >= epilogue_start; scan_pc -= 4)
807 {
808 if (target_read_memory (scan_pc, buf, 4))
809 return 0;
810 insn = extract_unsigned_integer (buf, 4);
811
812 if (is_branch (insn, &immed, &ra))
813 return 0;
814
815 if (is_ri10 (insn, op_ai, &rt, &ra, &immed)
816 || is_rr (insn, op_a, &rt, &ra, &rb)
817 || is_ri10 (insn, op_lqd, &rt, &ra, &immed))
818 {
819 if (rt == SPU_RAW_SP_REGNUM)
820 return 1;
821 }
822 }
823
824 return 0;
825 }
826
827
828 /* Normal stack frames. */
829
830 struct spu_unwind_cache
831 {
832 CORE_ADDR func;
833 CORE_ADDR frame_base;
834 CORE_ADDR local_base;
835
836 struct trad_frame_saved_reg *saved_regs;
837 };
838
839 static struct spu_unwind_cache *
840 spu_frame_unwind_cache (struct frame_info *this_frame,
841 void **this_prologue_cache)
842 {
843 struct spu_unwind_cache *info;
844 struct spu_prologue_data data;
845 gdb_byte buf[16];
846
847 if (*this_prologue_cache)
848 return *this_prologue_cache;
849
850 info = FRAME_OBSTACK_ZALLOC (struct spu_unwind_cache);
851 *this_prologue_cache = info;
852 info->saved_regs = trad_frame_alloc_saved_regs (this_frame);
853 info->frame_base = 0;
854 info->local_base = 0;
855
856 /* Find the start of the current function, and analyze its prologue. */
857 info->func = get_frame_func (this_frame);
858 if (info->func == 0)
859 {
860 /* Fall back to using the current PC as frame ID. */
861 info->func = get_frame_pc (this_frame);
862 data.size = -1;
863 }
864 else
865 spu_analyze_prologue (info->func, get_frame_pc (this_frame), &data);
866
867
868 /* If successful, use prologue analysis data. */
869 if (data.size != -1 && data.cfa_reg != -1)
870 {
871 CORE_ADDR cfa;
872 int i;
873
874 /* Determine CFA via unwound CFA_REG plus CFA_OFFSET. */
875 get_frame_register (this_frame, data.cfa_reg, buf);
876 cfa = extract_unsigned_integer (buf, 4) + data.cfa_offset;
877
878 /* Call-saved register slots. */
879 for (i = 0; i < SPU_NUM_GPRS; i++)
880 if (i == SPU_LR_REGNUM
881 || (i >= SPU_SAVED1_REGNUM && i <= SPU_SAVEDN_REGNUM))
882 if (data.reg_offset[i] != -1)
883 info->saved_regs[i].addr = cfa - data.reg_offset[i];
884
885 /* Frame bases. */
886 info->frame_base = cfa;
887 info->local_base = cfa - data.size;
888 }
889
890 /* Otherwise, fall back to reading the backchain link. */
891 else
892 {
893 CORE_ADDR reg;
894 LONGEST backchain;
895 int status;
896
897 /* Get the backchain. */
898 reg = get_frame_register_unsigned (this_frame, SPU_SP_REGNUM);
899 status = safe_read_memory_integer (reg, 4, &backchain);
900
901 /* A zero backchain terminates the frame chain. Also, sanity
902 check against the local store size limit. */
903 if (status && backchain > 0 && backchain < SPU_LS_SIZE)
904 {
905 /* Assume the link register is saved into its slot. */
906 if (backchain + 16 < SPU_LS_SIZE)
907 info->saved_regs[SPU_LR_REGNUM].addr = backchain + 16;
908
909 /* Frame bases. */
910 info->frame_base = backchain;
911 info->local_base = reg;
912 }
913 }
914
915 /* If we didn't find a frame, we cannot determine SP / return address. */
916 if (info->frame_base == 0)
917 return info;
918
919 /* The previous SP is equal to the CFA. */
920 trad_frame_set_value (info->saved_regs, SPU_SP_REGNUM, info->frame_base);
921
922 /* Read full contents of the unwound link register in order to
923 be able to determine the return address. */
924 if (trad_frame_addr_p (info->saved_regs, SPU_LR_REGNUM))
925 target_read_memory (info->saved_regs[SPU_LR_REGNUM].addr, buf, 16);
926 else
927 get_frame_register (this_frame, SPU_LR_REGNUM, buf);
928
929   /* Normally, the return address is contained in slot 0 of the
930 link register, and slots 1-3 are zero. For an overlay return,
931 slot 0 contains the address of the overlay manager return stub,
932 slot 1 contains the partition number of the overlay section to
933 be returned to, and slot 2 contains the return address within
934 that section. Return the latter address in that case. */
935 if (extract_unsigned_integer (buf + 8, 4) != 0)
936 trad_frame_set_value (info->saved_regs, SPU_PC_REGNUM,
937 extract_unsigned_integer (buf + 8, 4));
938 else
939 trad_frame_set_value (info->saved_regs, SPU_PC_REGNUM,
940 extract_unsigned_integer (buf, 4));
941
942 return info;
943 }
944
945 static void
946 spu_frame_this_id (struct frame_info *this_frame,
947 void **this_prologue_cache, struct frame_id *this_id)
948 {
949 struct spu_unwind_cache *info =
950 spu_frame_unwind_cache (this_frame, this_prologue_cache);
951
952 if (info->frame_base == 0)
953 return;
954
955 *this_id = frame_id_build (info->frame_base, info->func);
956 }
957
958 static struct value *
959 spu_frame_prev_register (struct frame_info *this_frame,
960 void **this_prologue_cache, int regnum)
961 {
962 struct spu_unwind_cache *info
963 = spu_frame_unwind_cache (this_frame, this_prologue_cache);
964
965 /* Special-case the stack pointer. */
966 if (regnum == SPU_RAW_SP_REGNUM)
967 regnum = SPU_SP_REGNUM;
968
969 return trad_frame_get_prev_register (this_frame, info->saved_regs, regnum);
970 }
971
972 static const struct frame_unwind spu_frame_unwind = {
973 NORMAL_FRAME,
974 spu_frame_this_id,
975 spu_frame_prev_register,
976 NULL,
977 default_frame_sniffer
978 };
979
980 static CORE_ADDR
981 spu_frame_base_address (struct frame_info *this_frame, void **this_cache)
982 {
983 struct spu_unwind_cache *info
984 = spu_frame_unwind_cache (this_frame, this_cache);
985 return info->local_base;
986 }
987
988 static const struct frame_base spu_frame_base = {
989 &spu_frame_unwind,
990 spu_frame_base_address,
991 spu_frame_base_address,
992 spu_frame_base_address
993 };
994
995 static CORE_ADDR
996 spu_unwind_pc (struct gdbarch *gdbarch, struct frame_info *next_frame)
997 {
998 CORE_ADDR pc = frame_unwind_register_unsigned (next_frame, SPU_PC_REGNUM);
999 /* Mask off interrupt enable bit. */
1000 return pc & -4;
1001 }
1002
1003 static CORE_ADDR
1004 spu_unwind_sp (struct gdbarch *gdbarch, struct frame_info *next_frame)
1005 {
1006 return frame_unwind_register_unsigned (next_frame, SPU_SP_REGNUM);
1007 }
1008
1009 static CORE_ADDR
1010 spu_read_pc (struct regcache *regcache)
1011 {
1012 ULONGEST pc;
1013 regcache_cooked_read_unsigned (regcache, SPU_PC_REGNUM, &pc);
1014 /* Mask off interrupt enable bit. */
1015 return pc & -4;
1016 }
1017
1018 static void
1019 spu_write_pc (struct regcache *regcache, CORE_ADDR pc)
1020 {
1021 /* Keep interrupt enabled state unchanged. */
1022 ULONGEST old_pc;
1023 regcache_cooked_read_unsigned (regcache, SPU_PC_REGNUM, &old_pc);
1024 regcache_cooked_write_unsigned (regcache, SPU_PC_REGNUM,
1025 (pc & -4) | (old_pc & 3));
1026 }
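
/* Worked example (illustrative): the low two bits of the PC register
   encode interrupt-enable state rather than address bits (see the comments
   above).  Writing a new PC of 0x1234 while the old value was 0x3001
   therefore stores (0x1234 & -4) | (0x3001 & 3) = 0x1235, leaving the
   interrupt-enable state unchanged.  */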
1027
1028
1029 /* Function calling convention. */
1030
1031 static CORE_ADDR
1032 spu_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1033 {
1034 return sp & ~15;
1035 }
1036
1037 static CORE_ADDR
1038 spu_push_dummy_code (struct gdbarch *gdbarch, CORE_ADDR sp, CORE_ADDR funaddr,
1039 struct value **args, int nargs, struct type *value_type,
1040 CORE_ADDR *real_pc, CORE_ADDR *bp_addr,
1041 struct regcache *regcache)
1042 {
1043 /* Allocate space sufficient for a breakpoint, keeping the stack aligned. */
1044 sp = (sp - 4) & ~15;
1045   /* Store the address of that breakpoint.  */
1046 *bp_addr = sp;
1047 /* The call starts at the callee's entry point. */
1048 *real_pc = funaddr;
1049
1050 return sp;
1051 }
1052
1053 static int
1054 spu_scalar_value_p (struct type *type)
1055 {
1056 switch (TYPE_CODE (type))
1057 {
1058 case TYPE_CODE_INT:
1059 case TYPE_CODE_ENUM:
1060 case TYPE_CODE_RANGE:
1061 case TYPE_CODE_CHAR:
1062 case TYPE_CODE_BOOL:
1063 case TYPE_CODE_PTR:
1064 case TYPE_CODE_REF:
1065 return TYPE_LENGTH (type) <= 16;
1066
1067 default:
1068 return 0;
1069 }
1070 }
1071
1072 static void
1073 spu_value_to_regcache (struct regcache *regcache, int regnum,
1074 struct type *type, const gdb_byte *in)
1075 {
1076 int len = TYPE_LENGTH (type);
1077
1078 if (spu_scalar_value_p (type))
1079 {
1080 int preferred_slot = len < 4 ? 4 - len : 0;
1081 regcache_cooked_write_part (regcache, regnum, preferred_slot, len, in);
1082 }
1083 else
1084 {
1085 while (len >= 16)
1086 {
1087 regcache_cooked_write (regcache, regnum++, in);
1088 in += 16;
1089 len -= 16;
1090 }
1091
1092 if (len > 0)
1093 regcache_cooked_write_part (regcache, regnum, 0, len, in);
1094 }
1095 }
1096
1097 static void
1098 spu_regcache_to_value (struct regcache *regcache, int regnum,
1099 struct type *type, gdb_byte *out)
1100 {
1101 int len = TYPE_LENGTH (type);
1102
1103 if (spu_scalar_value_p (type))
1104 {
1105 int preferred_slot = len < 4 ? 4 - len : 0;
1106 regcache_cooked_read_part (regcache, regnum, preferred_slot, len, out);
1107 }
1108 else
1109 {
1110 while (len >= 16)
1111 {
1112 regcache_cooked_read (regcache, regnum++, out);
1113 out += 16;
1114 len -= 16;
1115 }
1116
1117 if (len > 0)
1118 regcache_cooked_read_part (regcache, regnum, 0, len, out);
1119 }
1120 }
1121
1122 static CORE_ADDR
1123 spu_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
1124 struct regcache *regcache, CORE_ADDR bp_addr,
1125 int nargs, struct value **args, CORE_ADDR sp,
1126 int struct_return, CORE_ADDR struct_addr)
1127 {
1128 CORE_ADDR sp_delta;
1129 int i;
1130 int regnum = SPU_ARG1_REGNUM;
1131 int stack_arg = -1;
1132 gdb_byte buf[16];
1133
1134 /* Set the return address. */
1135 memset (buf, 0, sizeof buf);
1136 store_unsigned_integer (buf, 4, bp_addr);
1137 regcache_cooked_write (regcache, SPU_LR_REGNUM, buf);
1138
1139 /* If STRUCT_RETURN is true, then the struct return address (in
1140 STRUCT_ADDR) will consume the first argument-passing register.
1141 Both adjust the register count and store that value. */
1142 if (struct_return)
1143 {
1144 memset (buf, 0, sizeof buf);
1145 store_unsigned_integer (buf, 4, struct_addr);
1146 regcache_cooked_write (regcache, regnum++, buf);
1147 }
1148
1149 /* Fill in argument registers. */
1150 for (i = 0; i < nargs; i++)
1151 {
1152 struct value *arg = args[i];
1153 struct type *type = check_typedef (value_type (arg));
1154 const gdb_byte *contents = value_contents (arg);
1155 int len = TYPE_LENGTH (type);
1156 int n_regs = align_up (len, 16) / 16;
1157
1158 /* If the argument doesn't wholly fit into registers, it and
1159 all subsequent arguments go to the stack. */
1160 if (regnum + n_regs - 1 > SPU_ARGN_REGNUM)
1161 {
1162 stack_arg = i;
1163 break;
1164 }
1165
1166 spu_value_to_regcache (regcache, regnum, type, contents);
1167 regnum += n_regs;
1168 }
1169
1170 /* Overflow arguments go to the stack. */
1171 if (stack_arg != -1)
1172 {
1173 CORE_ADDR ap;
1174
1175 /* Allocate all required stack size. */
1176 for (i = stack_arg; i < nargs; i++)
1177 {
1178 struct type *type = check_typedef (value_type (args[i]));
1179 sp -= align_up (TYPE_LENGTH (type), 16);
1180 }
1181
1182 /* Fill in stack arguments. */
1183 ap = sp;
1184 for (i = stack_arg; i < nargs; i++)
1185 {
1186 struct value *arg = args[i];
1187 struct type *type = check_typedef (value_type (arg));
1188 int len = TYPE_LENGTH (type);
1189 int preferred_slot;
1190
1191 if (spu_scalar_value_p (type))
1192 preferred_slot = len < 4 ? 4 - len : 0;
1193 else
1194 preferred_slot = 0;
1195
1196 target_write_memory (ap + preferred_slot, value_contents (arg), len);
1197 ap += align_up (TYPE_LENGTH (type), 16);
1198 }
1199 }
1200
1201 /* Allocate stack frame header. */
1202 sp -= 32;
1203
1204 /* Store stack back chain. */
1205 regcache_cooked_read (regcache, SPU_RAW_SP_REGNUM, buf);
1206 target_write_memory (sp, buf, 16);
1207
1208 /* Finally, update all slots of the SP register. */
1209 sp_delta = sp - extract_unsigned_integer (buf, 4);
1210 for (i = 0; i < 4; i++)
1211 {
1212 CORE_ADDR sp_slot = extract_unsigned_integer (buf + 4*i, 4);
1213 store_unsigned_integer (buf + 4*i, 4, sp_slot + sp_delta);
1214 }
1215 regcache_cooked_write (regcache, SPU_RAW_SP_REGNUM, buf);
1216
1217 return sp;
1218 }
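
/* Illustrative example (not from the original source): for a call such as
   f (int x, struct big s) where 'struct big' is 40 bytes, X is written to
   the preferred slot of the first argument register and S, rounded up to
   48 bytes, occupies the following three quadword registers.  Only when an
   argument no longer fits wholly into the remaining argument registers does
   it (and every subsequent argument) spill to 16-byte-aligned stack slots.
   The concrete types and sizes are made-up examples.  */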
1219
1220 static struct frame_id
1221 spu_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
1222 {
1223 CORE_ADDR pc = get_frame_register_unsigned (this_frame, SPU_PC_REGNUM);
1224 CORE_ADDR sp = get_frame_register_unsigned (this_frame, SPU_SP_REGNUM);
1225 return frame_id_build (sp, pc & -4);
1226 }
1227
1228 /* Function return value access. */
1229
1230 static enum return_value_convention
1231 spu_return_value (struct gdbarch *gdbarch, struct type *func_type,
1232 struct type *type, struct regcache *regcache,
1233 gdb_byte *out, const gdb_byte *in)
1234 {
1235 enum return_value_convention rvc;
1236
1237 if (TYPE_LENGTH (type) <= (SPU_ARGN_REGNUM - SPU_ARG1_REGNUM + 1) * 16)
1238 rvc = RETURN_VALUE_REGISTER_CONVENTION;
1239 else
1240 rvc = RETURN_VALUE_STRUCT_CONVENTION;
1241
1242 if (in)
1243 {
1244 switch (rvc)
1245 {
1246 case RETURN_VALUE_REGISTER_CONVENTION:
1247 spu_value_to_regcache (regcache, SPU_ARG1_REGNUM, type, in);
1248 break;
1249
1250 case RETURN_VALUE_STRUCT_CONVENTION:
1251 error ("Cannot set function return value.");
1252 break;
1253 }
1254 }
1255 else if (out)
1256 {
1257 switch (rvc)
1258 {
1259 case RETURN_VALUE_REGISTER_CONVENTION:
1260 spu_regcache_to_value (regcache, SPU_ARG1_REGNUM, type, out);
1261 break;
1262
1263 case RETURN_VALUE_STRUCT_CONVENTION:
1264 error ("Function return value unknown.");
1265 break;
1266 }
1267 }
1268
1269 return rvc;
1270 }
1271
1272
1273 /* Breakpoints. */
1274
1275 static const gdb_byte *
1276 spu_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR * pcptr, int *lenptr)
1277 {
1278 static const gdb_byte breakpoint[] = { 0x00, 0x00, 0x3f, 0xff };
1279
1280 *lenptr = sizeof breakpoint;
1281 return breakpoint;
1282 }
1283
1284
1285 /* Software single-stepping support. */
1286
1287 int
1288 spu_software_single_step (struct frame_info *frame)
1289 {
1290 CORE_ADDR pc, next_pc;
1291 unsigned int insn;
1292 int offset, reg;
1293 gdb_byte buf[4];
1294
1295 pc = get_frame_pc (frame);
1296
1297 if (target_read_memory (pc, buf, 4))
1298 return 1;
1299 insn = extract_unsigned_integer (buf, 4);
1300
1301 /* Next sequential instruction is at PC + 4, except if the current
1302 instruction is a PPE-assisted call, in which case it is at PC + 8.
1303 Wrap around LS limit to be on the safe side. */
1304 if ((insn & 0xffffff00) == 0x00002100)
1305 next_pc = (pc + 8) & (SPU_LS_SIZE - 1);
1306 else
1307 next_pc = (pc + 4) & (SPU_LS_SIZE - 1);
1308
1309 insert_single_step_breakpoint (next_pc);
1310
1311 if (is_branch (insn, &offset, &reg))
1312 {
1313 CORE_ADDR target = offset;
1314
1315 if (reg == SPU_PC_REGNUM)
1316 target += pc;
1317 else if (reg != -1)
1318 {
1319 get_frame_register_bytes (frame, reg, 0, 4, buf);
1320 target += extract_unsigned_integer (buf, 4) & -4;
1321 }
1322
1323 target = target & (SPU_LS_SIZE - 1);
1324 if (target != next_pc)
1325 insert_single_step_breakpoint (target);
1326 }
1327
1328 return 1;
1329 }
1330
1331 /* Target overlays for the SPU overlay manager.
1332
1333 See the documentation of simple_overlay_update for how the
1334 interface is supposed to work.
1335
1336 Data structures used by the overlay manager:
1337
1338 struct ovly_table
1339 {
1340 u32 vma;
1341 u32 size;
1342 u32 pos;
1343 u32 buf;
1344 } _ovly_table[]; -- one entry per overlay section
1345
1346 struct ovly_buf_table
1347 {
1348 u32 mapped;
1349 } _ovly_buf_table[]; -- one entry per overlay buffer
1350
1351 _ovly_table should never change.
1352
1353    Both tables are aligned to a 16-byte boundary; the symbols _ovly_table
1354    and _ovly_buf_table are of type STT_OBJECT, with sizes set to the sizes of
1355    the respective arrays.  buf in _ovly_table is an index into _ovly_buf_table.
1356
1357 mapped is an index into _ovly_table. Both the mapped and buf indices start
1358 from one to reference the first entry in their respective tables. */
1359
1360 /* Using the per-objfile private data mechanism, we store for each
1361 objfile an array of "struct spu_overlay_table" structures, one
1362 for each obj_section of the objfile. This structure holds two
1363 fields, MAPPED_PTR and MAPPED_VAL. If MAPPED_PTR is zero, this
1364 is *not* an overlay section. If it is non-zero, it represents
1365 a target address. The overlay section is mapped iff the target
1366 integer at this location equals MAPPED_VAL. */
1367
1368 static const struct objfile_data *spu_overlay_data;
1369
1370 struct spu_overlay_table
1371 {
1372 CORE_ADDR mapped_ptr;
1373 CORE_ADDR mapped_val;
1374 };
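
/* Worked example (illustrative): if _ovly_table[4] (one-based index 5) has
   buf == 2, the section described by that entry gets MAPPED_PTR pointing
   at _ovly_buf_table[1].mapped and MAPPED_VAL == 5, so it is considered
   mapped exactly when that target word reads 5.  The particular indices
   are made up.  */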
1375
1376 /* Retrieve the overlay table for OBJFILE. If not already cached, read
1377 the _ovly_table data structure from the target and initialize the
1378 spu_overlay_table data structure from it. */
1379 static struct spu_overlay_table *
1380 spu_get_overlay_table (struct objfile *objfile)
1381 {
1382 struct minimal_symbol *ovly_table_msym, *ovly_buf_table_msym;
1383 CORE_ADDR ovly_table_base, ovly_buf_table_base;
1384 unsigned ovly_table_size, ovly_buf_table_size;
1385 struct spu_overlay_table *tbl;
1386 struct obj_section *osect;
1387 char *ovly_table;
1388 int i;
1389
1390 tbl = objfile_data (objfile, spu_overlay_data);
1391 if (tbl)
1392 return tbl;
1393
1394 ovly_table_msym = lookup_minimal_symbol ("_ovly_table", NULL, objfile);
1395 if (!ovly_table_msym)
1396 return NULL;
1397
1398 ovly_buf_table_msym = lookup_minimal_symbol ("_ovly_buf_table", NULL, objfile);
1399 if (!ovly_buf_table_msym)
1400 return NULL;
1401
1402 ovly_table_base = SYMBOL_VALUE_ADDRESS (ovly_table_msym);
1403 ovly_table_size = MSYMBOL_SIZE (ovly_table_msym);
1404
1405 ovly_buf_table_base = SYMBOL_VALUE_ADDRESS (ovly_buf_table_msym);
1406 ovly_buf_table_size = MSYMBOL_SIZE (ovly_buf_table_msym);
1407
1408 ovly_table = xmalloc (ovly_table_size);
1409 read_memory (ovly_table_base, ovly_table, ovly_table_size);
1410
1411 tbl = OBSTACK_CALLOC (&objfile->objfile_obstack,
1412 objfile->sections_end - objfile->sections,
1413 struct spu_overlay_table);
1414
1415 for (i = 0; i < ovly_table_size / 16; i++)
1416 {
1417 CORE_ADDR vma = extract_unsigned_integer (ovly_table + 16*i + 0, 4);
1418 CORE_ADDR size = extract_unsigned_integer (ovly_table + 16*i + 4, 4);
1419 CORE_ADDR pos = extract_unsigned_integer (ovly_table + 16*i + 8, 4);
1420 CORE_ADDR buf = extract_unsigned_integer (ovly_table + 16*i + 12, 4);
1421
1422 if (buf == 0 || (buf - 1) * 4 >= ovly_buf_table_size)
1423 continue;
1424
1425 ALL_OBJFILE_OSECTIONS (objfile, osect)
1426 if (vma == bfd_section_vma (objfile->obfd, osect->the_bfd_section)
1427 && pos == osect->the_bfd_section->filepos)
1428 {
1429 int ndx = osect - objfile->sections;
1430 tbl[ndx].mapped_ptr = ovly_buf_table_base + (buf - 1) * 4;
1431 tbl[ndx].mapped_val = i + 1;
1432 break;
1433 }
1434 }
1435
1436 xfree (ovly_table);
1437 set_objfile_data (objfile, spu_overlay_data, tbl);
1438 return tbl;
1439 }
1440
1441 /* Read _ovly_buf_table entry from the target to determine whether
1442 OSECT is currently mapped, and update the mapped state. */
1443 static void
1444 spu_overlay_update_osect (struct obj_section *osect)
1445 {
1446 struct spu_overlay_table *ovly_table;
1447 CORE_ADDR val;
1448
1449 ovly_table = spu_get_overlay_table (osect->objfile);
1450 if (!ovly_table)
1451 return;
1452
1453 ovly_table += osect - osect->objfile->sections;
1454 if (ovly_table->mapped_ptr == 0)
1455 return;
1456
1457 val = read_memory_unsigned_integer (ovly_table->mapped_ptr, 4);
1458 osect->ovly_mapped = (val == ovly_table->mapped_val);
1459 }
1460
1461 /* If OSECT is NULL, then update all sections' mapped state.
1462 If OSECT is non-NULL, then update only OSECT's mapped state. */
1463 static void
1464 spu_overlay_update (struct obj_section *osect)
1465 {
1466 /* Just one section. */
1467 if (osect)
1468 spu_overlay_update_osect (osect);
1469
1470 /* All sections. */
1471 else
1472 {
1473 struct objfile *objfile;
1474
1475 ALL_OBJSECTIONS (objfile, osect)
1476 if (section_is_overlay (osect))
1477 spu_overlay_update_osect (osect);
1478 }
1479 }
1480
1481 /* Whenever a new objfile is loaded, read the target's _ovly_table.
1482 If there is one, go through all sections and make sure for non-
1483 overlay sections LMA equals VMA, while for overlay sections LMA
1484 is larger than local store size. */
1485 static void
1486 spu_overlay_new_objfile (struct objfile *objfile)
1487 {
1488 struct spu_overlay_table *ovly_table;
1489 struct obj_section *osect;
1490
1491 /* If we've already touched this file, do nothing. */
1492 if (!objfile || objfile_data (objfile, spu_overlay_data) != NULL)
1493 return;
1494
1495 /* Consider only SPU objfiles. */
1496 if (bfd_get_arch (objfile->obfd) != bfd_arch_spu)
1497 return;
1498
1499 /* Check if this objfile has overlays. */
1500 ovly_table = spu_get_overlay_table (objfile);
1501 if (!ovly_table)
1502 return;
1503
1504 /* Now go and fiddle with all the LMAs. */
1505 ALL_OBJFILE_OSECTIONS (objfile, osect)
1506 {
1507 bfd *obfd = objfile->obfd;
1508 asection *bsect = osect->the_bfd_section;
1509 int ndx = osect - objfile->sections;
1510
1511 if (ovly_table[ndx].mapped_ptr == 0)
1512 bfd_section_lma (obfd, bsect) = bfd_section_vma (obfd, bsect);
1513 else
1514 bfd_section_lma (obfd, bsect) = bsect->filepos + SPU_LS_SIZE;
1515 }
1516 }
1517
1518
1519 /* "info spu" commands. */
1520
1521 static void
1522 info_spu_event_command (char *args, int from_tty)
1523 {
1524 struct frame_info *frame = get_selected_frame (NULL);
1525 ULONGEST event_status = 0;
1526 ULONGEST event_mask = 0;
1527 struct cleanup *chain;
1528 gdb_byte buf[100];
1529 char annex[32];
1530 LONGEST len;
1531 int rc, id;
1532
1533 if (gdbarch_bfd_arch_info (get_frame_arch (frame))->arch != bfd_arch_spu)
1534 error (_("\"info spu\" is only supported on the SPU architecture."));
1535
1536 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
1537
1538 xsnprintf (annex, sizeof annex, "%d/event_status", id);
1539 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1540 buf, 0, (sizeof (buf) - 1));
1541 if (len <= 0)
1542 error (_("Could not read event_status."));
1543 buf[len] = '\0';
1544 event_status = strtoulst (buf, NULL, 16);
1545
1546 xsnprintf (annex, sizeof annex, "%d/event_mask", id);
1547 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1548 buf, 0, (sizeof (buf) - 1));
1549 if (len <= 0)
1550 error (_("Could not read event_mask."));
1551 buf[len] = '\0';
1552 event_mask = strtoulst (buf, NULL, 16);
1553
1554 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoEvent");
1555
1556 if (ui_out_is_mi_like_p (uiout))
1557 {
1558 ui_out_field_fmt (uiout, "event_status",
1559 "0x%s", phex_nz (event_status, 4));
1560 ui_out_field_fmt (uiout, "event_mask",
1561 "0x%s", phex_nz (event_mask, 4));
1562 }
1563 else
1564 {
1565 printf_filtered (_("Event Status 0x%s\n"), phex (event_status, 4));
1566 printf_filtered (_("Event Mask 0x%s\n"), phex (event_mask, 4));
1567 }
1568
1569 do_cleanups (chain);
1570 }
1571
1572 static void
1573 info_spu_signal_command (char *args, int from_tty)
1574 {
1575 struct frame_info *frame = get_selected_frame (NULL);
1576 ULONGEST signal1 = 0;
1577 ULONGEST signal1_type = 0;
1578 int signal1_pending = 0;
1579 ULONGEST signal2 = 0;
1580 ULONGEST signal2_type = 0;
1581 int signal2_pending = 0;
1582 struct cleanup *chain;
1583 char annex[32];
1584 gdb_byte buf[100];
1585 LONGEST len;
1586 int rc, id;
1587
1588 if (gdbarch_bfd_arch_info (get_frame_arch (frame))->arch != bfd_arch_spu)
1589 error (_("\"info spu\" is only supported on the SPU architecture."));
1590
1591 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
1592
1593 xsnprintf (annex, sizeof annex, "%d/signal1", id);
1594 len = target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 4);
1595 if (len < 0)
1596 error (_("Could not read signal1."));
1597 else if (len == 4)
1598 {
1599 signal1 = extract_unsigned_integer (buf, 4);
1600 signal1_pending = 1;
1601 }
1602
1603 xsnprintf (annex, sizeof annex, "%d/signal1_type", id);
1604 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1605 buf, 0, (sizeof (buf) - 1));
1606 if (len <= 0)
1607 error (_("Could not read signal1_type."));
1608 buf[len] = '\0';
1609 signal1_type = strtoulst (buf, NULL, 16);
1610
1611 xsnprintf (annex, sizeof annex, "%d/signal2", id);
1612 len = target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 4);
1613 if (len < 0)
1614 error (_("Could not read signal2."));
1615 else if (len == 4)
1616 {
1617 signal2 = extract_unsigned_integer (buf, 4);
1618 signal2_pending = 1;
1619 }
1620
1621 xsnprintf (annex, sizeof annex, "%d/signal2_type", id);
1622 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1623 buf, 0, (sizeof (buf) - 1));
1624 if (len <= 0)
1625 error (_("Could not read signal2_type."));
1626 buf[len] = '\0';
1627 signal2_type = strtoulst (buf, NULL, 16);
1628
1629 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoSignal");
1630
1631 if (ui_out_is_mi_like_p (uiout))
1632 {
1633 ui_out_field_int (uiout, "signal1_pending", signal1_pending);
1634 ui_out_field_fmt (uiout, "signal1", "0x%s", phex_nz (signal1, 4));
1635 ui_out_field_int (uiout, "signal1_type", signal1_type);
1636 ui_out_field_int (uiout, "signal2_pending", signal2_pending);
1637 ui_out_field_fmt (uiout, "signal2", "0x%s", phex_nz (signal2, 4));
1638 ui_out_field_int (uiout, "signal2_type", signal2_type);
1639 }
1640 else
1641 {
1642 if (signal1_pending)
1643 printf_filtered (_("Signal 1 control word 0x%s "), phex (signal1, 4));
1644 else
1645 printf_filtered (_("Signal 1 not pending "));
1646
1647 if (signal1_type)
1648 printf_filtered (_("(Type Or)\n"));
1649 else
1650 printf_filtered (_("(Type Overwrite)\n"));
1651
1652 if (signal2_pending)
1653 printf_filtered (_("Signal 2 control word 0x%s "), phex (signal2, 4));
1654 else
1655 printf_filtered (_("Signal 2 not pending "));
1656
1657 if (signal2_type)
1658 printf_filtered (_("(Type Or)\n"));
1659 else
1660 printf_filtered (_("(Type Overwrite)\n"));
1661 }
1662
1663 do_cleanups (chain);
1664 }
1665
1666 static void
1667 info_spu_mailbox_list (gdb_byte *buf, int nr,
1668 const char *field, const char *msg)
1669 {
1670 struct cleanup *chain;
1671 int i;
1672
1673 if (nr <= 0)
1674 return;
1675
1676 chain = make_cleanup_ui_out_table_begin_end (uiout, 1, nr, "mbox");
1677
1678 ui_out_table_header (uiout, 32, ui_left, field, msg);
1679 ui_out_table_body (uiout);
1680
1681 for (i = 0; i < nr; i++)
1682 {
1683 struct cleanup *val_chain;
1684 ULONGEST val;
1685 val_chain = make_cleanup_ui_out_tuple_begin_end (uiout, "mbox");
1686 val = extract_unsigned_integer (buf + 4*i, 4);
1687 ui_out_field_fmt (uiout, field, "0x%s", phex (val, 4));
1688 do_cleanups (val_chain);
1689
1690 if (!ui_out_is_mi_like_p (uiout))
1691 printf_filtered ("\n");
1692 }
1693
1694 do_cleanups (chain);
1695 }
1696
1697 static void
1698 info_spu_mailbox_command (char *args, int from_tty)
1699 {
1700 struct frame_info *frame = get_selected_frame (NULL);
1701 struct cleanup *chain;
1702 char annex[32];
1703 gdb_byte buf[1024];
1704 LONGEST len;
1705 int i, id;
1706
1707 if (gdbarch_bfd_arch_info (get_frame_arch (frame))->arch != bfd_arch_spu)
1708 error (_("\"info spu\" is only supported on the SPU architecture."));
1709
1710 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
1711
1712 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoMailbox");
1713
1714 xsnprintf (annex, sizeof annex, "%d/mbox_info", id);
1715 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1716 buf, 0, sizeof buf);
1717 if (len < 0)
1718 error (_("Could not read mbox_info."));
1719
1720 info_spu_mailbox_list (buf, len / 4, "mbox", "SPU Outbound Mailbox");
1721
1722 xsnprintf (annex, sizeof annex, "%d/ibox_info", id);
1723 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1724 buf, 0, sizeof buf);
1725 if (len < 0)
1726 error (_("Could not read ibox_info."));
1727
1728 info_spu_mailbox_list (buf, len / 4, "ibox", "SPU Outbound Interrupt Mailbox");
1729
1730 xsnprintf (annex, sizeof annex, "%d/wbox_info", id);
1731 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1732 buf, 0, sizeof buf);
1733 if (len < 0)
1734 error (_("Could not read wbox_info."));
1735
1736 info_spu_mailbox_list (buf, len / 4, "wbox", "SPU Inbound Mailbox");
1737
1738 do_cleanups (chain);
1739 }
1740
1741 static ULONGEST
1742 spu_mfc_get_bitfield (ULONGEST word, int first, int last)
1743 {
1744 ULONGEST mask = ~(~(ULONGEST)0 << (last - first + 1));
1745 return (word >> (63 - last)) & mask;
1746 }
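
/* Worked example (illustrative): bit positions follow the big-endian
   numbering used by the hardware documentation, with bit 0 as the most
   significant bit of the 64-bit word.  Thus
   spu_mfc_get_bitfield (0xff00000000000000ULL, 0, 7) yields 0xff, and the
   (27, 34) field extracted below is the 8-bit MFC command opcode.  */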
1747
1748 static void
1749 info_spu_dma_cmdlist (gdb_byte *buf, int nr)
1750 {
1751 static char *spu_mfc_opcode[256] =
1752 {
1753 /* 00 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1754 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1755 /* 10 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1756 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1757 /* 20 */ "put", "putb", "putf", NULL, "putl", "putlb", "putlf", NULL,
1758 "puts", "putbs", "putfs", NULL, NULL, NULL, NULL, NULL,
1759 /* 30 */ "putr", "putrb", "putrf", NULL, "putrl", "putrlb", "putrlf", NULL,
1760 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1761 /* 40 */ "get", "getb", "getf", NULL, "getl", "getlb", "getlf", NULL,
1762 "gets", "getbs", "getfs", NULL, NULL, NULL, NULL, NULL,
1763 /* 50 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1764 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1765 /* 60 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1766 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1767 /* 70 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1768 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1769 /* 80 */ "sdcrt", "sdcrtst", NULL, NULL, NULL, NULL, NULL, NULL,
1770 NULL, "sdcrz", NULL, NULL, NULL, "sdcrst", NULL, "sdcrf",
1771 /* 90 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1772 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1773 /* a0 */ "sndsig", "sndsigb", "sndsigf", NULL, NULL, NULL, NULL, NULL,
1774 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1775 /* b0 */ "putlluc", NULL, NULL, NULL, "putllc", NULL, NULL, NULL,
1776 "putqlluc", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1777 /* c0 */ "barrier", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1778 "mfceieio", NULL, NULL, NULL, "mfcsync", NULL, NULL, NULL,
1779 /* d0 */ "getllar", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1780 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1781 /* e0 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1782 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1783 /* f0 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1784 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
1785 };
1786
1787 int *seq = alloca (nr * sizeof (int));
1788 int done = 0;
1789 struct cleanup *chain;
1790 int i, j;
1791
1792
1793 /* Determine sequence in which to display (valid) entries. */
1794 for (i = 0; i < nr; i++)
1795 {
1796 /* Search for the first valid entry all of whose
1797 dependencies are met. */
1798 for (j = 0; j < nr; j++)
1799 {
1800 ULONGEST mfc_cq_dw3;
1801 ULONGEST dependencies;
1802
1803 if (done & (1 << (nr - 1 - j)))
1804 continue;
1805
1806 mfc_cq_dw3 = extract_unsigned_integer (buf + 32*j + 24, 8);
1807 if (!spu_mfc_get_bitfield (mfc_cq_dw3, 16, 16))
1808 continue;
1809
1810 dependencies = spu_mfc_get_bitfield (mfc_cq_dw3, 0, nr - 1);
1811 if ((dependencies & done) != dependencies)
1812 continue;
1813
1814 seq[i] = j;
1815 done |= 1 << (nr - 1 - j);
1816 break;
1817 }
1818
1819 if (j == nr)
1820 break;
1821 }
1822
1823 nr = i;
1824
1825
1826 chain = make_cleanup_ui_out_table_begin_end (uiout, 10, nr, "dma_cmd");
1827
1828 ui_out_table_header (uiout, 7, ui_left, "opcode", "Opcode");
1829 ui_out_table_header (uiout, 3, ui_left, "tag", "Tag");
1830 ui_out_table_header (uiout, 3, ui_left, "tid", "TId");
1831 ui_out_table_header (uiout, 3, ui_left, "rid", "RId");
1832 ui_out_table_header (uiout, 18, ui_left, "ea", "EA");
1833 ui_out_table_header (uiout, 7, ui_left, "lsa", "LSA");
1834 ui_out_table_header (uiout, 7, ui_left, "size", "Size");
1835 ui_out_table_header (uiout, 7, ui_left, "lstaddr", "LstAddr");
1836 ui_out_table_header (uiout, 7, ui_left, "lstsize", "LstSize");
1837 ui_out_table_header (uiout, 1, ui_left, "error_p", "E");
1838
1839 ui_out_table_body (uiout);
1840
1841 for (i = 0; i < nr; i++)
1842 {
1843 struct cleanup *cmd_chain;
1844 ULONGEST mfc_cq_dw0;
1845 ULONGEST mfc_cq_dw1;
1846 ULONGEST mfc_cq_dw2;
1847 int mfc_cmd_opcode, mfc_cmd_tag, rclass_id, tclass_id;
1848 int lsa, size, list_lsa, list_size, mfc_lsa, mfc_size;
1849 ULONGEST mfc_ea;
1850 int list_valid_p, noop_valid_p, qw_valid_p, ea_valid_p, cmd_error_p;
1851
1852 /* Decode contents of MFC Command Queue Context Save/Restore Registers.
1853 See "Cell Broadband Engine Registers V1.3", section 3.3.2.1. */
1854
1855 mfc_cq_dw0 = extract_unsigned_integer (buf + 32*seq[i], 8);
1856 mfc_cq_dw1 = extract_unsigned_integer (buf + 32*seq[i] + 8, 8);
1857 mfc_cq_dw2 = extract_unsigned_integer (buf + 32*seq[i] + 16, 8);
1858
1859 list_lsa = spu_mfc_get_bitfield (mfc_cq_dw0, 0, 14);
1860 list_size = spu_mfc_get_bitfield (mfc_cq_dw0, 15, 26);
1861 mfc_cmd_opcode = spu_mfc_get_bitfield (mfc_cq_dw0, 27, 34);
1862 mfc_cmd_tag = spu_mfc_get_bitfield (mfc_cq_dw0, 35, 39);
1863 list_valid_p = spu_mfc_get_bitfield (mfc_cq_dw0, 40, 40);
1864 rclass_id = spu_mfc_get_bitfield (mfc_cq_dw0, 41, 43);
1865 tclass_id = spu_mfc_get_bitfield (mfc_cq_dw0, 44, 46);
1866
1867 mfc_ea = spu_mfc_get_bitfield (mfc_cq_dw1, 0, 51) << 12
1868 | spu_mfc_get_bitfield (mfc_cq_dw2, 25, 36);
1869
1870 mfc_lsa = spu_mfc_get_bitfield (mfc_cq_dw2, 0, 13);
1871 mfc_size = spu_mfc_get_bitfield (mfc_cq_dw2, 14, 24);
1872 noop_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 37, 37);
1873 qw_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 38, 38);
1874 ea_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 39, 39);
1875 cmd_error_p = spu_mfc_get_bitfield (mfc_cq_dw2, 40, 40);
1876
1877 cmd_chain = make_cleanup_ui_out_tuple_begin_end (uiout, "cmd");
1878
1879 if (spu_mfc_opcode[mfc_cmd_opcode])
1880 ui_out_field_string (uiout, "opcode", spu_mfc_opcode[mfc_cmd_opcode]);
1881 else
1882 ui_out_field_int (uiout, "opcode", mfc_cmd_opcode);
1883
1884 ui_out_field_int (uiout, "tag", mfc_cmd_tag);
1885 ui_out_field_int (uiout, "tid", tclass_id);
1886 ui_out_field_int (uiout, "rid", rclass_id);
1887
1888 if (ea_valid_p)
1889 ui_out_field_fmt (uiout, "ea", "0x%s", phex (mfc_ea, 8));
1890 else
1891 ui_out_field_skip (uiout, "ea");
1892
1893 ui_out_field_fmt (uiout, "lsa", "0x%05x", mfc_lsa << 4);
1894 if (qw_valid_p)
1895 ui_out_field_fmt (uiout, "size", "0x%05x", mfc_size << 4);
1896 else
1897 ui_out_field_fmt (uiout, "size", "0x%05x", mfc_size);
1898
1899 if (list_valid_p)
1900 {
1901 ui_out_field_fmt (uiout, "lstaddr", "0x%05x", list_lsa << 3);
1902 ui_out_field_fmt (uiout, "lstsize", "0x%05x", list_size << 3);
1903 }
1904 else
1905 {
1906 ui_out_field_skip (uiout, "lstaddr");
1907 ui_out_field_skip (uiout, "lstsize");
1908 }
1909
1910 if (cmd_error_p)
1911 ui_out_field_string (uiout, "error_p", "*");
1912 else
1913 ui_out_field_skip (uiout, "error_p");
1914
1915 do_cleanups (cmd_chain);
1916
1917 if (!ui_out_is_mi_like_p (uiout))
1918 printf_filtered ("\n");
1919 }
1920
1921 do_cleanups (chain);
1922 }
1923
1924 static void
1925 info_spu_dma_command (char *args, int from_tty)
1926 {
1927 struct frame_info *frame = get_selected_frame (NULL);
1928 ULONGEST dma_info_type;
1929 ULONGEST dma_info_mask;
1930 ULONGEST dma_info_status;
1931 ULONGEST dma_info_stall_and_notify;
1932 ULONGEST dma_info_atomic_command_status;
1933 struct cleanup *chain;
1934 char annex[32];
1935 gdb_byte buf[1024];
1936 LONGEST len;
1937 int i, id;
1938
1939 if (gdbarch_bfd_arch_info (get_frame_arch (frame))->arch != bfd_arch_spu)
1940 error (_("\"info spu\" is only supported on the SPU architecture."));
1941
1942 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
1943
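/* The dma_info object starts with five 8-byte status words (40 bytes),
   followed by up to 16 MFC command-queue entries of 32 bytes each.  */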
1944 xsnprintf (annex, sizeof annex, "%d/dma_info", id);
1945 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1946 buf, 0, 40 + 16 * 32);
1947 if (len <= 0)
1948 error (_("Could not read dma_info."));
1949
1950 dma_info_type = extract_unsigned_integer (buf, 8);
1951 dma_info_mask = extract_unsigned_integer (buf + 8, 8);
1952 dma_info_status = extract_unsigned_integer (buf + 16, 8);
1953 dma_info_stall_and_notify = extract_unsigned_integer (buf + 24, 8);
1954 dma_info_atomic_command_status = extract_unsigned_integer (buf + 32, 8);
1955
1956 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoDMA");
1957
1958 if (ui_out_is_mi_like_p (uiout))
1959 {
1960 ui_out_field_fmt (uiout, "dma_info_type", "0x%s",
1961 phex_nz (dma_info_type, 4));
1962 ui_out_field_fmt (uiout, "dma_info_mask", "0x%s",
1963 phex_nz (dma_info_mask, 4));
1964 ui_out_field_fmt (uiout, "dma_info_status", "0x%s",
1965 phex_nz (dma_info_status, 4));
1966 ui_out_field_fmt (uiout, "dma_info_stall_and_notify", "0x%s",
1967 phex_nz (dma_info_stall_and_notify, 4));
1968 ui_out_field_fmt (uiout, "dma_info_atomic_command_status", "0x%s",
1969 phex_nz (dma_info_atomic_command_status, 4));
1970 }
1971 else
1972 {
1973 const char *query_msg = _("no query pending");
1974
1975 if (dma_info_type & 4)
1976 switch (dma_info_type & 3)
1977 {
1978 case 1: query_msg = _("'any' query pending"); break;
1979 case 2: query_msg = _("'all' query pending"); break;
1980 default: query_msg = _("undefined query type"); break;
1981 }
1982
1983 printf_filtered (_("Tag-Group Status 0x%s\n"),
1984 phex (dma_info_status, 4));
1985 printf_filtered (_("Tag-Group Mask 0x%s (%s)\n"),
1986 phex (dma_info_mask, 4), query_msg);
1987 printf_filtered (_("Stall-and-Notify 0x%s\n"),
1988 phex (dma_info_stall_and_notify, 4));
1989 printf_filtered (_("Atomic Cmd Status 0x%s\n"),
1990 phex (dma_info_atomic_command_status, 4));
1991 printf_filtered ("\n");
1992 }
1993
1994 info_spu_dma_cmdlist (buf + 40, 16);
1995 do_cleanups (chain);
1996 }
1997
1998 static void
1999 info_spu_proxydma_command (char *args, int from_tty)
2000 {
2001 struct frame_info *frame = get_selected_frame (NULL);
2002 ULONGEST dma_info_type;
2003 ULONGEST dma_info_mask;
2004 ULONGEST dma_info_status;
2005 struct cleanup *chain;
2006 char annex[32];
2007 gdb_byte buf[1024];
2008 LONGEST len;
2009 int i, id;
2010
2011 if (gdbarch_bfd_arch_info (get_frame_arch (frame))->arch != bfd_arch_spu)
2012 error (_("\"info spu\" is only supported on the SPU architecture."));
2013
2014 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
2015
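/* The proxydma_info object starts with three 8-byte status words
   (24 bytes), followed by up to 8 command-queue entries of 32 bytes
   each.  */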
2016 xsnprintf (annex, sizeof annex, "%d/proxydma_info", id);
2017 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2018 buf, 0, 24 + 8 * 32);
2019 if (len <= 0)
2020 error (_("Could not read proxydma_info."));
2021
2022 dma_info_type = extract_unsigned_integer (buf, 8);
2023 dma_info_mask = extract_unsigned_integer (buf + 8, 8);
2024 dma_info_status = extract_unsigned_integer (buf + 16, 8);
2025
2026 chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoProxyDMA");
2027
2028 if (ui_out_is_mi_like_p (uiout))
2029 {
2030 ui_out_field_fmt (uiout, "proxydma_info_type", "0x%s",
2031 phex_nz (dma_info_type, 4));
2032 ui_out_field_fmt (uiout, "proxydma_info_mask", "0x%s",
2033 phex_nz (dma_info_mask, 4));
2034 ui_out_field_fmt (uiout, "proxydma_info_status", "0x%s",
2035 phex_nz (dma_info_status, 4));
2036 }
2037 else
2038 {
2039 const char *query_msg;
2040
2041 switch (dma_info_type & 3)
2042 {
2043 case 0: query_msg = _("no query pending"); break;
2044 case 1: query_msg = _("'any' query pending"); break;
2045 case 2: query_msg = _("'all' query pending"); break;
2046 default: query_msg = _("undefined query type"); break;
2047 }
2048
2049 printf_filtered (_("Tag-Group Status 0x%s\n"),
2050 phex (dma_info_status, 4));
2051 printf_filtered (_("Tag-Group Mask 0x%s (%s)\n"),
2052 phex (dma_info_mask, 4), query_msg);
2053 printf_filtered ("\n");
2054 }
2055
2056 info_spu_dma_cmdlist (buf + 24, 8);
2057 do_cleanups (chain);
2058 }
2059
2060 static void
2061 info_spu_command (char *args, int from_tty)
2062 {
2063 printf_unfiltered (_("\"info spu\" must be followed by the name of an SPU facility.\n"));
2064 help_list (infospucmdlist, "info spu ", -1, gdb_stdout);
2065 }
2066
2067
2068 /* Set up gdbarch struct. */
2069
2070 static struct gdbarch *
2071 spu_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2072 {
2073 struct gdbarch *gdbarch;
2074 struct gdbarch_tdep *tdep;
2075
2076 /* Find a candidate among the list of pre-declared architectures. */
2077 arches = gdbarch_list_lookup_by_info (arches, &info);
2078 if (arches != NULL)
2079 return arches->gdbarch;
2080
2081 /* Is it for us? */
2082 if (info.bfd_arch_info->mach != bfd_mach_spu)
2083 return NULL;
2084
2085 /* Yes, create a new architecture. */
2086 tdep = XCALLOC (1, struct gdbarch_tdep);
2087 gdbarch = gdbarch_alloc (&info, tdep);
2088
2089 /* Disassembler. */
2090 set_gdbarch_print_insn (gdbarch, print_insn_spu);
2091
2092 /* Registers. */
2093 set_gdbarch_num_regs (gdbarch, SPU_NUM_REGS);
2094 set_gdbarch_num_pseudo_regs (gdbarch, SPU_NUM_PSEUDO_REGS);
2095 set_gdbarch_sp_regnum (gdbarch, SPU_SP_REGNUM);
2096 set_gdbarch_pc_regnum (gdbarch, SPU_PC_REGNUM);
2097 set_gdbarch_read_pc (gdbarch, spu_read_pc);
2098 set_gdbarch_write_pc (gdbarch, spu_write_pc);
2099 set_gdbarch_register_name (gdbarch, spu_register_name);
2100 set_gdbarch_register_type (gdbarch, spu_register_type);
2101 set_gdbarch_pseudo_register_read (gdbarch, spu_pseudo_register_read);
2102 set_gdbarch_pseudo_register_write (gdbarch, spu_pseudo_register_write);
2103 set_gdbarch_value_from_register (gdbarch, spu_value_from_register);
2104 set_gdbarch_register_reggroup_p (gdbarch, spu_register_reggroup_p);
2105
2106 /* Data types. */
2107 set_gdbarch_char_signed (gdbarch, 0);
2108 set_gdbarch_ptr_bit (gdbarch, 32);
2109 set_gdbarch_addr_bit (gdbarch, 32);
2110 set_gdbarch_short_bit (gdbarch, 16);
2111 set_gdbarch_int_bit (gdbarch, 32);
2112 set_gdbarch_long_bit (gdbarch, 32);
2113 set_gdbarch_long_long_bit (gdbarch, 64);
2114 set_gdbarch_float_bit (gdbarch, 32);
2115 set_gdbarch_double_bit (gdbarch, 64);
2116 set_gdbarch_long_double_bit (gdbarch, 64);
2117 set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
2118 set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
2119 set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);
2120
2121 /* Address conversion. */
2122 set_gdbarch_pointer_to_address (gdbarch, spu_pointer_to_address);
2123 set_gdbarch_integer_to_address (gdbarch, spu_integer_to_address);
2124
2125 /* Inferior function calls. */
2126 set_gdbarch_call_dummy_location (gdbarch, ON_STACK);
2127 set_gdbarch_frame_align (gdbarch, spu_frame_align);
2128 set_gdbarch_frame_red_zone_size (gdbarch, 2000);
2129 set_gdbarch_push_dummy_code (gdbarch, spu_push_dummy_code);
2130 set_gdbarch_push_dummy_call (gdbarch, spu_push_dummy_call);
2131 set_gdbarch_dummy_id (gdbarch, spu_dummy_id);
2132 set_gdbarch_return_value (gdbarch, spu_return_value);
2133
2134 /* Frame handling. */
2135 set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
2136 frame_unwind_append_unwinder (gdbarch, &spu_frame_unwind);
2137 frame_base_set_default (gdbarch, &spu_frame_base);
2138 set_gdbarch_unwind_pc (gdbarch, spu_unwind_pc);
2139 set_gdbarch_unwind_sp (gdbarch, spu_unwind_sp);
2140 set_gdbarch_virtual_frame_pointer (gdbarch, spu_virtual_frame_pointer);
2141 set_gdbarch_frame_args_skip (gdbarch, 0);
2142 set_gdbarch_skip_prologue (gdbarch, spu_skip_prologue);
2143 set_gdbarch_in_function_epilogue_p (gdbarch, spu_in_function_epilogue_p);
2144
2145 /* Breakpoints. */
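/* SPU instructions are a fixed 4 bytes long.  */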
2146 set_gdbarch_decr_pc_after_break (gdbarch, 4);
2147 set_gdbarch_breakpoint_from_pc (gdbarch, spu_breakpoint_from_pc);
2148 set_gdbarch_cannot_step_breakpoint (gdbarch, 1);
2149 set_gdbarch_software_single_step (gdbarch, spu_software_single_step);
2150
2151 /* Overlays. */
2152 set_gdbarch_overlay_update (gdbarch, spu_overlay_update);
2153
2154 return gdbarch;
2155 }
2156
2157 void
2158 _initialize_spu_tdep (void)
2159 {
2160 register_gdbarch_init (bfd_arch_spu, spu_gdbarch_init);
2161
2162 /* Add ourselves to the objfile event chain. */
2163 observer_attach_new_objfile (spu_overlay_new_objfile);
2164 spu_overlay_data = register_objfile_data ();
2165
2166 /* Add root prefix command for all "info spu" commands. */
2167 add_prefix_cmd ("spu", class_info, info_spu_command,
2168 _("Various SPU specific commands."),
2169 &infospucmdlist, "info spu ", 0, &infolist);
2170
2171 /* Add various "info spu" commands. */
2172 add_cmd ("event", class_info, info_spu_event_command,
2173 _("Display SPU event facility status.\n"),
2174 &infospucmdlist);
2175 add_cmd ("signal", class_info, info_spu_signal_command,
2176 _("Display SPU signal notification facility status.\n"),
2177 &infospucmdlist);
2178 add_cmd ("mailbox", class_info, info_spu_mailbox_command,
2179 _("Display SPU mailbox facility status.\n"),
2180 &infospucmdlist);
2181 add_cmd ("dma", class_info, info_spu_dma_command,
2182 _("Display MFC DMA status.\n"),
2183 &infospucmdlist);
2184 add_cmd ("proxydma", class_info, info_spu_proxydma_command,
2185 _("Display MFC Proxy-DMA status.\n"),
2186 &infospucmdlist);
2187 }