1 /* SPU target-dependent code for GDB, the GNU debugger.
2 Copyright (C) 2006-2014 Free Software Foundation, Inc.
4 Contributed by Ulrich Weigand <uweigand@de.ibm.com>.
5 Based on a port by Sid Manning <sid@us.ibm.com>.
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
23 #include "arch-utils.h"
28 #include "gdb_assert.h"
30 #include "frame-unwind.h"
31 #include "frame-base.h"
32 #include "trad-frame.h"
41 #include "reggroups.h"
42 #include "floatformat.h"
47 #include "exceptions.h"
51 /* The list of available "set spu " and "show spu " commands. */
52 static struct cmd_list_element
*setspucmdlist
= NULL
;
53 static struct cmd_list_element
*showspucmdlist
= NULL
;
55 /* Whether to stop for new SPE contexts. */
56 static int spu_stop_on_load_p
= 0;
57 /* Whether to automatically flush the SW-managed cache. */
58 static int spu_auto_flush_cache_p
= 1;
/* The tdep structure.  */
struct gdbarch_tdep
{
  /* The spufs ID identifying our address space.  */
  int id;

  /* SPU-specific vector type.  */
  struct type *spu_builtin_type_vec128;
};
72 /* SPU-specific vector type. */
74 spu_builtin_type_vec128 (struct gdbarch
*gdbarch
)
76 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
78 if (!tdep
->spu_builtin_type_vec128
)
80 const struct builtin_type
*bt
= builtin_type (gdbarch
);
83 t
= arch_composite_type (gdbarch
,
84 "__spu_builtin_type_vec128", TYPE_CODE_UNION
);
85 append_composite_type_field (t
, "uint128", bt
->builtin_int128
);
86 append_composite_type_field (t
, "v2_int64",
87 init_vector_type (bt
->builtin_int64
, 2));
88 append_composite_type_field (t
, "v4_int32",
89 init_vector_type (bt
->builtin_int32
, 4));
90 append_composite_type_field (t
, "v8_int16",
91 init_vector_type (bt
->builtin_int16
, 8));
92 append_composite_type_field (t
, "v16_int8",
93 init_vector_type (bt
->builtin_int8
, 16));
94 append_composite_type_field (t
, "v2_double",
95 init_vector_type (bt
->builtin_double
, 2));
96 append_composite_type_field (t
, "v4_float",
97 init_vector_type (bt
->builtin_float
, 4));
100 TYPE_NAME (t
) = "spu_builtin_type_vec128";
102 tdep
->spu_builtin_type_vec128
= t
;
105 return tdep
->spu_builtin_type_vec128
;
109 /* The list of available "info spu " commands. */
110 static struct cmd_list_element
*infospucmdlist
= NULL
;
/* Registers.  Return the name of register REG_NR, or NULL if it is
   out of range.  */
static const char *
spu_register_name (struct gdbarch *gdbarch, int reg_nr)
{
  static char *register_names[] =
    {
      "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
      "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
      "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
      "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
      "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
      "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
      "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
      "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
      "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
      "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
      "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
      "r96", "r97", "r98", "r99", "r100", "r101", "r102", "r103",
      "r104", "r105", "r106", "r107", "r108", "r109", "r110", "r111",
      "r112", "r113", "r114", "r115", "r116", "r117", "r118", "r119",
      "r120", "r121", "r122", "r123", "r124", "r125", "r126", "r127",
      "id", "pc", "sp", "fpscr", "srr0", "lslr", "decr", "decr_status"
    };

  if (reg_nr < 0)
    return NULL;
  /* Note: a negative REG_NR would also be caught here via the unsigned
     comparison, but the explicit check above keeps the intent clear.  */
  if (reg_nr >= sizeof register_names / sizeof *register_names)
    return NULL;

  return register_names[reg_nr];
}
147 spu_register_type (struct gdbarch
*gdbarch
, int reg_nr
)
149 if (reg_nr
< SPU_NUM_GPRS
)
150 return spu_builtin_type_vec128 (gdbarch
);
155 return builtin_type (gdbarch
)->builtin_uint32
;
158 return builtin_type (gdbarch
)->builtin_func_ptr
;
161 return builtin_type (gdbarch
)->builtin_data_ptr
;
163 case SPU_FPSCR_REGNUM
:
164 return builtin_type (gdbarch
)->builtin_uint128
;
166 case SPU_SRR0_REGNUM
:
167 return builtin_type (gdbarch
)->builtin_uint32
;
169 case SPU_LSLR_REGNUM
:
170 return builtin_type (gdbarch
)->builtin_uint32
;
172 case SPU_DECR_REGNUM
:
173 return builtin_type (gdbarch
)->builtin_uint32
;
175 case SPU_DECR_STATUS_REGNUM
:
176 return builtin_type (gdbarch
)->builtin_uint32
;
179 internal_error (__FILE__
, __LINE__
, _("invalid regnum"));
183 /* Pseudo registers for preferred slots - stack pointer. */
185 static enum register_status
186 spu_pseudo_register_read_spu (struct regcache
*regcache
, const char *regname
,
189 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
190 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
191 enum register_status status
;
197 status
= regcache_raw_read_unsigned (regcache
, SPU_ID_REGNUM
, &id
);
198 if (status
!= REG_VALID
)
200 xsnprintf (annex
, sizeof annex
, "%d/%s", (int) id
, regname
);
201 memset (reg
, 0, sizeof reg
);
202 target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
,
205 ul
= strtoulst ((char *) reg
, NULL
, 16);
206 store_unsigned_integer (buf
, 4, byte_order
, ul
);
210 static enum register_status
211 spu_pseudo_register_read (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
212 int regnum
, gdb_byte
*buf
)
217 enum register_status status
;
222 status
= regcache_raw_read (regcache
, SPU_RAW_SP_REGNUM
, reg
);
223 if (status
!= REG_VALID
)
225 memcpy (buf
, reg
, 4);
228 case SPU_FPSCR_REGNUM
:
229 status
= regcache_raw_read_unsigned (regcache
, SPU_ID_REGNUM
, &id
);
230 if (status
!= REG_VALID
)
232 xsnprintf (annex
, sizeof annex
, "%d/fpcr", (int) id
);
233 target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
, buf
, 0, 16);
236 case SPU_SRR0_REGNUM
:
237 return spu_pseudo_register_read_spu (regcache
, "srr0", buf
);
239 case SPU_LSLR_REGNUM
:
240 return spu_pseudo_register_read_spu (regcache
, "lslr", buf
);
242 case SPU_DECR_REGNUM
:
243 return spu_pseudo_register_read_spu (regcache
, "decr", buf
);
245 case SPU_DECR_STATUS_REGNUM
:
246 return spu_pseudo_register_read_spu (regcache
, "decr_status", buf
);
249 internal_error (__FILE__
, __LINE__
, _("invalid regnum"));
254 spu_pseudo_register_write_spu (struct regcache
*regcache
, const char *regname
,
257 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
258 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
263 regcache_raw_read_unsigned (regcache
, SPU_ID_REGNUM
, &id
);
264 xsnprintf (annex
, sizeof annex
, "%d/%s", (int) id
, regname
);
265 xsnprintf (reg
, sizeof reg
, "0x%s",
266 phex_nz (extract_unsigned_integer (buf
, 4, byte_order
), 4));
267 target_write (¤t_target
, TARGET_OBJECT_SPU
, annex
,
268 (gdb_byte
*) reg
, 0, strlen (reg
));
272 spu_pseudo_register_write (struct gdbarch
*gdbarch
, struct regcache
*regcache
,
273 int regnum
, const gdb_byte
*buf
)
282 regcache_raw_read (regcache
, SPU_RAW_SP_REGNUM
, reg
);
283 memcpy (reg
, buf
, 4);
284 regcache_raw_write (regcache
, SPU_RAW_SP_REGNUM
, reg
);
287 case SPU_FPSCR_REGNUM
:
288 regcache_raw_read_unsigned (regcache
, SPU_ID_REGNUM
, &id
);
289 xsnprintf (annex
, sizeof annex
, "%d/fpcr", (int) id
);
290 target_write (¤t_target
, TARGET_OBJECT_SPU
, annex
, buf
, 0, 16);
293 case SPU_SRR0_REGNUM
:
294 spu_pseudo_register_write_spu (regcache
, "srr0", buf
);
297 case SPU_LSLR_REGNUM
:
298 spu_pseudo_register_write_spu (regcache
, "lslr", buf
);
301 case SPU_DECR_REGNUM
:
302 spu_pseudo_register_write_spu (regcache
, "decr", buf
);
305 case SPU_DECR_STATUS_REGNUM
:
306 spu_pseudo_register_write_spu (regcache
, "decr_status", buf
);
310 internal_error (__FILE__
, __LINE__
, _("invalid regnum"));
314 /* Value conversion -- access scalar values at the preferred slot. */
316 static struct value
*
317 spu_value_from_register (struct gdbarch
*gdbarch
, struct type
*type
,
318 int regnum
, struct frame_id frame_id
)
320 struct value
*value
= default_value_from_register (gdbarch
, type
,
322 int len
= TYPE_LENGTH (type
);
324 if (regnum
< SPU_NUM_GPRS
&& len
< 16)
326 int preferred_slot
= len
< 4 ? 4 - len
: 0;
327 set_value_offset (value
, preferred_slot
);
333 /* Register groups. */
336 spu_register_reggroup_p (struct gdbarch
*gdbarch
, int regnum
,
337 struct reggroup
*group
)
339 /* Registers displayed via 'info regs'. */
340 if (group
== general_reggroup
)
343 /* Registers displayed via 'info float'. */
344 if (group
== float_reggroup
)
347 /* Registers that need to be saved/restored in order to
348 push or pop frames. */
349 if (group
== save_reggroup
|| group
== restore_reggroup
)
352 return default_register_reggroup_p (gdbarch
, regnum
, group
);
356 /* Address handling. */
359 spu_gdbarch_id (struct gdbarch
*gdbarch
)
361 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
364 /* The objfile architecture of a standalone SPU executable does not
365 provide an SPU ID. Retrieve it from the objfile's relocated
366 address range in this special case. */
368 && symfile_objfile
&& symfile_objfile
->obfd
369 && bfd_get_arch (symfile_objfile
->obfd
) == bfd_arch_spu
370 && symfile_objfile
->sections
!= symfile_objfile
->sections_end
)
371 id
= SPUADDR_SPU (obj_section_addr (symfile_objfile
->sections
));
377 spu_address_class_type_flags (int byte_size
, int dwarf2_addr_class
)
379 if (dwarf2_addr_class
== 1)
380 return TYPE_INSTANCE_FLAG_ADDRESS_CLASS_1
;
386 spu_address_class_type_flags_to_name (struct gdbarch
*gdbarch
, int type_flags
)
388 if (type_flags
& TYPE_INSTANCE_FLAG_ADDRESS_CLASS_1
)
395 spu_address_class_name_to_type_flags (struct gdbarch
*gdbarch
,
396 const char *name
, int *type_flags_ptr
)
398 if (strcmp (name
, "__ea") == 0)
400 *type_flags_ptr
= TYPE_INSTANCE_FLAG_ADDRESS_CLASS_1
;
408 spu_address_to_pointer (struct gdbarch
*gdbarch
,
409 struct type
*type
, gdb_byte
*buf
, CORE_ADDR addr
)
411 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
412 store_unsigned_integer (buf
, TYPE_LENGTH (type
), byte_order
,
413 SPUADDR_ADDR (addr
));
417 spu_pointer_to_address (struct gdbarch
*gdbarch
,
418 struct type
*type
, const gdb_byte
*buf
)
420 int id
= spu_gdbarch_id (gdbarch
);
421 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
423 = extract_unsigned_integer (buf
, TYPE_LENGTH (type
), byte_order
);
425 /* Do not convert __ea pointers. */
426 if (TYPE_ADDRESS_CLASS_1 (type
))
429 return addr
? SPUADDR (id
, addr
) : 0;
433 spu_integer_to_address (struct gdbarch
*gdbarch
,
434 struct type
*type
, const gdb_byte
*buf
)
436 int id
= spu_gdbarch_id (gdbarch
);
437 ULONGEST addr
= unpack_long (type
, buf
);
439 return SPUADDR (id
, addr
);
/* Decoding SPU instructions.  */

enum
  {
    op_lqd   = 0x34,
    op_lqx   = 0x3c4,
    op_lqa   = 0x61,
    op_lqr   = 0x67,
    op_stqd  = 0x24,
    op_stqx  = 0x144,
    op_stqa  = 0x41,
    op_stqr  = 0x47,

    op_il    = 0x081,
    op_ila   = 0x21,
    op_a     = 0x0c0,
    op_ai    = 0x1c,

    op_selb  = 0x8,

    op_br    = 0x64,
    op_bra   = 0x60,
    op_brsl  = 0x66,
    op_brasl = 0x62,
    op_brnz  = 0x42,
    op_brz   = 0x40,
    op_brhnz = 0x46,
    op_brhz  = 0x44,
    op_bi    = 0x1a8,
    op_bisl  = 0x1a9,
    op_biz   = 0x128,
    op_binz  = 0x129,
    op_bihz  = 0x12a,
    op_bihnz = 0x12b
  };

/* Decode an RR-format instruction: 11-bit opcode, RB, RA, RT fields.
   Returns non-zero and fills in the register numbers if INSN matches OP.  */
static int
is_rr (unsigned int insn, int op, int *rt, int *ra, int *rb)
{
  if ((insn >> 21) == op)
    {
      *rt = insn & 127;
      *ra = (insn >> 7) & 127;
      *rb = (insn >> 14) & 127;
      return 1;
    }

  return 0;
}
/* Decode an RRR-format instruction: 4-bit opcode, RT, RB, RA, RC fields.  */
static int
is_rrr (unsigned int insn, int op, int *rt, int *ra, int *rb, int *rc)
{
  if ((insn >> 28) == op)
    {
      *rt = (insn >> 21) & 127;
      *ra = (insn >> 7) & 127;
      *rb = (insn >> 14) & 127;
      *rc = insn & 127;
      return 1;
    }

  return 0;
}
/* Decode an RI7-format instruction: 11-bit opcode, sign-extended 7-bit
   immediate, RA, RT fields.  */
static int
is_ri7 (unsigned int insn, int op, int *rt, int *ra, int *i7)
{
  if ((insn >> 21) == op)
    {
      *rt = insn & 127;
      *ra = (insn >> 7) & 127;
      /* XOR/subtract trick sign-extends the 7-bit field.  */
      *i7 = (((insn >> 14) & 127) ^ 0x40) - 0x40;
      return 1;
    }

  return 0;
}
/* Decode an RI10-format instruction: 8-bit opcode, sign-extended 10-bit
   immediate, RA, RT fields.  */
static int
is_ri10 (unsigned int insn, int op, int *rt, int *ra, int *i10)
{
  if ((insn >> 24) == op)
    {
      *rt = insn & 127;
      *ra = (insn >> 7) & 127;
      /* XOR/subtract trick sign-extends the 10-bit field.  */
      *i10 = (((insn >> 14) & 0x3ff) ^ 0x200) - 0x200;
      return 1;
    }

  return 0;
}
/* Decode an RI16-format instruction: 9-bit opcode, sign-extended 16-bit
   immediate, RT field.  */
static int
is_ri16 (unsigned int insn, int op, int *rt, int *i16)
{
  if ((insn >> 23) == op)
    {
      *rt = insn & 127;
      /* XOR/subtract trick sign-extends the 16-bit field.  */
      *i16 = (((insn >> 7) & 0xffff) ^ 0x8000) - 0x8000;
      return 1;
    }

  return 0;
}
/* Decode an RI18-format instruction: 7-bit opcode, sign-extended 18-bit
   immediate, RT field.  */
static int
is_ri18 (unsigned int insn, int op, int *rt, int *i18)
{
  if ((insn >> 25) == op)
    {
      *rt = insn & 127;
      /* XOR/subtract trick sign-extends the 18-bit field.  */
      *i18 = (((insn >> 7) & 0x3ffff) ^ 0x20000) - 0x20000;
      return 1;
    }

  return 0;
}
563 is_branch (unsigned int insn
, int *offset
, int *reg
)
567 if (is_ri16 (insn
, op_br
, &rt
, &i16
)
568 || is_ri16 (insn
, op_brsl
, &rt
, &i16
)
569 || is_ri16 (insn
, op_brnz
, &rt
, &i16
)
570 || is_ri16 (insn
, op_brz
, &rt
, &i16
)
571 || is_ri16 (insn
, op_brhnz
, &rt
, &i16
)
572 || is_ri16 (insn
, op_brhz
, &rt
, &i16
))
574 *reg
= SPU_PC_REGNUM
;
579 if (is_ri16 (insn
, op_bra
, &rt
, &i16
)
580 || is_ri16 (insn
, op_brasl
, &rt
, &i16
))
587 if (is_ri7 (insn
, op_bi
, &rt
, reg
, &i7
)
588 || is_ri7 (insn
, op_bisl
, &rt
, reg
, &i7
)
589 || is_ri7 (insn
, op_biz
, &rt
, reg
, &i7
)
590 || is_ri7 (insn
, op_binz
, &rt
, reg
, &i7
)
591 || is_ri7 (insn
, op_bihz
, &rt
, reg
, &i7
)
592 || is_ri7 (insn
, op_bihnz
, &rt
, reg
, &i7
))
602 /* Prolog parsing. */
604 struct spu_prologue_data
606 /* Stack frame size. -1 if analysis was unsuccessful. */
609 /* How to find the CFA. The CFA is equal to SP at function entry. */
613 /* Offset relative to CFA where a register is saved. -1 if invalid. */
614 int reg_offset
[SPU_NUM_GPRS
];
618 spu_analyze_prologue (struct gdbarch
*gdbarch
,
619 CORE_ADDR start_pc
, CORE_ADDR end_pc
,
620 struct spu_prologue_data
*data
)
622 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
627 int reg_immed
[SPU_NUM_GPRS
];
629 CORE_ADDR prolog_pc
= start_pc
;
634 /* Initialize DATA to default values. */
637 data
->cfa_reg
= SPU_RAW_SP_REGNUM
;
638 data
->cfa_offset
= 0;
640 for (i
= 0; i
< SPU_NUM_GPRS
; i
++)
641 data
->reg_offset
[i
] = -1;
643 /* Set up REG_IMMED array. This is non-zero for a register if we know its
644 preferred slot currently holds this immediate value. */
645 for (i
= 0; i
< SPU_NUM_GPRS
; i
++)
648 /* Scan instructions until the first branch.
650 The following instructions are important prolog components:
652 - The first instruction to set up the stack pointer.
653 - The first instruction to set up the frame pointer.
654 - The first instruction to save the link register.
655 - The first instruction to save the backchain.
657 We return the instruction after the latest of these four,
658 or the incoming PC if none is found. The first instruction
659 to set up the stack pointer also defines the frame size.
661 Note that instructions saving incoming arguments to their stack
662 slots are not counted as important, because they are hard to
663 identify with certainty. This should not matter much, because
664 arguments are relevant only in code compiled with debug data,
665 and in such code the GDB core will advance until the first source
666 line anyway, using SAL data.
668 For purposes of stack unwinding, we analyze the following types
669 of instructions in addition:
671 - Any instruction adding to the current frame pointer.
672 - Any instruction loading an immediate constant into a register.
673 - Any instruction storing a register onto the stack.
675 These are used to compute the CFA and REG_OFFSET output. */
677 for (pc
= start_pc
; pc
< end_pc
; pc
+= 4)
680 int rt
, ra
, rb
, rc
, immed
;
682 if (target_read_memory (pc
, buf
, 4))
684 insn
= extract_unsigned_integer (buf
, 4, byte_order
);
686 /* AI is the typical instruction to set up a stack frame.
687 It is also used to initialize the frame pointer. */
688 if (is_ri10 (insn
, op_ai
, &rt
, &ra
, &immed
))
690 if (rt
== data
->cfa_reg
&& ra
== data
->cfa_reg
)
691 data
->cfa_offset
-= immed
;
693 if (rt
== SPU_RAW_SP_REGNUM
&& ra
== SPU_RAW_SP_REGNUM
701 else if (rt
== SPU_FP_REGNUM
&& ra
== SPU_RAW_SP_REGNUM
707 data
->cfa_reg
= SPU_FP_REGNUM
;
708 data
->cfa_offset
-= immed
;
712 /* A is used to set up stack frames of size >= 512 bytes.
713 If we have tracked the contents of the addend register,
714 we can handle this as well. */
715 else if (is_rr (insn
, op_a
, &rt
, &ra
, &rb
))
717 if (rt
== data
->cfa_reg
&& ra
== data
->cfa_reg
)
719 if (reg_immed
[rb
] != 0)
720 data
->cfa_offset
-= reg_immed
[rb
];
722 data
->cfa_reg
= -1; /* We don't know the CFA any more. */
725 if (rt
== SPU_RAW_SP_REGNUM
&& ra
== SPU_RAW_SP_REGNUM
731 if (reg_immed
[rb
] != 0)
732 data
->size
= -reg_immed
[rb
];
736 /* We need to track IL and ILA used to load immediate constants
737 in case they are later used as input to an A instruction. */
738 else if (is_ri16 (insn
, op_il
, &rt
, &immed
))
740 reg_immed
[rt
] = immed
;
742 if (rt
== SPU_RAW_SP_REGNUM
&& !found_sp
)
746 else if (is_ri18 (insn
, op_ila
, &rt
, &immed
))
748 reg_immed
[rt
] = immed
& 0x3ffff;
750 if (rt
== SPU_RAW_SP_REGNUM
&& !found_sp
)
754 /* STQD is used to save registers to the stack. */
755 else if (is_ri10 (insn
, op_stqd
, &rt
, &ra
, &immed
))
757 if (ra
== data
->cfa_reg
)
758 data
->reg_offset
[rt
] = data
->cfa_offset
- (immed
<< 4);
760 if (ra
== data
->cfa_reg
&& rt
== SPU_LR_REGNUM
767 if (ra
== SPU_RAW_SP_REGNUM
768 && (found_sp
? immed
== 0 : rt
== SPU_RAW_SP_REGNUM
)
776 /* _start uses SELB to set up the stack pointer. */
777 else if (is_rrr (insn
, op_selb
, &rt
, &ra
, &rb
, &rc
))
779 if (rt
== SPU_RAW_SP_REGNUM
&& !found_sp
)
783 /* We terminate if we find a branch. */
784 else if (is_branch (insn
, &immed
, &ra
))
789 /* If we successfully parsed until here, and didn't find any instruction
790 modifying SP, we assume we have a frameless function. */
794 /* Return cooked instead of raw SP. */
795 if (data
->cfa_reg
== SPU_RAW_SP_REGNUM
)
796 data
->cfa_reg
= SPU_SP_REGNUM
;
801 /* Return the first instruction after the prologue starting at PC. */
803 spu_skip_prologue (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
805 struct spu_prologue_data data
;
806 return spu_analyze_prologue (gdbarch
, pc
, (CORE_ADDR
)-1, &data
);
809 /* Return the frame pointer in use at address PC. */
811 spu_virtual_frame_pointer (struct gdbarch
*gdbarch
, CORE_ADDR pc
,
812 int *reg
, LONGEST
*offset
)
814 struct spu_prologue_data data
;
815 spu_analyze_prologue (gdbarch
, pc
, (CORE_ADDR
)-1, &data
);
817 if (data
.size
!= -1 && data
.cfa_reg
!= -1)
819 /* The 'frame pointer' address is CFA minus frame size. */
821 *offset
= data
.cfa_offset
- data
.size
;
825 /* ??? We don't really know ... */
826 *reg
= SPU_SP_REGNUM
;
831 /* Return true if we are in the function's epilogue, i.e. after the
832 instruction that destroyed the function's stack frame.
834 1) scan forward from the point of execution:
835 a) If you find an instruction that modifies the stack pointer
836 or transfers control (except a return), execution is not in
838 b) Stop scanning if you find a return instruction or reach the
839 end of the function or reach the hard limit for the size of
841 2) scan backward from the point of execution:
842 a) If you find an instruction that modifies the stack pointer,
843 execution *is* in an epilogue, return.
844 b) Stop scanning if you reach an instruction that transfers
845 control or the beginning of the function or reach the hard
846 limit for the size of an epilogue. */
849 spu_in_function_epilogue_p (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
851 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
852 CORE_ADDR scan_pc
, func_start
, func_end
, epilogue_start
, epilogue_end
;
855 int rt
, ra
, rb
, immed
;
857 /* Find the search limits based on function boundaries and hard limit.
858 We assume the epilogue can be up to 64 instructions long. */
860 const int spu_max_epilogue_size
= 64 * 4;
862 if (!find_pc_partial_function (pc
, NULL
, &func_start
, &func_end
))
865 if (pc
- func_start
< spu_max_epilogue_size
)
866 epilogue_start
= func_start
;
868 epilogue_start
= pc
- spu_max_epilogue_size
;
870 if (func_end
- pc
< spu_max_epilogue_size
)
871 epilogue_end
= func_end
;
873 epilogue_end
= pc
+ spu_max_epilogue_size
;
875 /* Scan forward until next 'bi $0'. */
877 for (scan_pc
= pc
; scan_pc
< epilogue_end
; scan_pc
+= 4)
879 if (target_read_memory (scan_pc
, buf
, 4))
881 insn
= extract_unsigned_integer (buf
, 4, byte_order
);
883 if (is_branch (insn
, &immed
, &ra
))
885 if (immed
== 0 && ra
== SPU_LR_REGNUM
)
891 if (is_ri10 (insn
, op_ai
, &rt
, &ra
, &immed
)
892 || is_rr (insn
, op_a
, &rt
, &ra
, &rb
)
893 || is_ri10 (insn
, op_lqd
, &rt
, &ra
, &immed
))
895 if (rt
== SPU_RAW_SP_REGNUM
)
900 if (scan_pc
>= epilogue_end
)
903 /* Scan backward until adjustment to stack pointer (R1). */
905 for (scan_pc
= pc
- 4; scan_pc
>= epilogue_start
; scan_pc
-= 4)
907 if (target_read_memory (scan_pc
, buf
, 4))
909 insn
= extract_unsigned_integer (buf
, 4, byte_order
);
911 if (is_branch (insn
, &immed
, &ra
))
914 if (is_ri10 (insn
, op_ai
, &rt
, &ra
, &immed
)
915 || is_rr (insn
, op_a
, &rt
, &ra
, &rb
)
916 || is_ri10 (insn
, op_lqd
, &rt
, &ra
, &immed
))
918 if (rt
== SPU_RAW_SP_REGNUM
)
927 /* Normal stack frames. */
929 struct spu_unwind_cache
932 CORE_ADDR frame_base
;
933 CORE_ADDR local_base
;
935 struct trad_frame_saved_reg
*saved_regs
;
938 static struct spu_unwind_cache
*
939 spu_frame_unwind_cache (struct frame_info
*this_frame
,
940 void **this_prologue_cache
)
942 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
943 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
944 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
945 struct spu_unwind_cache
*info
;
946 struct spu_prologue_data data
;
947 CORE_ADDR id
= tdep
->id
;
950 if (*this_prologue_cache
)
951 return *this_prologue_cache
;
953 info
= FRAME_OBSTACK_ZALLOC (struct spu_unwind_cache
);
954 *this_prologue_cache
= info
;
955 info
->saved_regs
= trad_frame_alloc_saved_regs (this_frame
);
956 info
->frame_base
= 0;
957 info
->local_base
= 0;
959 /* Find the start of the current function, and analyze its prologue. */
960 info
->func
= get_frame_func (this_frame
);
963 /* Fall back to using the current PC as frame ID. */
964 info
->func
= get_frame_pc (this_frame
);
968 spu_analyze_prologue (gdbarch
, info
->func
, get_frame_pc (this_frame
),
971 /* If successful, use prologue analysis data. */
972 if (data
.size
!= -1 && data
.cfa_reg
!= -1)
977 /* Determine CFA via unwound CFA_REG plus CFA_OFFSET. */
978 get_frame_register (this_frame
, data
.cfa_reg
, buf
);
979 cfa
= extract_unsigned_integer (buf
, 4, byte_order
) + data
.cfa_offset
;
980 cfa
= SPUADDR (id
, cfa
);
982 /* Call-saved register slots. */
983 for (i
= 0; i
< SPU_NUM_GPRS
; i
++)
984 if (i
== SPU_LR_REGNUM
985 || (i
>= SPU_SAVED1_REGNUM
&& i
<= SPU_SAVEDN_REGNUM
))
986 if (data
.reg_offset
[i
] != -1)
987 info
->saved_regs
[i
].addr
= cfa
- data
.reg_offset
[i
];
990 info
->frame_base
= cfa
;
991 info
->local_base
= cfa
- data
.size
;
994 /* Otherwise, fall back to reading the backchain link. */
1002 /* Get local store limit. */
1003 lslr
= get_frame_register_unsigned (this_frame
, SPU_LSLR_REGNUM
);
1005 lslr
= (ULONGEST
) -1;
1007 /* Get the backchain. */
1008 reg
= get_frame_register_unsigned (this_frame
, SPU_SP_REGNUM
);
1009 status
= safe_read_memory_integer (SPUADDR (id
, reg
), 4, byte_order
,
1012 /* A zero backchain terminates the frame chain. Also, sanity
1013 check against the local store size limit. */
1014 if (status
&& backchain
> 0 && backchain
<= lslr
)
1016 /* Assume the link register is saved into its slot. */
1017 if (backchain
+ 16 <= lslr
)
1018 info
->saved_regs
[SPU_LR_REGNUM
].addr
= SPUADDR (id
,
1022 info
->frame_base
= SPUADDR (id
, backchain
);
1023 info
->local_base
= SPUADDR (id
, reg
);
1027 /* If we didn't find a frame, we cannot determine SP / return address. */
1028 if (info
->frame_base
== 0)
1031 /* The previous SP is equal to the CFA. */
1032 trad_frame_set_value (info
->saved_regs
, SPU_SP_REGNUM
,
1033 SPUADDR_ADDR (info
->frame_base
));
1035 /* Read full contents of the unwound link register in order to
1036 be able to determine the return address. */
1037 if (trad_frame_addr_p (info
->saved_regs
, SPU_LR_REGNUM
))
1038 target_read_memory (info
->saved_regs
[SPU_LR_REGNUM
].addr
, buf
, 16);
1040 get_frame_register (this_frame
, SPU_LR_REGNUM
, buf
);
1042 /* Normally, the return address is contained in the slot 0 of the
1043 link register, and slots 1-3 are zero. For an overlay return,
1044 slot 0 contains the address of the overlay manager return stub,
1045 slot 1 contains the partition number of the overlay section to
1046 be returned to, and slot 2 contains the return address within
1047 that section. Return the latter address in that case. */
1048 if (extract_unsigned_integer (buf
+ 8, 4, byte_order
) != 0)
1049 trad_frame_set_value (info
->saved_regs
, SPU_PC_REGNUM
,
1050 extract_unsigned_integer (buf
+ 8, 4, byte_order
));
1052 trad_frame_set_value (info
->saved_regs
, SPU_PC_REGNUM
,
1053 extract_unsigned_integer (buf
, 4, byte_order
));
1059 spu_frame_this_id (struct frame_info
*this_frame
,
1060 void **this_prologue_cache
, struct frame_id
*this_id
)
1062 struct spu_unwind_cache
*info
=
1063 spu_frame_unwind_cache (this_frame
, this_prologue_cache
);
1065 if (info
->frame_base
== 0)
1068 *this_id
= frame_id_build (info
->frame_base
, info
->func
);
1071 static struct value
*
1072 spu_frame_prev_register (struct frame_info
*this_frame
,
1073 void **this_prologue_cache
, int regnum
)
1075 struct spu_unwind_cache
*info
1076 = spu_frame_unwind_cache (this_frame
, this_prologue_cache
);
1078 /* Special-case the stack pointer. */
1079 if (regnum
== SPU_RAW_SP_REGNUM
)
1080 regnum
= SPU_SP_REGNUM
;
1082 return trad_frame_get_prev_register (this_frame
, info
->saved_regs
, regnum
);
1085 static const struct frame_unwind spu_frame_unwind
= {
1087 default_frame_unwind_stop_reason
,
1089 spu_frame_prev_register
,
1091 default_frame_sniffer
1095 spu_frame_base_address (struct frame_info
*this_frame
, void **this_cache
)
1097 struct spu_unwind_cache
*info
1098 = spu_frame_unwind_cache (this_frame
, this_cache
);
1099 return info
->local_base
;
1102 static const struct frame_base spu_frame_base
= {
1104 spu_frame_base_address
,
1105 spu_frame_base_address
,
1106 spu_frame_base_address
1110 spu_unwind_pc (struct gdbarch
*gdbarch
, struct frame_info
*next_frame
)
1112 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1113 CORE_ADDR pc
= frame_unwind_register_unsigned (next_frame
, SPU_PC_REGNUM
);
1114 /* Mask off interrupt enable bit. */
1115 return SPUADDR (tdep
->id
, pc
& -4);
1119 spu_unwind_sp (struct gdbarch
*gdbarch
, struct frame_info
*next_frame
)
1121 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1122 CORE_ADDR sp
= frame_unwind_register_unsigned (next_frame
, SPU_SP_REGNUM
);
1123 return SPUADDR (tdep
->id
, sp
);
1127 spu_read_pc (struct regcache
*regcache
)
1129 struct gdbarch_tdep
*tdep
= gdbarch_tdep (get_regcache_arch (regcache
));
1131 regcache_cooked_read_unsigned (regcache
, SPU_PC_REGNUM
, &pc
);
1132 /* Mask off interrupt enable bit. */
1133 return SPUADDR (tdep
->id
, pc
& -4);
1137 spu_write_pc (struct regcache
*regcache
, CORE_ADDR pc
)
1139 /* Keep interrupt enabled state unchanged. */
1142 regcache_cooked_read_unsigned (regcache
, SPU_PC_REGNUM
, &old_pc
);
1143 regcache_cooked_write_unsigned (regcache
, SPU_PC_REGNUM
,
1144 (SPUADDR_ADDR (pc
) & -4) | (old_pc
& 3));
1148 /* Cell/B.E. cross-architecture unwinder support. */
1150 struct spu2ppu_cache
1152 struct frame_id frame_id
;
1153 struct regcache
*regcache
;
1156 static struct gdbarch
*
1157 spu2ppu_prev_arch (struct frame_info
*this_frame
, void **this_cache
)
1159 struct spu2ppu_cache
*cache
= *this_cache
;
1160 return get_regcache_arch (cache
->regcache
);
1164 spu2ppu_this_id (struct frame_info
*this_frame
,
1165 void **this_cache
, struct frame_id
*this_id
)
1167 struct spu2ppu_cache
*cache
= *this_cache
;
1168 *this_id
= cache
->frame_id
;
1171 static struct value
*
1172 spu2ppu_prev_register (struct frame_info
*this_frame
,
1173 void **this_cache
, int regnum
)
1175 struct spu2ppu_cache
*cache
= *this_cache
;
1176 struct gdbarch
*gdbarch
= get_regcache_arch (cache
->regcache
);
1179 buf
= alloca (register_size (gdbarch
, regnum
));
1180 regcache_cooked_read (cache
->regcache
, regnum
, buf
);
1181 return frame_unwind_got_bytes (this_frame
, regnum
, buf
);
1185 spu2ppu_sniffer (const struct frame_unwind
*self
,
1186 struct frame_info
*this_frame
, void **this_prologue_cache
)
1188 struct gdbarch
*gdbarch
= get_frame_arch (this_frame
);
1189 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1190 CORE_ADDR base
, func
, backchain
;
1193 if (gdbarch_bfd_arch_info (target_gdbarch ())->arch
== bfd_arch_spu
)
1196 base
= get_frame_sp (this_frame
);
1197 func
= get_frame_pc (this_frame
);
1198 if (target_read_memory (base
, buf
, 4))
1200 backchain
= extract_unsigned_integer (buf
, 4, byte_order
);
1204 struct frame_info
*fi
;
1206 struct spu2ppu_cache
*cache
1207 = FRAME_OBSTACK_CALLOC (1, struct spu2ppu_cache
);
1209 cache
->frame_id
= frame_id_build (base
+ 16, func
);
1211 for (fi
= get_next_frame (this_frame
); fi
; fi
= get_next_frame (fi
))
1212 if (gdbarch_bfd_arch_info (get_frame_arch (fi
))->arch
!= bfd_arch_spu
)
1217 cache
->regcache
= frame_save_as_regcache (fi
);
1218 *this_prologue_cache
= cache
;
1223 struct regcache
*regcache
;
1224 regcache
= get_thread_arch_regcache (inferior_ptid
, target_gdbarch ());
1225 cache
->regcache
= regcache_dup (regcache
);
1226 *this_prologue_cache
= cache
;
1235 spu2ppu_dealloc_cache (struct frame_info
*self
, void *this_cache
)
1237 struct spu2ppu_cache
*cache
= this_cache
;
1238 regcache_xfree (cache
->regcache
);
1241 static const struct frame_unwind spu2ppu_unwind
= {
1243 default_frame_unwind_stop_reason
,
1245 spu2ppu_prev_register
,
1248 spu2ppu_dealloc_cache
,
1253 /* Function calling convention. */
1256 spu_frame_align (struct gdbarch
*gdbarch
, CORE_ADDR sp
)
1262 spu_push_dummy_code (struct gdbarch
*gdbarch
, CORE_ADDR sp
, CORE_ADDR funaddr
,
1263 struct value
**args
, int nargs
, struct type
*value_type
,
1264 CORE_ADDR
*real_pc
, CORE_ADDR
*bp_addr
,
1265 struct regcache
*regcache
)
1267 /* Allocate space sufficient for a breakpoint, keeping the stack aligned. */
1268 sp
= (sp
- 4) & ~15;
1269 /* Store the address of that breakpoint */
1271 /* The call starts at the callee's entry point. */
1278 spu_scalar_value_p (struct type
*type
)
1280 switch (TYPE_CODE (type
))
1283 case TYPE_CODE_ENUM
:
1284 case TYPE_CODE_RANGE
:
1285 case TYPE_CODE_CHAR
:
1286 case TYPE_CODE_BOOL
:
1289 return TYPE_LENGTH (type
) <= 16;
1297 spu_value_to_regcache (struct regcache
*regcache
, int regnum
,
1298 struct type
*type
, const gdb_byte
*in
)
1300 int len
= TYPE_LENGTH (type
);
1302 if (spu_scalar_value_p (type
))
1304 int preferred_slot
= len
< 4 ? 4 - len
: 0;
1305 regcache_cooked_write_part (regcache
, regnum
, preferred_slot
, len
, in
);
1311 regcache_cooked_write (regcache
, regnum
++, in
);
1317 regcache_cooked_write_part (regcache
, regnum
, 0, len
, in
);
1322 spu_regcache_to_value (struct regcache
*regcache
, int regnum
,
1323 struct type
*type
, gdb_byte
*out
)
1325 int len
= TYPE_LENGTH (type
);
1327 if (spu_scalar_value_p (type
))
1329 int preferred_slot
= len
< 4 ? 4 - len
: 0;
1330 regcache_cooked_read_part (regcache
, regnum
, preferred_slot
, len
, out
);
1336 regcache_cooked_read (regcache
, regnum
++, out
);
1342 regcache_cooked_read_part (regcache
, regnum
, 0, len
, out
);
1347 spu_push_dummy_call (struct gdbarch
*gdbarch
, struct value
*function
,
1348 struct regcache
*regcache
, CORE_ADDR bp_addr
,
1349 int nargs
, struct value
**args
, CORE_ADDR sp
,
1350 int struct_return
, CORE_ADDR struct_addr
)
1352 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1355 int regnum
= SPU_ARG1_REGNUM
;
1359 /* Set the return address. */
1360 memset (buf
, 0, sizeof buf
);
1361 store_unsigned_integer (buf
, 4, byte_order
, SPUADDR_ADDR (bp_addr
));
1362 regcache_cooked_write (regcache
, SPU_LR_REGNUM
, buf
);
1364 /* If STRUCT_RETURN is true, then the struct return address (in
1365 STRUCT_ADDR) will consume the first argument-passing register.
1366 Both adjust the register count and store that value. */
1369 memset (buf
, 0, sizeof buf
);
1370 store_unsigned_integer (buf
, 4, byte_order
, SPUADDR_ADDR (struct_addr
));
1371 regcache_cooked_write (regcache
, regnum
++, buf
);
1374 /* Fill in argument registers. */
1375 for (i
= 0; i
< nargs
; i
++)
1377 struct value
*arg
= args
[i
];
1378 struct type
*type
= check_typedef (value_type (arg
));
1379 const gdb_byte
*contents
= value_contents (arg
);
1380 int n_regs
= align_up (TYPE_LENGTH (type
), 16) / 16;
1382 /* If the argument doesn't wholly fit into registers, it and
1383 all subsequent arguments go to the stack. */
1384 if (regnum
+ n_regs
- 1 > SPU_ARGN_REGNUM
)
1390 spu_value_to_regcache (regcache
, regnum
, type
, contents
);
1394 /* Overflow arguments go to the stack. */
1395 if (stack_arg
!= -1)
1399 /* Allocate all required stack size. */
1400 for (i
= stack_arg
; i
< nargs
; i
++)
1402 struct type
*type
= check_typedef (value_type (args
[i
]));
1403 sp
-= align_up (TYPE_LENGTH (type
), 16);
1406 /* Fill in stack arguments. */
1408 for (i
= stack_arg
; i
< nargs
; i
++)
1410 struct value
*arg
= args
[i
];
1411 struct type
*type
= check_typedef (value_type (arg
));
1412 int len
= TYPE_LENGTH (type
);
1415 if (spu_scalar_value_p (type
))
1416 preferred_slot
= len
< 4 ? 4 - len
: 0;
1420 target_write_memory (ap
+ preferred_slot
, value_contents (arg
), len
);
1421 ap
+= align_up (TYPE_LENGTH (type
), 16);
1425 /* Allocate stack frame header. */
1428 /* Store stack back chain. */
1429 regcache_cooked_read (regcache
, SPU_RAW_SP_REGNUM
, buf
);
1430 target_write_memory (sp
, buf
, 16);
1432 /* Finally, update all slots of the SP register. */
1433 sp_delta
= sp
- extract_unsigned_integer (buf
, 4, byte_order
);
1434 for (i
= 0; i
< 4; i
++)
1436 CORE_ADDR sp_slot
= extract_unsigned_integer (buf
+ 4*i
, 4, byte_order
);
1437 store_unsigned_integer (buf
+ 4*i
, 4, byte_order
, sp_slot
+ sp_delta
);
1439 regcache_cooked_write (regcache
, SPU_RAW_SP_REGNUM
, buf
);
1444 static struct frame_id
1445 spu_dummy_id (struct gdbarch
*gdbarch
, struct frame_info
*this_frame
)
1447 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1448 CORE_ADDR pc
= get_frame_register_unsigned (this_frame
, SPU_PC_REGNUM
);
1449 CORE_ADDR sp
= get_frame_register_unsigned (this_frame
, SPU_SP_REGNUM
);
1450 return frame_id_build (SPUADDR (tdep
->id
, sp
), SPUADDR (tdep
->id
, pc
& -4));
1453 /* Function return value access. */
1455 static enum return_value_convention
1456 spu_return_value (struct gdbarch
*gdbarch
, struct value
*function
,
1457 struct type
*type
, struct regcache
*regcache
,
1458 gdb_byte
*out
, const gdb_byte
*in
)
1460 struct type
*func_type
= function
? value_type (function
) : NULL
;
1461 enum return_value_convention rvc
;
1462 int opencl_vector
= 0;
1466 func_type
= check_typedef (func_type
);
1468 if (TYPE_CODE (func_type
) == TYPE_CODE_PTR
)
1469 func_type
= check_typedef (TYPE_TARGET_TYPE (func_type
));
1471 if (TYPE_CODE (func_type
) == TYPE_CODE_FUNC
1472 && TYPE_CALLING_CONVENTION (func_type
) == DW_CC_GDB_IBM_OpenCL
1473 && TYPE_CODE (type
) == TYPE_CODE_ARRAY
1474 && TYPE_VECTOR (type
))
1478 if (TYPE_LENGTH (type
) <= (SPU_ARGN_REGNUM
- SPU_ARG1_REGNUM
+ 1) * 16)
1479 rvc
= RETURN_VALUE_REGISTER_CONVENTION
;
1481 rvc
= RETURN_VALUE_STRUCT_CONVENTION
;
1487 case RETURN_VALUE_REGISTER_CONVENTION
:
1488 if (opencl_vector
&& TYPE_LENGTH (type
) == 2)
1489 regcache_cooked_write_part (regcache
, SPU_ARG1_REGNUM
, 2, 2, in
);
1491 spu_value_to_regcache (regcache
, SPU_ARG1_REGNUM
, type
, in
);
1494 case RETURN_VALUE_STRUCT_CONVENTION
:
1495 error (_("Cannot set function return value."));
1503 case RETURN_VALUE_REGISTER_CONVENTION
:
1504 if (opencl_vector
&& TYPE_LENGTH (type
) == 2)
1505 regcache_cooked_read_part (regcache
, SPU_ARG1_REGNUM
, 2, 2, out
);
1507 spu_regcache_to_value (regcache
, SPU_ARG1_REGNUM
, type
, out
);
1510 case RETURN_VALUE_STRUCT_CONVENTION
:
1511 error (_("Function return value unknown."));
1522 static const gdb_byte
*
1523 spu_breakpoint_from_pc (struct gdbarch
*gdbarch
,
1524 CORE_ADDR
* pcptr
, int *lenptr
)
1526 static const gdb_byte breakpoint
[] = { 0x00, 0x00, 0x3f, 0xff };
1528 *lenptr
= sizeof breakpoint
;
1533 spu_memory_remove_breakpoint (struct gdbarch
*gdbarch
,
1534 struct bp_target_info
*bp_tgt
)
1536 /* We work around a problem in combined Cell/B.E. debugging here. Consider
1537 that in a combined application, we have some breakpoints inserted in SPU
1538 code, and now the application forks (on the PPU side). GDB common code
1539 will assume that the fork system call copied all breakpoints into the new
1540 process' address space, and that all those copies now need to be removed
1541 (see breakpoint.c:detach_breakpoints).
1543 While this is certainly true for PPU side breakpoints, it is not true
1544 for SPU side breakpoints. fork will clone the SPU context file
1545 descriptors, so that all the existing SPU contexts are in accessible
1546 in the new process. However, the contents of the SPU contexts themselves
1547 are *not* cloned. Therefore the effect of detach_breakpoints is to
1548 remove SPU breakpoints from the *original* SPU context's local store
1549 -- this is not the correct behaviour.
1551 The workaround is to check whether the PID we are asked to remove this
1552 breakpoint from (i.e. ptid_get_pid (inferior_ptid)) is different from the
1553 PID of the current inferior (i.e. current_inferior ()->pid). This is only
1554 true in the context of detach_breakpoints. If so, we simply do nothing.
1555 [ Note that for the fork child process, it does not matter if breakpoints
1556 remain inserted, because those SPU contexts are not runnable anyway --
1557 the Linux kernel allows only the original process to invoke spu_run. */
1559 if (ptid_get_pid (inferior_ptid
) != current_inferior ()->pid
)
1562 return default_memory_remove_breakpoint (gdbarch
, bp_tgt
);
1566 /* Software single-stepping support. */
1569 spu_software_single_step (struct frame_info
*frame
)
1571 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
1572 struct address_space
*aspace
= get_frame_address_space (frame
);
1573 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1574 CORE_ADDR pc
, next_pc
;
1580 pc
= get_frame_pc (frame
);
1582 if (target_read_memory (pc
, buf
, 4))
1584 insn
= extract_unsigned_integer (buf
, 4, byte_order
);
1586 /* Get local store limit. */
1587 lslr
= get_frame_register_unsigned (frame
, SPU_LSLR_REGNUM
);
1589 lslr
= (ULONGEST
) -1;
1591 /* Next sequential instruction is at PC + 4, except if the current
1592 instruction is a PPE-assisted call, in which case it is at PC + 8.
1593 Wrap around LS limit to be on the safe side. */
1594 if ((insn
& 0xffffff00) == 0x00002100)
1595 next_pc
= (SPUADDR_ADDR (pc
) + 8) & lslr
;
1597 next_pc
= (SPUADDR_ADDR (pc
) + 4) & lslr
;
1599 insert_single_step_breakpoint (gdbarch
,
1600 aspace
, SPUADDR (SPUADDR_SPU (pc
), next_pc
));
1602 if (is_branch (insn
, &offset
, ®
))
1604 CORE_ADDR target
= offset
;
1606 if (reg
== SPU_PC_REGNUM
)
1607 target
+= SPUADDR_ADDR (pc
);
1612 if (get_frame_register_bytes (frame
, reg
, 0, 4, buf
,
1614 target
+= extract_unsigned_integer (buf
, 4, byte_order
) & -4;
1618 throw_error (OPTIMIZED_OUT_ERROR
,
1619 _("Could not determine address of "
1620 "single-step breakpoint."));
1622 throw_error (NOT_AVAILABLE_ERROR
,
1623 _("Could not determine address of "
1624 "single-step breakpoint."));
1628 target
= target
& lslr
;
1629 if (target
!= next_pc
)
1630 insert_single_step_breakpoint (gdbarch
, aspace
,
1631 SPUADDR (SPUADDR_SPU (pc
), target
));
1638 /* Longjmp support. */
1641 spu_get_longjmp_target (struct frame_info
*frame
, CORE_ADDR
*pc
)
1643 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
1644 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1645 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
1650 /* Jump buffer is pointed to by the argument register $r3. */
1651 if (!get_frame_register_bytes (frame
, SPU_ARG1_REGNUM
, 0, 4, buf
,
1655 jb_addr
= extract_unsigned_integer (buf
, 4, byte_order
);
1656 if (target_read_memory (SPUADDR (tdep
->id
, jb_addr
), buf
, 4))
1659 *pc
= extract_unsigned_integer (buf
, 4, byte_order
);
1660 *pc
= SPUADDR (tdep
->id
, *pc
);
/* Per-call data handed to the disassembler's print_address callback:
   the gdbarch plus the SPU context ID stripped from the address.  */
struct spu_dis_asm_data
{
  struct gdbarch *gdbarch;
  int id;
};
1674 spu_dis_asm_print_address (bfd_vma addr
, struct disassemble_info
*info
)
1676 struct spu_dis_asm_data
*data
= info
->application_data
;
1677 print_address (data
->gdbarch
, SPUADDR (data
->id
, addr
), info
->stream
);
1681 gdb_print_insn_spu (bfd_vma memaddr
, struct disassemble_info
*info
)
1683 /* The opcodes disassembler does 18-bit address arithmetic. Make
1684 sure the SPU ID encoded in the high bits is added back when we
1685 call print_address. */
1686 struct disassemble_info spu_info
= *info
;
1687 struct spu_dis_asm_data data
;
1688 data
.gdbarch
= info
->application_data
;
1689 data
.id
= SPUADDR_SPU (memaddr
);
1691 spu_info
.application_data
= &data
;
1692 spu_info
.print_address_func
= spu_dis_asm_print_address
;
1693 return print_insn_spu (memaddr
, &spu_info
);
/* Target overlays for the SPU overlay manager.

   See the documentation of simple_overlay_update for how the
   interface is supposed to work.

   Data structures used by the overlay manager:

   struct ovly_table
     {
        u32 vma;
        u32 size;
        u32 pos;
        u32 buf;
     } _ovly_table[];   -- one entry per overlay section

   struct ovly_buf_table
     {
        u32 mapped;
     } _ovly_buf_table[];  -- one entry per overlay buffer

   _ovly_table should never change.

   Both tables are aligned to a 16-byte boundary, the symbols
   _ovly_table and _ovly_buf_table are of type STT_OBJECT and their
   size set to the size of the respective array.  buf in _ovly_table is
   an index into _ovly_buf_table.

   mapped is an index into _ovly_table.  Both the mapped and buf indices start
   from one to reference the first entry in their respective tables.  */
1727 /* Using the per-objfile private data mechanism, we store for each
1728 objfile an array of "struct spu_overlay_table" structures, one
1729 for each obj_section of the objfile. This structure holds two
1730 fields, MAPPED_PTR and MAPPED_VAL. If MAPPED_PTR is zero, this
1731 is *not* an overlay section. If it is non-zero, it represents
1732 a target address. The overlay section is mapped iff the target
1733 integer at this location equals MAPPED_VAL. */
1735 static const struct objfile_data
*spu_overlay_data
;
1737 struct spu_overlay_table
1739 CORE_ADDR mapped_ptr
;
1740 CORE_ADDR mapped_val
;
1743 /* Retrieve the overlay table for OBJFILE. If not already cached, read
1744 the _ovly_table data structure from the target and initialize the
1745 spu_overlay_table data structure from it. */
1746 static struct spu_overlay_table
*
1747 spu_get_overlay_table (struct objfile
*objfile
)
1749 enum bfd_endian byte_order
= bfd_big_endian (objfile
->obfd
)?
1750 BFD_ENDIAN_BIG
: BFD_ENDIAN_LITTLE
;
1751 struct bound_minimal_symbol ovly_table_msym
, ovly_buf_table_msym
;
1752 CORE_ADDR ovly_table_base
, ovly_buf_table_base
;
1753 unsigned ovly_table_size
, ovly_buf_table_size
;
1754 struct spu_overlay_table
*tbl
;
1755 struct obj_section
*osect
;
1756 gdb_byte
*ovly_table
;
1759 tbl
= objfile_data (objfile
, spu_overlay_data
);
1763 ovly_table_msym
= lookup_minimal_symbol ("_ovly_table", NULL
, objfile
);
1764 if (!ovly_table_msym
.minsym
)
1767 ovly_buf_table_msym
= lookup_minimal_symbol ("_ovly_buf_table",
1769 if (!ovly_buf_table_msym
.minsym
)
1772 ovly_table_base
= BMSYMBOL_VALUE_ADDRESS (ovly_table_msym
);
1773 ovly_table_size
= MSYMBOL_SIZE (ovly_table_msym
.minsym
);
1775 ovly_buf_table_base
= BMSYMBOL_VALUE_ADDRESS (ovly_buf_table_msym
);
1776 ovly_buf_table_size
= MSYMBOL_SIZE (ovly_buf_table_msym
.minsym
);
1778 ovly_table
= xmalloc (ovly_table_size
);
1779 read_memory (ovly_table_base
, ovly_table
, ovly_table_size
);
1781 tbl
= OBSTACK_CALLOC (&objfile
->objfile_obstack
,
1782 objfile
->sections_end
- objfile
->sections
,
1783 struct spu_overlay_table
);
1785 for (i
= 0; i
< ovly_table_size
/ 16; i
++)
1787 CORE_ADDR vma
= extract_unsigned_integer (ovly_table
+ 16*i
+ 0,
1789 CORE_ADDR size
= extract_unsigned_integer (ovly_table
+ 16*i
+ 4,
1791 CORE_ADDR pos
= extract_unsigned_integer (ovly_table
+ 16*i
+ 8,
1793 CORE_ADDR buf
= extract_unsigned_integer (ovly_table
+ 16*i
+ 12,
1796 if (buf
== 0 || (buf
- 1) * 4 >= ovly_buf_table_size
)
1799 ALL_OBJFILE_OSECTIONS (objfile
, osect
)
1800 if (vma
== bfd_section_vma (objfile
->obfd
, osect
->the_bfd_section
)
1801 && pos
== osect
->the_bfd_section
->filepos
)
1803 int ndx
= osect
- objfile
->sections
;
1804 tbl
[ndx
].mapped_ptr
= ovly_buf_table_base
+ (buf
- 1) * 4;
1805 tbl
[ndx
].mapped_val
= i
+ 1;
1811 set_objfile_data (objfile
, spu_overlay_data
, tbl
);
1815 /* Read _ovly_buf_table entry from the target to dermine whether
1816 OSECT is currently mapped, and update the mapped state. */
1818 spu_overlay_update_osect (struct obj_section
*osect
)
1820 enum bfd_endian byte_order
= bfd_big_endian (osect
->objfile
->obfd
)?
1821 BFD_ENDIAN_BIG
: BFD_ENDIAN_LITTLE
;
1822 struct spu_overlay_table
*ovly_table
;
1825 ovly_table
= spu_get_overlay_table (osect
->objfile
);
1829 ovly_table
+= osect
- osect
->objfile
->sections
;
1830 if (ovly_table
->mapped_ptr
== 0)
1833 id
= SPUADDR_SPU (obj_section_addr (osect
));
1834 val
= read_memory_unsigned_integer (SPUADDR (id
, ovly_table
->mapped_ptr
),
1836 osect
->ovly_mapped
= (val
== ovly_table
->mapped_val
);
1839 /* If OSECT is NULL, then update all sections' mapped state.
1840 If OSECT is non-NULL, then update only OSECT's mapped state. */
1842 spu_overlay_update (struct obj_section
*osect
)
1844 /* Just one section. */
1846 spu_overlay_update_osect (osect
);
1851 struct objfile
*objfile
;
1853 ALL_OBJSECTIONS (objfile
, osect
)
1854 if (section_is_overlay (osect
))
1855 spu_overlay_update_osect (osect
);
1859 /* Whenever a new objfile is loaded, read the target's _ovly_table.
1860 If there is one, go through all sections and make sure for non-
1861 overlay sections LMA equals VMA, while for overlay sections LMA
1862 is larger than SPU_OVERLAY_LMA. */
1864 spu_overlay_new_objfile (struct objfile
*objfile
)
1866 struct spu_overlay_table
*ovly_table
;
1867 struct obj_section
*osect
;
1869 /* If we've already touched this file, do nothing. */
1870 if (!objfile
|| objfile_data (objfile
, spu_overlay_data
) != NULL
)
1873 /* Consider only SPU objfiles. */
1874 if (bfd_get_arch (objfile
->obfd
) != bfd_arch_spu
)
1877 /* Check if this objfile has overlays. */
1878 ovly_table
= spu_get_overlay_table (objfile
);
1882 /* Now go and fiddle with all the LMAs. */
1883 ALL_OBJFILE_OSECTIONS (objfile
, osect
)
1885 bfd
*obfd
= objfile
->obfd
;
1886 asection
*bsect
= osect
->the_bfd_section
;
1887 int ndx
= osect
- objfile
->sections
;
1889 if (ovly_table
[ndx
].mapped_ptr
== 0)
1890 bfd_section_lma (obfd
, bsect
) = bfd_section_vma (obfd
, bsect
);
1892 bfd_section_lma (obfd
, bsect
) = SPU_OVERLAY_LMA
+ bsect
->filepos
;
1897 /* Insert temporary breakpoint on "main" function of newly loaded
1898 SPE context OBJFILE. */
1900 spu_catch_start (struct objfile
*objfile
)
1902 struct bound_minimal_symbol minsym
;
1903 struct symtab
*symtab
;
1907 /* Do this only if requested by "set spu stop-on-load on". */
1908 if (!spu_stop_on_load_p
)
1911 /* Consider only SPU objfiles. */
1912 if (!objfile
|| bfd_get_arch (objfile
->obfd
) != bfd_arch_spu
)
1915 /* The main objfile is handled differently. */
1916 if (objfile
== symfile_objfile
)
1919 /* There can be multiple symbols named "main". Search for the
1920 "main" in *this* objfile. */
1921 minsym
= lookup_minimal_symbol ("main", NULL
, objfile
);
1925 /* If we have debugging information, try to use it -- this
1926 will allow us to properly skip the prologue. */
1927 pc
= BMSYMBOL_VALUE_ADDRESS (minsym
);
1928 symtab
= find_pc_sect_symtab (pc
, MSYMBOL_OBJ_SECTION (minsym
.objfile
,
1932 struct blockvector
*bv
= BLOCKVECTOR (symtab
);
1933 struct block
*block
= BLOCKVECTOR_BLOCK (bv
, GLOBAL_BLOCK
);
1935 struct symtab_and_line sal
;
1937 sym
= lookup_block_symbol (block
, "main", VAR_DOMAIN
);
1940 fixup_symbol_section (sym
, objfile
);
1941 sal
= find_function_start_sal (sym
, 1);
1946 /* Use a numerical address for the set_breakpoint command to avoid having
1947 the breakpoint re-set incorrectly. */
1948 xsnprintf (buf
, sizeof buf
, "*%s", core_addr_to_string (pc
));
1949 create_breakpoint (get_objfile_arch (objfile
), buf
/* arg */,
1950 NULL
/* cond_string */, -1 /* thread */,
1951 NULL
/* extra_string */,
1952 0 /* parse_condition_and_thread */, 1 /* tempflag */,
1953 bp_breakpoint
/* type_wanted */,
1954 0 /* ignore_count */,
1955 AUTO_BOOLEAN_FALSE
/* pending_break_support */,
1956 &bkpt_breakpoint_ops
/* ops */, 0 /* from_tty */,
1957 1 /* enabled */, 0 /* internal */, 0);
1961 /* Look up OBJFILE loaded into FRAME's SPU context. */
1962 static struct objfile
*
1963 spu_objfile_from_frame (struct frame_info
*frame
)
1965 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
1966 struct gdbarch_tdep
*tdep
= gdbarch_tdep (gdbarch
);
1967 struct objfile
*obj
;
1969 if (gdbarch_bfd_arch_info (gdbarch
)->arch
!= bfd_arch_spu
)
1974 if (obj
->sections
!= obj
->sections_end
1975 && SPUADDR_SPU (obj_section_addr (obj
->sections
)) == tdep
->id
)
1982 /* Flush cache for ea pointer access if available. */
1984 flush_ea_cache (void)
1986 struct bound_minimal_symbol msymbol
;
1987 struct objfile
*obj
;
1989 if (!has_stack_frames ())
1992 obj
= spu_objfile_from_frame (get_current_frame ());
1996 /* Lookup inferior function __cache_flush. */
1997 msymbol
= lookup_minimal_symbol ("__cache_flush", NULL
, obj
);
1998 if (msymbol
.minsym
!= NULL
)
2003 type
= objfile_type (obj
)->builtin_void
;
2004 type
= lookup_function_type (type
);
2005 type
= lookup_pointer_type (type
);
2006 addr
= BMSYMBOL_VALUE_ADDRESS (msymbol
);
2008 call_function_by_hand (value_from_pointer (type
, addr
), 0, NULL
);
2012 /* This handler is called when the inferior has stopped. If it is stopped in
2013 SPU architecture then flush the ea cache if used. */
2015 spu_attach_normal_stop (struct bpstats
*bs
, int print_frame
)
2017 if (!spu_auto_flush_cache_p
)
2020 /* Temporarily reset spu_auto_flush_cache_p to avoid recursively
2021 re-entering this function when __cache_flush stops. */
2022 spu_auto_flush_cache_p
= 0;
2024 spu_auto_flush_cache_p
= 1;
2028 /* "info spu" commands. */
2031 info_spu_event_command (char *args
, int from_tty
)
2033 struct frame_info
*frame
= get_selected_frame (NULL
);
2034 ULONGEST event_status
= 0;
2035 ULONGEST event_mask
= 0;
2036 struct cleanup
*chain
;
2042 if (gdbarch_bfd_arch_info (get_frame_arch (frame
))->arch
!= bfd_arch_spu
)
2043 error (_("\"info spu\" is only supported on the SPU architecture."));
2045 id
= get_frame_register_unsigned (frame
, SPU_ID_REGNUM
);
2047 xsnprintf (annex
, sizeof annex
, "%d/event_status", id
);
2048 len
= target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
,
2049 buf
, 0, (sizeof (buf
) - 1));
2051 error (_("Could not read event_status."));
2053 event_status
= strtoulst ((char *) buf
, NULL
, 16);
2055 xsnprintf (annex
, sizeof annex
, "%d/event_mask", id
);
2056 len
= target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
,
2057 buf
, 0, (sizeof (buf
) - 1));
2059 error (_("Could not read event_mask."));
2061 event_mask
= strtoulst ((char *) buf
, NULL
, 16);
2063 chain
= make_cleanup_ui_out_tuple_begin_end (current_uiout
, "SPUInfoEvent");
2065 if (ui_out_is_mi_like_p (current_uiout
))
2067 ui_out_field_fmt (current_uiout
, "event_status",
2068 "0x%s", phex_nz (event_status
, 4));
2069 ui_out_field_fmt (current_uiout
, "event_mask",
2070 "0x%s", phex_nz (event_mask
, 4));
2074 printf_filtered (_("Event Status 0x%s\n"), phex (event_status
, 4));
2075 printf_filtered (_("Event Mask 0x%s\n"), phex (event_mask
, 4));
2078 do_cleanups (chain
);
2082 info_spu_signal_command (char *args
, int from_tty
)
2084 struct frame_info
*frame
= get_selected_frame (NULL
);
2085 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
2086 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2087 ULONGEST signal1
= 0;
2088 ULONGEST signal1_type
= 0;
2089 int signal1_pending
= 0;
2090 ULONGEST signal2
= 0;
2091 ULONGEST signal2_type
= 0;
2092 int signal2_pending
= 0;
2093 struct cleanup
*chain
;
2099 if (gdbarch_bfd_arch_info (gdbarch
)->arch
!= bfd_arch_spu
)
2100 error (_("\"info spu\" is only supported on the SPU architecture."));
2102 id
= get_frame_register_unsigned (frame
, SPU_ID_REGNUM
);
2104 xsnprintf (annex
, sizeof annex
, "%d/signal1", id
);
2105 len
= target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
, buf
, 0, 4);
2107 error (_("Could not read signal1."));
2110 signal1
= extract_unsigned_integer (buf
, 4, byte_order
);
2111 signal1_pending
= 1;
2114 xsnprintf (annex
, sizeof annex
, "%d/signal1_type", id
);
2115 len
= target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
,
2116 buf
, 0, (sizeof (buf
) - 1));
2118 error (_("Could not read signal1_type."));
2120 signal1_type
= strtoulst ((char *) buf
, NULL
, 16);
2122 xsnprintf (annex
, sizeof annex
, "%d/signal2", id
);
2123 len
= target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
, buf
, 0, 4);
2125 error (_("Could not read signal2."));
2128 signal2
= extract_unsigned_integer (buf
, 4, byte_order
);
2129 signal2_pending
= 1;
2132 xsnprintf (annex
, sizeof annex
, "%d/signal2_type", id
);
2133 len
= target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
,
2134 buf
, 0, (sizeof (buf
) - 1));
2136 error (_("Could not read signal2_type."));
2138 signal2_type
= strtoulst ((char *) buf
, NULL
, 16);
2140 chain
= make_cleanup_ui_out_tuple_begin_end (current_uiout
, "SPUInfoSignal");
2142 if (ui_out_is_mi_like_p (current_uiout
))
2144 ui_out_field_int (current_uiout
, "signal1_pending", signal1_pending
);
2145 ui_out_field_fmt (current_uiout
, "signal1", "0x%s", phex_nz (signal1
, 4));
2146 ui_out_field_int (current_uiout
, "signal1_type", signal1_type
);
2147 ui_out_field_int (current_uiout
, "signal2_pending", signal2_pending
);
2148 ui_out_field_fmt (current_uiout
, "signal2", "0x%s", phex_nz (signal2
, 4));
2149 ui_out_field_int (current_uiout
, "signal2_type", signal2_type
);
2153 if (signal1_pending
)
2154 printf_filtered (_("Signal 1 control word 0x%s "), phex (signal1
, 4));
2156 printf_filtered (_("Signal 1 not pending "));
2159 printf_filtered (_("(Type Or)\n"));
2161 printf_filtered (_("(Type Overwrite)\n"));
2163 if (signal2_pending
)
2164 printf_filtered (_("Signal 2 control word 0x%s "), phex (signal2
, 4));
2166 printf_filtered (_("Signal 2 not pending "));
2169 printf_filtered (_("(Type Or)\n"));
2171 printf_filtered (_("(Type Overwrite)\n"));
2174 do_cleanups (chain
);
2178 info_spu_mailbox_list (gdb_byte
*buf
, int nr
, enum bfd_endian byte_order
,
2179 const char *field
, const char *msg
)
2181 struct cleanup
*chain
;
2187 chain
= make_cleanup_ui_out_table_begin_end (current_uiout
, 1, nr
, "mbox");
2189 ui_out_table_header (current_uiout
, 32, ui_left
, field
, msg
);
2190 ui_out_table_body (current_uiout
);
2192 for (i
= 0; i
< nr
; i
++)
2194 struct cleanup
*val_chain
;
2196 val_chain
= make_cleanup_ui_out_tuple_begin_end (current_uiout
, "mbox");
2197 val
= extract_unsigned_integer (buf
+ 4*i
, 4, byte_order
);
2198 ui_out_field_fmt (current_uiout
, field
, "0x%s", phex (val
, 4));
2199 do_cleanups (val_chain
);
2201 if (!ui_out_is_mi_like_p (current_uiout
))
2202 printf_filtered ("\n");
2205 do_cleanups (chain
);
2209 info_spu_mailbox_command (char *args
, int from_tty
)
2211 struct frame_info
*frame
= get_selected_frame (NULL
);
2212 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
2213 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2214 struct cleanup
*chain
;
2220 if (gdbarch_bfd_arch_info (gdbarch
)->arch
!= bfd_arch_spu
)
2221 error (_("\"info spu\" is only supported on the SPU architecture."));
2223 id
= get_frame_register_unsigned (frame
, SPU_ID_REGNUM
);
2225 chain
= make_cleanup_ui_out_tuple_begin_end (current_uiout
, "SPUInfoMailbox");
2227 xsnprintf (annex
, sizeof annex
, "%d/mbox_info", id
);
2228 len
= target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
,
2229 buf
, 0, sizeof buf
);
2231 error (_("Could not read mbox_info."));
2233 info_spu_mailbox_list (buf
, len
/ 4, byte_order
,
2234 "mbox", "SPU Outbound Mailbox");
2236 xsnprintf (annex
, sizeof annex
, "%d/ibox_info", id
);
2237 len
= target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
,
2238 buf
, 0, sizeof buf
);
2240 error (_("Could not read ibox_info."));
2242 info_spu_mailbox_list (buf
, len
/ 4, byte_order
,
2243 "ibox", "SPU Outbound Interrupt Mailbox");
2245 xsnprintf (annex
, sizeof annex
, "%d/wbox_info", id
);
2246 len
= target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
,
2247 buf
, 0, sizeof buf
);
2249 error (_("Could not read wbox_info."));
2251 info_spu_mailbox_list (buf
, len
/ 4, byte_order
,
2252 "wbox", "SPU Inbound Mailbox");
2254 do_cleanups (chain
);
2258 spu_mfc_get_bitfield (ULONGEST word
, int first
, int last
)
2260 ULONGEST mask
= ~(~(ULONGEST
)0 << (last
- first
+ 1));
2261 return (word
>> (63 - last
)) & mask
;
2265 info_spu_dma_cmdlist (gdb_byte
*buf
, int nr
, enum bfd_endian byte_order
)
2267 static char *spu_mfc_opcode
[256] =
2269 /* 00 */ NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2270 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2271 /* 10 */ NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2272 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2273 /* 20 */ "put", "putb", "putf", NULL
, "putl", "putlb", "putlf", NULL
,
2274 "puts", "putbs", "putfs", NULL
, NULL
, NULL
, NULL
, NULL
,
2275 /* 30 */ "putr", "putrb", "putrf", NULL
, "putrl", "putrlb", "putrlf", NULL
,
2276 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2277 /* 40 */ "get", "getb", "getf", NULL
, "getl", "getlb", "getlf", NULL
,
2278 "gets", "getbs", "getfs", NULL
, NULL
, NULL
, NULL
, NULL
,
2279 /* 50 */ NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2280 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2281 /* 60 */ NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2282 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2283 /* 70 */ NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2284 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2285 /* 80 */ "sdcrt", "sdcrtst", NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2286 NULL
, "sdcrz", NULL
, NULL
, NULL
, "sdcrst", NULL
, "sdcrf",
2287 /* 90 */ NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2288 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2289 /* a0 */ "sndsig", "sndsigb", "sndsigf", NULL
, NULL
, NULL
, NULL
, NULL
,
2290 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2291 /* b0 */ "putlluc", NULL
, NULL
, NULL
, "putllc", NULL
, NULL
, NULL
,
2292 "putqlluc", NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2293 /* c0 */ "barrier", NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2294 "mfceieio", NULL
, NULL
, NULL
, "mfcsync", NULL
, NULL
, NULL
,
2295 /* d0 */ "getllar", NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2296 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2297 /* e0 */ NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2298 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2299 /* f0 */ NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2300 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
2303 int *seq
= alloca (nr
* sizeof (int));
2305 struct cleanup
*chain
;
2309 /* Determine sequence in which to display (valid) entries. */
2310 for (i
= 0; i
< nr
; i
++)
2312 /* Search for the first valid entry all of whose
2313 dependencies are met. */
2314 for (j
= 0; j
< nr
; j
++)
2316 ULONGEST mfc_cq_dw3
;
2317 ULONGEST dependencies
;
2319 if (done
& (1 << (nr
- 1 - j
)))
2323 = extract_unsigned_integer (buf
+ 32*j
+ 24,8, byte_order
);
2324 if (!spu_mfc_get_bitfield (mfc_cq_dw3
, 16, 16))
2327 dependencies
= spu_mfc_get_bitfield (mfc_cq_dw3
, 0, nr
- 1);
2328 if ((dependencies
& done
) != dependencies
)
2332 done
|= 1 << (nr
- 1 - j
);
2343 chain
= make_cleanup_ui_out_table_begin_end (current_uiout
, 10, nr
,
2346 ui_out_table_header (current_uiout
, 7, ui_left
, "opcode", "Opcode");
2347 ui_out_table_header (current_uiout
, 3, ui_left
, "tag", "Tag");
2348 ui_out_table_header (current_uiout
, 3, ui_left
, "tid", "TId");
2349 ui_out_table_header (current_uiout
, 3, ui_left
, "rid", "RId");
2350 ui_out_table_header (current_uiout
, 18, ui_left
, "ea", "EA");
2351 ui_out_table_header (current_uiout
, 7, ui_left
, "lsa", "LSA");
2352 ui_out_table_header (current_uiout
, 7, ui_left
, "size", "Size");
2353 ui_out_table_header (current_uiout
, 7, ui_left
, "lstaddr", "LstAddr");
2354 ui_out_table_header (current_uiout
, 7, ui_left
, "lstsize", "LstSize");
2355 ui_out_table_header (current_uiout
, 1, ui_left
, "error_p", "E");
2357 ui_out_table_body (current_uiout
);
2359 for (i
= 0; i
< nr
; i
++)
2361 struct cleanup
*cmd_chain
;
2362 ULONGEST mfc_cq_dw0
;
2363 ULONGEST mfc_cq_dw1
;
2364 ULONGEST mfc_cq_dw2
;
2365 int mfc_cmd_opcode
, mfc_cmd_tag
, rclass_id
, tclass_id
;
2366 int list_lsa
, list_size
, mfc_lsa
, mfc_size
;
2368 int list_valid_p
, noop_valid_p
, qw_valid_p
, ea_valid_p
, cmd_error_p
;
2370 /* Decode contents of MFC Command Queue Context Save/Restore Registers.
2371 See "Cell Broadband Engine Registers V1.3", section 3.3.2.1. */
2374 = extract_unsigned_integer (buf
+ 32*seq
[i
], 8, byte_order
);
2376 = extract_unsigned_integer (buf
+ 32*seq
[i
] + 8, 8, byte_order
);
2378 = extract_unsigned_integer (buf
+ 32*seq
[i
] + 16, 8, byte_order
);
2380 list_lsa
= spu_mfc_get_bitfield (mfc_cq_dw0
, 0, 14);
2381 list_size
= spu_mfc_get_bitfield (mfc_cq_dw0
, 15, 26);
2382 mfc_cmd_opcode
= spu_mfc_get_bitfield (mfc_cq_dw0
, 27, 34);
2383 mfc_cmd_tag
= spu_mfc_get_bitfield (mfc_cq_dw0
, 35, 39);
2384 list_valid_p
= spu_mfc_get_bitfield (mfc_cq_dw0
, 40, 40);
2385 rclass_id
= spu_mfc_get_bitfield (mfc_cq_dw0
, 41, 43);
2386 tclass_id
= spu_mfc_get_bitfield (mfc_cq_dw0
, 44, 46);
2388 mfc_ea
= spu_mfc_get_bitfield (mfc_cq_dw1
, 0, 51) << 12
2389 | spu_mfc_get_bitfield (mfc_cq_dw2
, 25, 36);
2391 mfc_lsa
= spu_mfc_get_bitfield (mfc_cq_dw2
, 0, 13);
2392 mfc_size
= spu_mfc_get_bitfield (mfc_cq_dw2
, 14, 24);
2393 noop_valid_p
= spu_mfc_get_bitfield (mfc_cq_dw2
, 37, 37);
2394 qw_valid_p
= spu_mfc_get_bitfield (mfc_cq_dw2
, 38, 38);
2395 ea_valid_p
= spu_mfc_get_bitfield (mfc_cq_dw2
, 39, 39);
2396 cmd_error_p
= spu_mfc_get_bitfield (mfc_cq_dw2
, 40, 40);
2398 cmd_chain
= make_cleanup_ui_out_tuple_begin_end (current_uiout
, "cmd");
2400 if (spu_mfc_opcode
[mfc_cmd_opcode
])
2401 ui_out_field_string (current_uiout
, "opcode", spu_mfc_opcode
[mfc_cmd_opcode
]);
2403 ui_out_field_int (current_uiout
, "opcode", mfc_cmd_opcode
);
2405 ui_out_field_int (current_uiout
, "tag", mfc_cmd_tag
);
2406 ui_out_field_int (current_uiout
, "tid", tclass_id
);
2407 ui_out_field_int (current_uiout
, "rid", rclass_id
);
2410 ui_out_field_fmt (current_uiout
, "ea", "0x%s", phex (mfc_ea
, 8));
2412 ui_out_field_skip (current_uiout
, "ea");
2414 ui_out_field_fmt (current_uiout
, "lsa", "0x%05x", mfc_lsa
<< 4);
2416 ui_out_field_fmt (current_uiout
, "size", "0x%05x", mfc_size
<< 4);
2418 ui_out_field_fmt (current_uiout
, "size", "0x%05x", mfc_size
);
2422 ui_out_field_fmt (current_uiout
, "lstaddr", "0x%05x", list_lsa
<< 3);
2423 ui_out_field_fmt (current_uiout
, "lstsize", "0x%05x", list_size
<< 3);
2427 ui_out_field_skip (current_uiout
, "lstaddr");
2428 ui_out_field_skip (current_uiout
, "lstsize");
2432 ui_out_field_string (current_uiout
, "error_p", "*");
2434 ui_out_field_skip (current_uiout
, "error_p");
2436 do_cleanups (cmd_chain
);
2438 if (!ui_out_is_mi_like_p (current_uiout
))
2439 printf_filtered ("\n");
2442 do_cleanups (chain
);
2446 info_spu_dma_command (char *args
, int from_tty
)
2448 struct frame_info
*frame
= get_selected_frame (NULL
);
2449 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
2450 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2451 ULONGEST dma_info_type
;
2452 ULONGEST dma_info_mask
;
2453 ULONGEST dma_info_status
;
2454 ULONGEST dma_info_stall_and_notify
;
2455 ULONGEST dma_info_atomic_command_status
;
2456 struct cleanup
*chain
;
2462 if (gdbarch_bfd_arch_info (get_frame_arch (frame
))->arch
!= bfd_arch_spu
)
2463 error (_("\"info spu\" is only supported on the SPU architecture."));
2465 id
= get_frame_register_unsigned (frame
, SPU_ID_REGNUM
);
2467 xsnprintf (annex
, sizeof annex
, "%d/dma_info", id
);
2468 len
= target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
,
2469 buf
, 0, 40 + 16 * 32);
2471 error (_("Could not read dma_info."));
2474 = extract_unsigned_integer (buf
, 8, byte_order
);
2476 = extract_unsigned_integer (buf
+ 8, 8, byte_order
);
2478 = extract_unsigned_integer (buf
+ 16, 8, byte_order
);
2479 dma_info_stall_and_notify
2480 = extract_unsigned_integer (buf
+ 24, 8, byte_order
);
2481 dma_info_atomic_command_status
2482 = extract_unsigned_integer (buf
+ 32, 8, byte_order
);
2484 chain
= make_cleanup_ui_out_tuple_begin_end (current_uiout
, "SPUInfoDMA");
2486 if (ui_out_is_mi_like_p (current_uiout
))
2488 ui_out_field_fmt (current_uiout
, "dma_info_type", "0x%s",
2489 phex_nz (dma_info_type
, 4));
2490 ui_out_field_fmt (current_uiout
, "dma_info_mask", "0x%s",
2491 phex_nz (dma_info_mask
, 4));
2492 ui_out_field_fmt (current_uiout
, "dma_info_status", "0x%s",
2493 phex_nz (dma_info_status
, 4));
2494 ui_out_field_fmt (current_uiout
, "dma_info_stall_and_notify", "0x%s",
2495 phex_nz (dma_info_stall_and_notify
, 4));
2496 ui_out_field_fmt (current_uiout
, "dma_info_atomic_command_status", "0x%s",
2497 phex_nz (dma_info_atomic_command_status
, 4));
2501 const char *query_msg
= _("no query pending");
2503 if (dma_info_type
& 4)
2504 switch (dma_info_type
& 3)
2506 case 1: query_msg
= _("'any' query pending"); break;
2507 case 2: query_msg
= _("'all' query pending"); break;
2508 default: query_msg
= _("undefined query type"); break;
2511 printf_filtered (_("Tag-Group Status 0x%s\n"),
2512 phex (dma_info_status
, 4));
2513 printf_filtered (_("Tag-Group Mask 0x%s (%s)\n"),
2514 phex (dma_info_mask
, 4), query_msg
);
2515 printf_filtered (_("Stall-and-Notify 0x%s\n"),
2516 phex (dma_info_stall_and_notify
, 4));
2517 printf_filtered (_("Atomic Cmd Status 0x%s\n"),
2518 phex (dma_info_atomic_command_status
, 4));
2519 printf_filtered ("\n");
2522 info_spu_dma_cmdlist (buf
+ 40, 16, byte_order
);
2523 do_cleanups (chain
);
2527 info_spu_proxydma_command (char *args
, int from_tty
)
2529 struct frame_info
*frame
= get_selected_frame (NULL
);
2530 struct gdbarch
*gdbarch
= get_frame_arch (frame
);
2531 enum bfd_endian byte_order
= gdbarch_byte_order (gdbarch
);
2532 ULONGEST dma_info_type
;
2533 ULONGEST dma_info_mask
;
2534 ULONGEST dma_info_status
;
2535 struct cleanup
*chain
;
2541 if (gdbarch_bfd_arch_info (gdbarch
)->arch
!= bfd_arch_spu
)
2542 error (_("\"info spu\" is only supported on the SPU architecture."));
2544 id
= get_frame_register_unsigned (frame
, SPU_ID_REGNUM
);
2546 xsnprintf (annex
, sizeof annex
, "%d/proxydma_info", id
);
2547 len
= target_read (¤t_target
, TARGET_OBJECT_SPU
, annex
,
2548 buf
, 0, 24 + 8 * 32);
2550 error (_("Could not read proxydma_info."));
2552 dma_info_type
= extract_unsigned_integer (buf
, 8, byte_order
);
2553 dma_info_mask
= extract_unsigned_integer (buf
+ 8, 8, byte_order
);
2554 dma_info_status
= extract_unsigned_integer (buf
+ 16, 8, byte_order
);
2556 chain
= make_cleanup_ui_out_tuple_begin_end (current_uiout
,
2559 if (ui_out_is_mi_like_p (current_uiout
))
2561 ui_out_field_fmt (current_uiout
, "proxydma_info_type", "0x%s",
2562 phex_nz (dma_info_type
, 4));
2563 ui_out_field_fmt (current_uiout
, "proxydma_info_mask", "0x%s",
2564 phex_nz (dma_info_mask
, 4));
2565 ui_out_field_fmt (current_uiout
, "proxydma_info_status", "0x%s",
2566 phex_nz (dma_info_status
, 4));
2570 const char *query_msg
;
2572 switch (dma_info_type
& 3)
2574 case 0: query_msg
= _("no query pending"); break;
2575 case 1: query_msg
= _("'any' query pending"); break;
2576 case 2: query_msg
= _("'all' query pending"); break;
2577 default: query_msg
= _("undefined query type"); break;
2580 printf_filtered (_("Tag-Group Status 0x%s\n"),
2581 phex (dma_info_status
, 4));
2582 printf_filtered (_("Tag-Group Mask 0x%s (%s)\n"),
2583 phex (dma_info_mask
, 4), query_msg
);
2584 printf_filtered ("\n");
2587 info_spu_dma_cmdlist (buf
+ 24, 8, byte_order
);
2588 do_cleanups (chain
);
2592 info_spu_command (char *args
, int from_tty
)
2594 printf_unfiltered (_("\"info spu\" must be followed by "
2595 "the name of an SPU facility.\n"));
2596 help_list (infospucmdlist
, "info spu ", -1, gdb_stdout
);
2600 /* Root of all "set spu "/"show spu " commands. */
2603 show_spu_command (char *args
, int from_tty
)
2605 help_list (showspucmdlist
, "show spu ", all_commands
, gdb_stdout
);
2609 set_spu_command (char *args
, int from_tty
)
2611 help_list (setspucmdlist
, "set spu ", all_commands
, gdb_stdout
);
/* "show spu stop-on-load" callback: report the current setting of the
   spu_stop_on_load_p flag.  VALUE is the pre-rendered "on"/"off" text.  */
static void
show_spu_stop_on_load (struct ui_file *file, int from_tty,
		       struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Stopping for new SPE threads is %s.\n"),
		    value);
}
/* "show spu auto-flush-cache" callback: report the current setting of
   the spu_auto_flush_cache_p flag.  VALUE is the pre-rendered text.  */
static void
show_spu_auto_flush_cache (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Automatic software-cache flush is %s.\n"),
		    value);
}
2631 /* Set up gdbarch struct. */
2633 static struct gdbarch
*
2634 spu_gdbarch_init (struct gdbarch_info info
, struct gdbarch_list
*arches
)
2636 struct gdbarch
*gdbarch
;
2637 struct gdbarch_tdep
*tdep
;
2640 /* Which spufs ID was requested as address space? */
2642 id
= *(int *)info
.tdep_info
;
2643 /* For objfile architectures of SPU solibs, decode the ID from the name.
2644 This assumes the filename convention employed by solib-spu.c. */
2647 char *name
= strrchr (info
.abfd
->filename
, '@');
2649 sscanf (name
, "@0x%*x <%d>", &id
);
2652 /* Find a candidate among extant architectures. */
2653 for (arches
= gdbarch_list_lookup_by_info (arches
, &info
);
2655 arches
= gdbarch_list_lookup_by_info (arches
->next
, &info
))
2657 tdep
= gdbarch_tdep (arches
->gdbarch
);
2658 if (tdep
&& tdep
->id
== id
)
2659 return arches
->gdbarch
;
2662 /* None found, so create a new architecture. */
2663 tdep
= XCNEW (struct gdbarch_tdep
);
2665 gdbarch
= gdbarch_alloc (&info
, tdep
);
2668 set_gdbarch_print_insn (gdbarch
, gdb_print_insn_spu
);
2671 set_gdbarch_num_regs (gdbarch
, SPU_NUM_REGS
);
2672 set_gdbarch_num_pseudo_regs (gdbarch
, SPU_NUM_PSEUDO_REGS
);
2673 set_gdbarch_sp_regnum (gdbarch
, SPU_SP_REGNUM
);
2674 set_gdbarch_pc_regnum (gdbarch
, SPU_PC_REGNUM
);
2675 set_gdbarch_read_pc (gdbarch
, spu_read_pc
);
2676 set_gdbarch_write_pc (gdbarch
, spu_write_pc
);
2677 set_gdbarch_register_name (gdbarch
, spu_register_name
);
2678 set_gdbarch_register_type (gdbarch
, spu_register_type
);
2679 set_gdbarch_pseudo_register_read (gdbarch
, spu_pseudo_register_read
);
2680 set_gdbarch_pseudo_register_write (gdbarch
, spu_pseudo_register_write
);
2681 set_gdbarch_value_from_register (gdbarch
, spu_value_from_register
);
2682 set_gdbarch_register_reggroup_p (gdbarch
, spu_register_reggroup_p
);
2685 set_gdbarch_char_signed (gdbarch
, 0);
2686 set_gdbarch_ptr_bit (gdbarch
, 32);
2687 set_gdbarch_addr_bit (gdbarch
, 32);
2688 set_gdbarch_short_bit (gdbarch
, 16);
2689 set_gdbarch_int_bit (gdbarch
, 32);
2690 set_gdbarch_long_bit (gdbarch
, 32);
2691 set_gdbarch_long_long_bit (gdbarch
, 64);
2692 set_gdbarch_float_bit (gdbarch
, 32);
2693 set_gdbarch_double_bit (gdbarch
, 64);
2694 set_gdbarch_long_double_bit (gdbarch
, 64);
2695 set_gdbarch_float_format (gdbarch
, floatformats_ieee_single
);
2696 set_gdbarch_double_format (gdbarch
, floatformats_ieee_double
);
2697 set_gdbarch_long_double_format (gdbarch
, floatformats_ieee_double
);
2699 /* Address handling. */
2700 set_gdbarch_address_to_pointer (gdbarch
, spu_address_to_pointer
);
2701 set_gdbarch_pointer_to_address (gdbarch
, spu_pointer_to_address
);
2702 set_gdbarch_integer_to_address (gdbarch
, spu_integer_to_address
);
2703 set_gdbarch_address_class_type_flags (gdbarch
, spu_address_class_type_flags
);
2704 set_gdbarch_address_class_type_flags_to_name
2705 (gdbarch
, spu_address_class_type_flags_to_name
);
2706 set_gdbarch_address_class_name_to_type_flags
2707 (gdbarch
, spu_address_class_name_to_type_flags
);
2710 /* Inferior function calls. */
2711 set_gdbarch_call_dummy_location (gdbarch
, ON_STACK
);
2712 set_gdbarch_frame_align (gdbarch
, spu_frame_align
);
2713 set_gdbarch_frame_red_zone_size (gdbarch
, 2000);
2714 set_gdbarch_push_dummy_code (gdbarch
, spu_push_dummy_code
);
2715 set_gdbarch_push_dummy_call (gdbarch
, spu_push_dummy_call
);
2716 set_gdbarch_dummy_id (gdbarch
, spu_dummy_id
);
2717 set_gdbarch_return_value (gdbarch
, spu_return_value
);
2719 /* Frame handling. */
2720 set_gdbarch_inner_than (gdbarch
, core_addr_lessthan
);
2721 frame_unwind_append_unwinder (gdbarch
, &spu_frame_unwind
);
2722 frame_base_set_default (gdbarch
, &spu_frame_base
);
2723 set_gdbarch_unwind_pc (gdbarch
, spu_unwind_pc
);
2724 set_gdbarch_unwind_sp (gdbarch
, spu_unwind_sp
);
2725 set_gdbarch_virtual_frame_pointer (gdbarch
, spu_virtual_frame_pointer
);
2726 set_gdbarch_frame_args_skip (gdbarch
, 0);
2727 set_gdbarch_skip_prologue (gdbarch
, spu_skip_prologue
);
2728 set_gdbarch_in_function_epilogue_p (gdbarch
, spu_in_function_epilogue_p
);
2730 /* Cell/B.E. cross-architecture unwinder support. */
2731 frame_unwind_prepend_unwinder (gdbarch
, &spu2ppu_unwind
);
2734 set_gdbarch_decr_pc_after_break (gdbarch
, 4);
2735 set_gdbarch_breakpoint_from_pc (gdbarch
, spu_breakpoint_from_pc
);
2736 set_gdbarch_memory_remove_breakpoint (gdbarch
, spu_memory_remove_breakpoint
);
2737 set_gdbarch_cannot_step_breakpoint (gdbarch
, 1);
2738 set_gdbarch_software_single_step (gdbarch
, spu_software_single_step
);
2739 set_gdbarch_get_longjmp_target (gdbarch
, spu_get_longjmp_target
);
2742 set_gdbarch_overlay_update (gdbarch
, spu_overlay_update
);
2747 /* Provide a prototype to silence -Wmissing-prototypes. */
2748 extern initialize_file_ftype _initialize_spu_tdep
;
2751 _initialize_spu_tdep (void)
2753 register_gdbarch_init (bfd_arch_spu
, spu_gdbarch_init
);
2755 /* Add ourselves to objfile event chain. */
2756 observer_attach_new_objfile (spu_overlay_new_objfile
);
2757 spu_overlay_data
= register_objfile_data ();
2759 /* Install spu stop-on-load handler. */
2760 observer_attach_new_objfile (spu_catch_start
);
2762 /* Add ourselves to normal_stop event chain. */
2763 observer_attach_normal_stop (spu_attach_normal_stop
);
2765 /* Add root prefix command for all "set spu"/"show spu" commands. */
2766 add_prefix_cmd ("spu", no_class
, set_spu_command
,
2767 _("Various SPU specific commands."),
2768 &setspucmdlist
, "set spu ", 0, &setlist
);
2769 add_prefix_cmd ("spu", no_class
, show_spu_command
,
2770 _("Various SPU specific commands."),
2771 &showspucmdlist
, "show spu ", 0, &showlist
);
2773 /* Toggle whether or not to add a temporary breakpoint at the "main"
2774 function of new SPE contexts. */
2775 add_setshow_boolean_cmd ("stop-on-load", class_support
,
2776 &spu_stop_on_load_p
, _("\
2777 Set whether to stop for new SPE threads."),
2779 Show whether to stop for new SPE threads."),
2781 Use \"on\" to give control to the user when a new SPE thread\n\
2782 enters its \"main\" function.\n\
2783 Use \"off\" to disable stopping for new SPE threads."),
2785 show_spu_stop_on_load
,
2786 &setspucmdlist
, &showspucmdlist
);
2788 /* Toggle whether or not to automatically flush the software-managed
2789 cache whenever SPE execution stops. */
2790 add_setshow_boolean_cmd ("auto-flush-cache", class_support
,
2791 &spu_auto_flush_cache_p
, _("\
2792 Set whether to automatically flush the software-managed cache."),
2794 Show whether to automatically flush the software-managed cache."),
2796 Use \"on\" to automatically flush the software-managed cache\n\
2797 whenever SPE execution stops.\n\
2798 Use \"off\" to never automatically flush the software-managed cache."),
2800 show_spu_auto_flush_cache
,
2801 &setspucmdlist
, &showspucmdlist
);
2803 /* Add root prefix command for all "info spu" commands. */
2804 add_prefix_cmd ("spu", class_info
, info_spu_command
,
2805 _("Various SPU specific commands."),
2806 &infospucmdlist
, "info spu ", 0, &infolist
);
2808 /* Add various "info spu" commands. */
2809 add_cmd ("event", class_info
, info_spu_event_command
,
2810 _("Display SPU event facility status.\n"),
2812 add_cmd ("signal", class_info
, info_spu_signal_command
,
2813 _("Display SPU signal notification facility status.\n"),
2815 add_cmd ("mailbox", class_info
, info_spu_mailbox_command
,
2816 _("Display SPU mailbox facility status.\n"),
2818 add_cmd ("dma", class_info
, info_spu_dma_command
,
2819 _("Display MFC DMA status.\n"),
2821 add_cmd ("proxydma", class_info
, info_spu_proxydma_command
,
2822 _("Display MFC Proxy-DMA status.\n"),