/* Source: gdb/spu-tdep.c from the binutils-gdb repository (git.ipfire.org
   mirror), blob viewed at commit "Use address_from_register in
   dwarf2-frame.c:read_addr_from_reg".  */
1 /* SPU target-dependent code for GDB, the GNU debugger.
2 Copyright (C) 2006-2014 Free Software Foundation, Inc.
3
4 Contributed by Ulrich Weigand <uweigand@de.ibm.com>.
5 Based on a port by Sid Manning <sid@us.ibm.com>.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "arch-utils.h"
24 #include "gdbtypes.h"
25 #include "gdbcmd.h"
26 #include "gdbcore.h"
27 #include <string.h>
28 #include "gdb_assert.h"
29 #include "frame.h"
30 #include "frame-unwind.h"
31 #include "frame-base.h"
32 #include "trad-frame.h"
33 #include "symtab.h"
34 #include "symfile.h"
35 #include "value.h"
36 #include "inferior.h"
37 #include "dis-asm.h"
38 #include "objfiles.h"
39 #include "language.h"
40 #include "regcache.h"
41 #include "reggroups.h"
42 #include "floatformat.h"
43 #include "block.h"
44 #include "observer.h"
45 #include "infcall.h"
46 #include "dwarf2.h"
47 #include "exceptions.h"
48 #include "spu-tdep.h"
49
50
/* The list of available "set spu " and "show spu " commands.  */
static struct cmd_list_element *setspucmdlist = NULL;
static struct cmd_list_element *showspucmdlist = NULL;

/* Whether to stop for new SPE contexts.  Off by default.  */
static int spu_stop_on_load_p = 0;
/* Whether to automatically flush the SW-managed cache.  On by default.  */
static int spu_auto_flush_cache_p = 1;
59
60
/* The tdep structure.  */
struct gdbarch_tdep
{
  /* The spufs ID identifying our address space; -1 when not known
     (see spu_gdbarch_id for the fallback used in that case).  */
  int id;

  /* SPU-specific vector type, built lazily and cached by
     spu_builtin_type_vec128.  */
  struct type *spu_builtin_type_vec128;
};
70
71
/* SPU-specific vector type.  Return a union type overlaying the 16-byte
   register contents as the various integer and floating-point vector
   views (v2_int64, v4_int32, ..., v4_float).  The type is constructed
   once per architecture and cached in the tdep structure.  */
static struct type *
spu_builtin_type_vec128 (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (!tdep->spu_builtin_type_vec128)
    {
      const struct builtin_type *bt = builtin_type (gdbarch);
      struct type *t;

      t = arch_composite_type (gdbarch,
			       "__spu_builtin_type_vec128", TYPE_CODE_UNION);
      append_composite_type_field (t, "uint128", bt->builtin_int128);
      append_composite_type_field (t, "v2_int64",
				   init_vector_type (bt->builtin_int64, 2));
      append_composite_type_field (t, "v4_int32",
				   init_vector_type (bt->builtin_int32, 4));
      append_composite_type_field (t, "v8_int16",
				   init_vector_type (bt->builtin_int16, 8));
      append_composite_type_field (t, "v16_int8",
				   init_vector_type (bt->builtin_int8, 16));
      append_composite_type_field (t, "v2_double",
				   init_vector_type (bt->builtin_double, 2));
      append_composite_type_field (t, "v4_float",
				   init_vector_type (bt->builtin_float, 4));

      TYPE_VECTOR (t) = 1;
      TYPE_NAME (t) = "spu_builtin_type_vec128";

      /* Cache the type so subsequent calls are cheap.  */
      tdep->spu_builtin_type_vec128 = t;
    }

  return tdep->spu_builtin_type_vec128;
}
107
108
/* The list of available "info spu " commands; NULL until populated.  */
static struct cmd_list_element *infospucmdlist = NULL;
111
112 /* Registers. */
113
/* Return the name of register REG_NR, or NULL if REG_NR is out of
   range.  The first 128 entries are the general-purpose registers;
   the remainder are the special registers exposed via spufs.  */
static const char *
spu_register_name (struct gdbarch *gdbarch, int reg_nr)
{
  /* The table points at string literals, which are immutable, so the
     element type is const-qualified (the original `static char *[]'
     invited accidental modification attempts).  */
  static const char *const register_names[] =
    {
      "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
      "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
      "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
      "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
      "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
      "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
      "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
      "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
      "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
      "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
      "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
      "r96", "r97", "r98", "r99", "r100", "r101", "r102", "r103",
      "r104", "r105", "r106", "r107", "r108", "r109", "r110", "r111",
      "r112", "r113", "r114", "r115", "r116", "r117", "r118", "r119",
      "r120", "r121", "r122", "r123", "r124", "r125", "r126", "r127",
      "id", "pc", "sp", "fpscr", "srr0", "lslr", "decr", "decr_status"
    };
  /* Use a signed count so the bound check below does not mix signed
     and unsigned operands.  */
  const int num_names = (int) (sizeof register_names
			       / sizeof register_names[0]);

  if (reg_nr < 0 || reg_nr >= num_names)
    return NULL;

  return register_names[reg_nr];
}
145
146 static struct type *
147 spu_register_type (struct gdbarch *gdbarch, int reg_nr)
148 {
149 if (reg_nr < SPU_NUM_GPRS)
150 return spu_builtin_type_vec128 (gdbarch);
151
152 switch (reg_nr)
153 {
154 case SPU_ID_REGNUM:
155 return builtin_type (gdbarch)->builtin_uint32;
156
157 case SPU_PC_REGNUM:
158 return builtin_type (gdbarch)->builtin_func_ptr;
159
160 case SPU_SP_REGNUM:
161 return builtin_type (gdbarch)->builtin_data_ptr;
162
163 case SPU_FPSCR_REGNUM:
164 return builtin_type (gdbarch)->builtin_uint128;
165
166 case SPU_SRR0_REGNUM:
167 return builtin_type (gdbarch)->builtin_uint32;
168
169 case SPU_LSLR_REGNUM:
170 return builtin_type (gdbarch)->builtin_uint32;
171
172 case SPU_DECR_REGNUM:
173 return builtin_type (gdbarch)->builtin_uint32;
174
175 case SPU_DECR_STATUS_REGNUM:
176 return builtin_type (gdbarch)->builtin_uint32;
177
178 default:
179 internal_error (__FILE__, __LINE__, _("invalid regnum"));
180 }
181 }
182
/* Pseudo registers for preferred slots - stack pointer.  */

/* Helper: read the pseudo register named REGNAME for the SPU context
   of REGCACHE by reading the spufs file "<id>/<regname>", which holds
   an ASCII hexadecimal number, and store the value into BUF as a
   4-byte unsigned integer.  Returns REG_VALID on success, or the
   status of the failed SPU_ID_REGNUM read.  */
static enum register_status
spu_pseudo_register_read_spu (struct regcache *regcache, const char *regname,
			      gdb_byte *buf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  enum register_status status;
  gdb_byte reg[32];
  char annex[32];
  ULONGEST id;
  ULONGEST ul;

  /* The SPU ID selects which spufs context to read from.  */
  status = regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
  if (status != REG_VALID)
    return status;
  xsnprintf (annex, sizeof annex, "%d/%s", (int) id, regname);
  /* Zero-fill so the buffer is NUL-terminated for strtoulst even if
     the target returns fewer bytes than requested.  */
  memset (reg, 0, sizeof reg);
  target_read (&current_target, TARGET_OBJECT_SPU, annex,
	       reg, 0, sizeof reg);

  /* The file contents are hexadecimal text; parse and re-encode in
     target byte order.  */
  ul = strtoulst ((char *) reg, NULL, 16);
  store_unsigned_integer (buf, 4, byte_order, ul);
  return REG_VALID;
}
209
/* gdbarch pseudo_register_read handler.  SP is served from the
   preferred slot of the raw SP register; FPSCR and the spufs-backed
   registers are read from the target's SPU object.  */
static enum register_status
spu_pseudo_register_read (struct gdbarch *gdbarch, struct regcache *regcache,
			  int regnum, gdb_byte *buf)
{
  gdb_byte reg[16];
  char annex[32];
  ULONGEST id;
  enum register_status status;

  switch (regnum)
    {
    case SPU_SP_REGNUM:
      /* The cooked SP is the first word (preferred slot) of the raw
	 16-byte SP register.  */
      status = regcache_raw_read (regcache, SPU_RAW_SP_REGNUM, reg);
      if (status != REG_VALID)
	return status;
      memcpy (buf, reg, 4);
      return status;

    case SPU_FPSCR_REGNUM:
      /* FPSCR is read as 16 raw bytes from the "<id>/fpcr" spufs file.  */
      status = regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
      if (status != REG_VALID)
	return status;
      xsnprintf (annex, sizeof annex, "%d/fpcr", (int) id);
      target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 16);
      return status;

    case SPU_SRR0_REGNUM:
      return spu_pseudo_register_read_spu (regcache, "srr0", buf);

    case SPU_LSLR_REGNUM:
      return spu_pseudo_register_read_spu (regcache, "lslr", buf);

    case SPU_DECR_REGNUM:
      return spu_pseudo_register_read_spu (regcache, "decr", buf);

    case SPU_DECR_STATUS_REGNUM:
      return spu_pseudo_register_read_spu (regcache, "decr_status", buf);

    default:
      internal_error (__FILE__, __LINE__, _("invalid regnum"));
    }
}
252
/* Helper: write the 4-byte value in BUF to the pseudo register named
   REGNAME by writing it as ASCII hexadecimal ("0x...") text to the
   spufs file "<id>/<regname>" of the SPU context of REGCACHE.  */
static void
spu_pseudo_register_write_spu (struct regcache *regcache, const char *regname,
			       const gdb_byte *buf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  char reg[32];
  char annex[32];
  ULONGEST id;

  regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
  xsnprintf (annex, sizeof annex, "%d/%s", (int) id, regname);
  /* spufs expects the value as hexadecimal text, mirroring the read
     path in spu_pseudo_register_read_spu.  */
  xsnprintf (reg, sizeof reg, "0x%s",
	     phex_nz (extract_unsigned_integer (buf, 4, byte_order), 4));
  target_write (&current_target, TARGET_OBJECT_SPU, annex,
		(gdb_byte *) reg, 0, strlen (reg));
}
270
/* gdbarch pseudo_register_write handler, the inverse of
   spu_pseudo_register_read: SP updates only the preferred slot of the
   raw SP register; FPSCR and the spufs-backed registers are written
   through the target's SPU object.  */
static void
spu_pseudo_register_write (struct gdbarch *gdbarch, struct regcache *regcache,
			   int regnum, const gdb_byte *buf)
{
  gdb_byte reg[16];
  char annex[32];
  ULONGEST id;

  switch (regnum)
    {
    case SPU_SP_REGNUM:
      /* Read-modify-write: replace only the first word of the raw SP
	 register, preserving the other three slots.  */
      regcache_raw_read (regcache, SPU_RAW_SP_REGNUM, reg);
      memcpy (reg, buf, 4);
      regcache_raw_write (regcache, SPU_RAW_SP_REGNUM, reg);
      break;

    case SPU_FPSCR_REGNUM:
      regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
      xsnprintf (annex, sizeof annex, "%d/fpcr", (int) id);
      target_write (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 16);
      break;

    case SPU_SRR0_REGNUM:
      spu_pseudo_register_write_spu (regcache, "srr0", buf);
      break;

    case SPU_LSLR_REGNUM:
      spu_pseudo_register_write_spu (regcache, "lslr", buf);
      break;

    case SPU_DECR_REGNUM:
      spu_pseudo_register_write_spu (regcache, "decr", buf);
      break;

    case SPU_DECR_STATUS_REGNUM:
      spu_pseudo_register_write_spu (regcache, "decr_status", buf);
      break;

    default:
      internal_error (__FILE__, __LINE__, _("invalid regnum"));
    }
}
313
/* Value conversion -- access scalar values at the preferred slot.  */

/* gdbarch value_from_register handler.  A scalar of TYPE shorter than
   the full 16-byte GPR lives at the start of the register; values
   narrower than a word are additionally right-aligned within the
   first (preferred) 4-byte slot, hence the 4 - LEN offset.  */
static struct value *
spu_value_from_register (struct gdbarch *gdbarch, struct type *type,
			 int regnum, struct frame_id frame_id)
{
  struct value *value = default_value_from_register (gdbarch, type,
						     regnum, frame_id);
  int len = TYPE_LENGTH (type);

  if (regnum < SPU_NUM_GPRS && len < 16)
    {
      int preferred_slot = len < 4 ? 4 - len : 0;
      set_value_offset (value, preferred_slot);
    }

  return value;
}
332
333 /* Register groups. */
334
335 static int
336 spu_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
337 struct reggroup *group)
338 {
339 /* Registers displayed via 'info regs'. */
340 if (group == general_reggroup)
341 return 1;
342
343 /* Registers displayed via 'info float'. */
344 if (group == float_reggroup)
345 return 0;
346
347 /* Registers that need to be saved/restored in order to
348 push or pop frames. */
349 if (group == save_reggroup || group == restore_reggroup)
350 return 1;
351
352 return default_register_reggroup_p (gdbarch, regnum, group);
353 }
354
355
356 /* Address handling. */
357
/* Return the spufs ID of the address space GDBARCH belongs to, or -1
   if it cannot be determined.  */
static int
spu_gdbarch_id (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  int id = tdep->id;

  /* The objfile architecture of a standalone SPU executable does not
     provide an SPU ID.  Retrieve it from the objfile's relocated
     address range in this special case.  */
  if (id == -1
      && symfile_objfile && symfile_objfile->obfd
      && bfd_get_arch (symfile_objfile->obfd) == bfd_arch_spu
      && symfile_objfile->sections != symfile_objfile->sections_end)
    id = SPUADDR_SPU (obj_section_addr (symfile_objfile->sections));

  return id;
}
375
376 static int
377 spu_address_class_type_flags (int byte_size, int dwarf2_addr_class)
378 {
379 if (dwarf2_addr_class == 1)
380 return TYPE_INSTANCE_FLAG_ADDRESS_CLASS_1;
381 else
382 return 0;
383 }
384
385 static const char *
386 spu_address_class_type_flags_to_name (struct gdbarch *gdbarch, int type_flags)
387 {
388 if (type_flags & TYPE_INSTANCE_FLAG_ADDRESS_CLASS_1)
389 return "__ea";
390 else
391 return NULL;
392 }
393
394 static int
395 spu_address_class_name_to_type_flags (struct gdbarch *gdbarch,
396 const char *name, int *type_flags_ptr)
397 {
398 if (strcmp (name, "__ea") == 0)
399 {
400 *type_flags_ptr = TYPE_INSTANCE_FLAG_ADDRESS_CLASS_1;
401 return 1;
402 }
403 else
404 return 0;
405 }
406
/* gdbarch address_to_pointer handler: store ADDR into BUF, stripping
   the SPU ID tag so only the local-store offset is written.  */
static void
spu_address_to_pointer (struct gdbarch *gdbarch,
			struct type *type, gdb_byte *buf, CORE_ADDR addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  store_unsigned_integer (buf, TYPE_LENGTH (type), byte_order,
			  SPUADDR_ADDR (addr));
}
415
/* gdbarch pointer_to_address handler: extract a pointer from BUF and
   tag it with this architecture's SPU ID, except for __ea pointers
   and null pointers, which are passed through unchanged.  */
static CORE_ADDR
spu_pointer_to_address (struct gdbarch *gdbarch,
			struct type *type, const gdb_byte *buf)
{
  int id = spu_gdbarch_id (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  ULONGEST addr
    = extract_unsigned_integer (buf, TYPE_LENGTH (type), byte_order);

  /* Do not convert __ea pointers.  */
  if (TYPE_ADDRESS_CLASS_1 (type))
    return addr;

  /* A null pointer stays null rather than becoming SPUADDR (id, 0).  */
  return addr? SPUADDR (id, addr) : 0;
}
431
/* gdbarch integer_to_address handler: interpret an integer value as a
   local-store address tagged with this architecture's SPU ID.  */
static CORE_ADDR
spu_integer_to_address (struct gdbarch *gdbarch,
			struct type *type, const gdb_byte *buf)
{
  int id = spu_gdbarch_id (gdbarch);
  ULONGEST addr = unpack_long (type, buf);

  return SPUADDR (id, addr);
}
441
442
/* Decoding SPU instructions.  */

/* Major opcode values of the instructions the prologue/epilogue
   scanners below need to recognize.  Each value is compared against
   the instruction's opcode field by the matching is_* helper (the
   field width differs between the RR, RRR, RI7, RI10, RI16 and RI18
   instruction forms).  */
enum
  {
    /* Quadword loads and stores.  */
    op_lqd = 0x34,
    op_lqx = 0x3c4,
    op_lqa = 0x61,
    op_lqr = 0x67,
    op_stqd = 0x24,
    op_stqx = 0x144,
    op_stqa = 0x41,
    op_stqr = 0x47,

    /* Immediate loads and additions, used to set up stack frames.  */
    op_il = 0x081,
    op_ila = 0x21,
    op_a = 0x0c0,
    op_ai = 0x1c,

    /* Select bits; _start uses it to set up the stack pointer.  */
    op_selb = 0x8,

    /* PC-relative, absolute, and indirect branches.  */
    op_br = 0x64,
    op_bra = 0x60,
    op_brsl = 0x66,
    op_brasl = 0x62,
    op_brnz = 0x42,
    op_brz = 0x40,
    op_brhnz = 0x46,
    op_brhz = 0x44,
    op_bi = 0x1a8,
    op_bisl = 0x1a9,
    op_biz = 0x128,
    op_binz = 0x129,
    op_bihz = 0x12a,
    op_bihnz = 0x12b,
  };
478
/* If INSN is an RR-form instruction with major opcode OP, extract its
   RT, RA and RB register fields and return 1; otherwise return 0.  */
static int
is_rr (unsigned int insn, int op, int *rt, int *ra, int *rb)
{
  /* The RR opcode occupies the top 11 bits.  */
  if ((insn >> 21) != op)
    return 0;

  *rt = insn & 127;
  *ra = (insn >> 7) & 127;
  *rb = (insn >> 14) & 127;
  return 1;
}
492
/* If INSN is an RRR-form instruction with major opcode OP, extract
   its RT, RA, RB and RC register fields and return 1; otherwise
   return 0.  */
static int
is_rrr (unsigned int insn, int op, int *rt, int *ra, int *rb, int *rc)
{
  /* The RRR opcode occupies only the top 4 bits; RT moves up to make
     room for the third source register RC in the low bits.  */
  if ((insn >> 28) != op)
    return 0;

  *rt = (insn >> 21) & 127;
  *ra = (insn >> 7) & 127;
  *rb = (insn >> 14) & 127;
  *rc = insn & 127;
  return 1;
}
507
/* If INSN is an RI7-form instruction with major opcode OP, extract
   its RT and RA register fields and its sign-extended 7-bit
   immediate, and return 1; otherwise return 0.  */
static int
is_ri7 (unsigned int insn, int op, int *rt, int *ra, int *i7)
{
  if ((insn >> 21) != op)
    return 0;

  *rt = insn & 127;
  *ra = (insn >> 7) & 127;
  /* Sign-extend the 7-bit field via the xor/subtract idiom.  */
  *i7 = (((insn >> 14) & 127) ^ 0x40) - 0x40;
  return 1;
}
521
/* If INSN is an RI10-form instruction with major opcode OP, extract
   its RT and RA register fields and its sign-extended 10-bit
   immediate, and return 1; otherwise return 0.  */
static int
is_ri10 (unsigned int insn, int op, int *rt, int *ra, int *i10)
{
  /* The RI10 opcode occupies the top 8 bits.  */
  if ((insn >> 24) != op)
    return 0;

  *rt = insn & 127;
  *ra = (insn >> 7) & 127;
  /* Sign-extend the 10-bit field via the xor/subtract idiom.  */
  *i10 = (((insn >> 14) & 0x3ff) ^ 0x200) - 0x200;
  return 1;
}
535
/* If INSN is an RI16-form instruction with major opcode OP, extract
   its RT register field and its sign-extended 16-bit immediate, and
   return 1; otherwise return 0.  */
static int
is_ri16 (unsigned int insn, int op, int *rt, int *i16)
{
  /* The RI16 opcode occupies the top 9 bits.  */
  if ((insn >> 23) != op)
    return 0;

  *rt = insn & 127;
  /* Sign-extend the 16-bit field via the xor/subtract idiom.  */
  *i16 = (((insn >> 7) & 0xffff) ^ 0x8000) - 0x8000;
  return 1;
}
548
/* If INSN is an RI18-form instruction with major opcode OP, extract
   its RT register field and its sign-extended 18-bit immediate, and
   return 1; otherwise return 0.  */
static int
is_ri18 (unsigned int insn, int op, int *rt, int *i18)
{
  /* The RI18 opcode occupies the top 7 bits.  */
  if ((insn >> 25) != op)
    return 0;

  *rt = insn & 127;
  /* Sign-extend the 18-bit field via the xor/subtract idiom.  */
  *i18 = (((insn >> 7) & 0x3ffff) ^ 0x20000) - 0x20000;
  return 1;
}
561
/* If INSN is any branch instruction, return 1 and describe the target:
   - PC-relative branches: *REG is SPU_PC_REGNUM, *OFFSET the byte
     displacement (the 16-bit word displacement shifted left by 2);
   - absolute branches (bra/brasl): *REG is -1, *OFFSET the absolute
     byte address;
   - register-indirect branches (bi and friends): *REG is the RA
     register holding the target, *OFFSET is 0.
   Returns 0 for non-branch instructions.  */
static int
is_branch (unsigned int insn, int *offset, int *reg)
{
  int rt, i7, i16;

  if (is_ri16 (insn, op_br, &rt, &i16)
      || is_ri16 (insn, op_brsl, &rt, &i16)
      || is_ri16 (insn, op_brnz, &rt, &i16)
      || is_ri16 (insn, op_brz, &rt, &i16)
      || is_ri16 (insn, op_brhnz, &rt, &i16)
      || is_ri16 (insn, op_brhz, &rt, &i16))
    {
      *reg = SPU_PC_REGNUM;
      *offset = i16 << 2;
      return 1;
    }

  if (is_ri16 (insn, op_bra, &rt, &i16)
      || is_ri16 (insn, op_brasl, &rt, &i16))
    {
      /* -1 flags an absolute target for the caller.  */
      *reg = -1;
      *offset = i16 << 2;
      return 1;
    }

  if (is_ri7 (insn, op_bi, &rt, reg, &i7)
      || is_ri7 (insn, op_bisl, &rt, reg, &i7)
      || is_ri7 (insn, op_biz, &rt, reg, &i7)
      || is_ri7 (insn, op_binz, &rt, reg, &i7)
      || is_ri7 (insn, op_bihz, &rt, reg, &i7)
      || is_ri7 (insn, op_bihnz, &rt, reg, &i7))
    {
      /* *REG was already filled in with RA by is_ri7.  */
      *offset = 0;
      return 1;
    }

  return 0;
}
600
601
602 /* Prolog parsing. */
603
/* Result of prologue analysis (see spu_analyze_prologue).  */
struct spu_prologue_data
  {
    /* Stack frame size.  -1 if analysis was unsuccessful.  */
    int size;

    /* How to find the CFA.  The CFA is equal to SP at function entry.
       CFA_REG is set to -1 when the analysis loses track of it.  */
    int cfa_reg;
    int cfa_offset;

    /* Offset relative to CFA where a register is saved.  -1 if invalid.  */
    int reg_offset[SPU_NUM_GPRS];
  };
616
/* Scan the function prologue between START_PC and END_PC and fill in
   DATA: the stack frame size, how to compute the CFA, and the CFA
   offsets of saved registers.  Returns the address of the first
   instruction past the prologue, or START_PC if no prologue component
   was identified.  DATA->size is -1 if the analysis failed.  */
static CORE_ADDR
spu_analyze_prologue (struct gdbarch *gdbarch,
		      CORE_ADDR start_pc, CORE_ADDR end_pc,
		      struct spu_prologue_data *data)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int found_sp = 0;
  int found_fp = 0;
  int found_lr = 0;
  int found_bc = 0;
  int reg_immed[SPU_NUM_GPRS];
  gdb_byte buf[16];
  CORE_ADDR prolog_pc = start_pc;
  CORE_ADDR pc;
  int i;


  /* Initialize DATA to default values.  */
  data->size = -1;

  data->cfa_reg = SPU_RAW_SP_REGNUM;
  data->cfa_offset = 0;

  for (i = 0; i < SPU_NUM_GPRS; i++)
    data->reg_offset[i] = -1;

  /* Set up REG_IMMED array.  This is non-zero for a register if we know its
     preferred slot currently holds this immediate value.  */
  for (i = 0; i < SPU_NUM_GPRS; i++)
    reg_immed[i] = 0;

  /* Scan instructions until the first branch.

     The following instructions are important prolog components:

	- The first instruction to set up the stack pointer.
	- The first instruction to set up the frame pointer.
	- The first instruction to save the link register.
	- The first instruction to save the backchain.

     We return the instruction after the latest of these four,
     or the incoming PC if none is found.  The first instruction
     to set up the stack pointer also defines the frame size.

     Note that instructions saving incoming arguments to their stack
     slots are not counted as important, because they are hard to
     identify with certainty.  This should not matter much, because
     arguments are relevant only in code compiled with debug data,
     and in such code the GDB core will advance until the first source
     line anyway, using SAL data.

     For purposes of stack unwinding, we analyze the following types
     of instructions in addition:

	- Any instruction adding to the current frame pointer.
	- Any instruction loading an immediate constant into a register.
	- Any instruction storing a register onto the stack.

     These are used to compute the CFA and REG_OFFSET output.  */

  for (pc = start_pc; pc < end_pc; pc += 4)
    {
      unsigned int insn;
      int rt, ra, rb, rc, immed;

      /* Stop at the first unreadable instruction.  */
      if (target_read_memory (pc, buf, 4))
	break;
      insn = extract_unsigned_integer (buf, 4, byte_order);

      /* AI is the typical instruction to set up a stack frame.
	 It is also used to initialize the frame pointer.  */
      if (is_ri10 (insn, op_ai, &rt, &ra, &immed))
	{
	  if (rt == data->cfa_reg && ra == data->cfa_reg)
	    data->cfa_offset -= immed;

	  if (rt == SPU_RAW_SP_REGNUM && ra == SPU_RAW_SP_REGNUM
	      && !found_sp)
	    {
	      found_sp = 1;
	      prolog_pc = pc + 4;

	      /* Frames grow downward; the AI immediate is negative.  */
	      data->size = -immed;
	    }
	  else if (rt == SPU_FP_REGNUM && ra == SPU_RAW_SP_REGNUM
		   && !found_fp)
	    {
	      found_fp = 1;
	      prolog_pc = pc + 4;

	      /* Track the CFA via the frame pointer from here on.  */
	      data->cfa_reg = SPU_FP_REGNUM;
	      data->cfa_offset -= immed;
	    }
	}

      /* A is used to set up stack frames of size >= 512 bytes.
         If we have tracked the contents of the addend register,
	 we can handle this as well.  */
      else if (is_rr (insn, op_a, &rt, &ra, &rb))
	{
	  if (rt == data->cfa_reg && ra == data->cfa_reg)
	    {
	      if (reg_immed[rb] != 0)
		data->cfa_offset -= reg_immed[rb];
	      else
		data->cfa_reg = -1;  /* We don't know the CFA any more.  */
	    }

	  if (rt == SPU_RAW_SP_REGNUM && ra == SPU_RAW_SP_REGNUM
	      && !found_sp)
	    {
	      found_sp = 1;
	      prolog_pc = pc + 4;

	      if (reg_immed[rb] != 0)
		data->size = -reg_immed[rb];
	    }
	}

      /* We need to track IL and ILA used to load immediate constants
         in case they are later used as input to an A instruction.  */
      else if (is_ri16 (insn, op_il, &rt, &immed))
	{
	  reg_immed[rt] = immed;

	  if (rt == SPU_RAW_SP_REGNUM && !found_sp)
	    found_sp = 1;
	}

      else if (is_ri18 (insn, op_ila, &rt, &immed))
	{
	  /* ILA's 18-bit immediate is unsigned; undo the helper's
	     sign extension.  */
	  reg_immed[rt] = immed & 0x3ffff;

	  if (rt == SPU_RAW_SP_REGNUM && !found_sp)
	    found_sp = 1;
	}

      /* STQD is used to save registers to the stack.  */
      else if (is_ri10 (insn, op_stqd, &rt, &ra, &immed))
	{
	  if (ra == data->cfa_reg)
	    /* The STQD displacement counts quadwords, hence << 4.  */
	    data->reg_offset[rt] = data->cfa_offset - (immed << 4);

	  if (ra == data->cfa_reg && rt == SPU_LR_REGNUM
              && !found_lr)
	    {
	      found_lr = 1;
	      prolog_pc = pc + 4;
	    }

	  if (ra == SPU_RAW_SP_REGNUM
	      && (found_sp? immed == 0 : rt == SPU_RAW_SP_REGNUM)
	      && !found_bc)
	    {
	      found_bc = 1;
	      prolog_pc = pc + 4;
	    }
	}

      /* _start uses SELB to set up the stack pointer.  */
      else if (is_rrr (insn, op_selb, &rt, &ra, &rb, &rc))
	{
	  if (rt == SPU_RAW_SP_REGNUM && !found_sp)
	    found_sp = 1;
	}

      /* We terminate if we find a branch.  */
      else if (is_branch (insn, &immed, &ra))
	break;
    }


  /* If we successfully parsed until here, and didn't find any instruction
     modifying SP, we assume we have a frameless function.  */
  if (!found_sp)
    data->size = 0;

  /* Return cooked instead of raw SP.  */
  if (data->cfa_reg == SPU_RAW_SP_REGNUM)
    data->cfa_reg = SPU_SP_REGNUM;

  return prolog_pc;
}
800
/* Return the first instruction after the prologue starting at PC.
   The scratch DATA is discarded; only the returned address matters.  */
static CORE_ADDR
spu_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  struct spu_prologue_data data;
  return spu_analyze_prologue (gdbarch, pc, (CORE_ADDR)-1, &data);
}
808
/* Return the frame pointer in use at address PC, expressed as a
   register number in *REG plus a byte offset in *OFFSET.  */
static void
spu_virtual_frame_pointer (struct gdbarch *gdbarch, CORE_ADDR pc,
			   int *reg, LONGEST *offset)
{
  struct spu_prologue_data data;
  spu_analyze_prologue (gdbarch, pc, (CORE_ADDR)-1, &data);

  if (data.size != -1 && data.cfa_reg != -1)
    {
      /* The 'frame pointer' address is CFA minus frame size.  */
      *reg = data.cfa_reg;
      *offset = data.cfa_offset - data.size;
    }
  else
    {
      /* ??? We don't really know ... fall back to SP with no offset.  */
      *reg = SPU_SP_REGNUM;
      *offset = 0;
    }
}
830
/* Return true if we are in the function's epilogue, i.e. after the
   instruction that destroyed the function's stack frame.

   1) scan forward from the point of execution:
       a) If you find an instruction that modifies the stack pointer
	  or transfers control (except a return), execution is not in
	  an epilogue, return.
       b) Stop scanning if you find a return instruction or reach the
	  end of the function or reach the hard limit for the size of
	  an epilogue.
   2) scan backward from the point of execution:
        a) If you find an instruction that modifies the stack pointer,
            execution *is* in an epilogue, return.
	b) Stop scanning if you reach an instruction that transfers
	   control or the beginning of the function or reach the hard
	   limit for the size of an epilogue.  */

static int
spu_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  CORE_ADDR scan_pc, func_start, func_end, epilogue_start, epilogue_end;
  bfd_byte buf[4];
  unsigned int insn;
  int rt, ra, rb, immed;

  /* Find the search limits based on function boundaries and hard limit.
     We assume the epilogue can be up to 64 instructions long.  */

  const int spu_max_epilogue_size = 64 * 4;

  /* Without function bounds we cannot decide; assume not in epilogue.  */
  if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
    return 0;

  if (pc - func_start < spu_max_epilogue_size)
    epilogue_start = func_start;
  else
    epilogue_start = pc - spu_max_epilogue_size;

  if (func_end - pc < spu_max_epilogue_size)
    epilogue_end = func_end;
  else
    epilogue_end = pc + spu_max_epilogue_size;

  /* Scan forward until next 'bi $0'.  */

  for (scan_pc = pc; scan_pc < epilogue_end; scan_pc += 4)
    {
      if (target_read_memory (scan_pc, buf, 4))
	return 0;
      insn = extract_unsigned_integer (buf, 4, byte_order);

      if (is_branch (insn, &immed, &ra))
	{
	  /* An indirect branch through LR with no offset is the
	     function return; any other branch disqualifies us.  */
	  if (immed == 0 && ra == SPU_LR_REGNUM)
	    break;

	  return 0;
	}

      /* Any SP modification before the return means no epilogue yet.  */
      if (is_ri10 (insn, op_ai, &rt, &ra, &immed)
	  || is_rr (insn, op_a, &rt, &ra, &rb)
	  || is_ri10 (insn, op_lqd, &rt, &ra, &immed))
	{
	  if (rt == SPU_RAW_SP_REGNUM)
	    return 0;
	}
    }

  /* No return instruction found within the window.  */
  if (scan_pc >= epilogue_end)
    return 0;

  /* Scan backward until adjustment to stack pointer (R1).  */

  for (scan_pc = pc - 4; scan_pc >= epilogue_start; scan_pc -= 4)
    {
      if (target_read_memory (scan_pc, buf, 4))
	return 0;
      insn = extract_unsigned_integer (buf, 4, byte_order);

      if (is_branch (insn, &immed, &ra))
	return 0;

      /* The frame teardown writes SP; seeing it behind us confirms we
	 are in the epilogue.  */
      if (is_ri10 (insn, op_ai, &rt, &ra, &immed)
	  || is_rr (insn, op_a, &rt, &ra, &rb)
	  || is_ri10 (insn, op_lqd, &rt, &ra, &immed))
	{
	  if (rt == SPU_RAW_SP_REGNUM)
	    return 1;
	}
    }

  return 0;
}
925
926
927 /* Normal stack frames. */
928
/* Per-frame unwind cache, built by spu_frame_unwind_cache.  */
struct spu_unwind_cache
{
  /* Start address of the frame's function (or its PC as fallback).  */
  CORE_ADDR func;
  /* CFA of this frame; 0 when unwinding failed.  */
  CORE_ADDR frame_base;
  /* Frame base (CFA minus frame size) reported to GDB.  */
  CORE_ADDR local_base;

  /* Locations of the caller's saved registers.  */
  struct trad_frame_saved_reg *saved_regs;
};
937
/* Build (or return the cached) unwind information for THIS_FRAME.
   First tries prologue analysis; if that fails, falls back to following
   the backchain link stored at the stack pointer.  On success, fills in
   the saved-register table including the unwound SP and PC.  */
static struct spu_unwind_cache *
spu_frame_unwind_cache (struct frame_info *this_frame,
			void **this_prologue_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct spu_unwind_cache *info;
  struct spu_prologue_data data;
  CORE_ADDR id = tdep->id;
  gdb_byte buf[16];

  /* Return the cache if it was already built for this frame.  */
  if (*this_prologue_cache)
    return *this_prologue_cache;

  info = FRAME_OBSTACK_ZALLOC (struct spu_unwind_cache);
  *this_prologue_cache = info;
  info->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  info->frame_base = 0;
  info->local_base = 0;

  /* Find the start of the current function, and analyze its prologue.  */
  info->func = get_frame_func (this_frame);
  if (info->func == 0)
    {
      /* Fall back to using the current PC as frame ID.  */
      info->func = get_frame_pc (this_frame);
      data.size = -1;
    }
  else
    spu_analyze_prologue (gdbarch, info->func, get_frame_pc (this_frame),
			  &data);

  /* If successful, use prologue analysis data.  */
  if (data.size != -1 && data.cfa_reg != -1)
    {
      CORE_ADDR cfa;
      int i;

      /* Determine CFA via unwound CFA_REG plus CFA_OFFSET.  */
      get_frame_register (this_frame, data.cfa_reg, buf);
      cfa = extract_unsigned_integer (buf, 4, byte_order) + data.cfa_offset;
      cfa = SPUADDR (id, cfa);

      /* Call-saved register slots.  */
      for (i = 0; i < SPU_NUM_GPRS; i++)
	if (i == SPU_LR_REGNUM
	    || (i >= SPU_SAVED1_REGNUM && i <= SPU_SAVEDN_REGNUM))
	  if (data.reg_offset[i] != -1)
	    info->saved_regs[i].addr = cfa - data.reg_offset[i];

      /* Frame bases.  */
      info->frame_base = cfa;
      info->local_base = cfa - data.size;
    }

  /* Otherwise, fall back to reading the backchain link.  */
  else
    {
      CORE_ADDR reg;
      LONGEST backchain;
      ULONGEST lslr;
      int status;

      /* Get local store limit.  */
      lslr = get_frame_register_unsigned (this_frame, SPU_LSLR_REGNUM);
      if (!lslr)
	lslr = (ULONGEST) -1;

      /* Get the backchain.  */
      reg = get_frame_register_unsigned (this_frame, SPU_SP_REGNUM);
      status = safe_read_memory_integer (SPUADDR (id, reg), 4, byte_order,
					 &backchain);

      /* A zero backchain terminates the frame chain.  Also, sanity
         check against the local store size limit.  */
      if (status && backchain > 0 && backchain <= lslr)
	{
	  /* Assume the link register is saved into its slot.  */
	  if (backchain + 16 <= lslr)
	    info->saved_regs[SPU_LR_REGNUM].addr = SPUADDR (id,
							    backchain + 16);

          /* Frame bases.  */
	  info->frame_base = SPUADDR (id, backchain);
	  info->local_base = SPUADDR (id, reg);
	}
    }

  /* If we didn't find a frame, we cannot determine SP / return address.  */
  if (info->frame_base == 0)
    return info;

  /* The previous SP is equal to the CFA.  */
  trad_frame_set_value (info->saved_regs, SPU_SP_REGNUM,
			SPUADDR_ADDR (info->frame_base));

  /* Read full contents of the unwound link register in order to
     be able to determine the return address.  */
  if (trad_frame_addr_p (info->saved_regs, SPU_LR_REGNUM))
    target_read_memory (info->saved_regs[SPU_LR_REGNUM].addr, buf, 16);
  else
    get_frame_register (this_frame, SPU_LR_REGNUM, buf);

  /* Normally, the return address is contained in the slot 0 of the
     link register, and slots 1-3 are zero.  For an overlay return,
     slot 0 contains the address of the overlay manager return stub,
     slot 1 contains the partition number of the overlay section to
     be returned to, and slot 2 contains the return address within
     that section.  Return the latter address in that case.  */
  if (extract_unsigned_integer (buf + 8, 4, byte_order) != 0)
    trad_frame_set_value (info->saved_regs, SPU_PC_REGNUM,
			  extract_unsigned_integer (buf + 8, 4, byte_order));
  else
    trad_frame_set_value (info->saved_regs, SPU_PC_REGNUM,
			  extract_unsigned_integer (buf, 4, byte_order));

  return info;
}
1057
1058 static void
1059 spu_frame_this_id (struct frame_info *this_frame,
1060 void **this_prologue_cache, struct frame_id *this_id)
1061 {
1062 struct spu_unwind_cache *info =
1063 spu_frame_unwind_cache (this_frame, this_prologue_cache);
1064
1065 if (info->frame_base == 0)
1066 return;
1067
1068 *this_id = frame_id_build (info->frame_base, info->func);
1069 }
1070
1071 static struct value *
1072 spu_frame_prev_register (struct frame_info *this_frame,
1073 void **this_prologue_cache, int regnum)
1074 {
1075 struct spu_unwind_cache *info
1076 = spu_frame_unwind_cache (this_frame, this_prologue_cache);
1077
1078 /* Special-case the stack pointer. */
1079 if (regnum == SPU_RAW_SP_REGNUM)
1080 regnum = SPU_SP_REGNUM;
1081
1082 return trad_frame_get_prev_register (this_frame, info->saved_regs, regnum);
1083 }
1084
/* The normal-frame unwinder, wired to the handlers above.  */
static const struct frame_unwind spu_frame_unwind = {
  NORMAL_FRAME,
  default_frame_unwind_stop_reason,
  spu_frame_this_id,
  spu_frame_prev_register,
  NULL,
  default_frame_sniffer
};
1093
/* frame_base handler: all three base addresses (base, locals, args)
   are the cached local_base (CFA minus frame size).  */
static CORE_ADDR
spu_frame_base_address (struct frame_info *this_frame, void **this_cache)
{
  struct spu_unwind_cache *info
    = spu_frame_unwind_cache (this_frame, this_cache);
  return info->local_base;
}
1101
/* Frame base descriptor; one address serves base, locals and args.  */
static const struct frame_base spu_frame_base = {
  &spu_frame_unwind,
  spu_frame_base_address,
  spu_frame_base_address,
  spu_frame_base_address
};
1108
/* gdbarch unwind_pc handler: unwind the PC from NEXT_FRAME and tag it
   with the SPU ID.  */
static CORE_ADDR
spu_unwind_pc (struct gdbarch *gdbarch, struct frame_info *next_frame)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  CORE_ADDR pc = frame_unwind_register_unsigned (next_frame, SPU_PC_REGNUM);
  /* Mask off interrupt enable bit (the & -4 clears the low two bits).  */
  return SPUADDR (tdep->id, pc & -4);
}
1117
1118 static CORE_ADDR
1119 spu_unwind_sp (struct gdbarch *gdbarch, struct frame_info *next_frame)
1120 {
1121 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1122 CORE_ADDR sp = frame_unwind_register_unsigned (next_frame, SPU_SP_REGNUM);
1123 return SPUADDR (tdep->id, sp);
1124 }
1125
1126 static CORE_ADDR
1127 spu_read_pc (struct regcache *regcache)
1128 {
1129 struct gdbarch_tdep *tdep = gdbarch_tdep (get_regcache_arch (regcache));
1130 ULONGEST pc;
1131 regcache_cooked_read_unsigned (regcache, SPU_PC_REGNUM, &pc);
1132 /* Mask off interrupt enable bit. */
1133 return SPUADDR (tdep->id, pc & -4);
1134 }
1135
1136 static void
1137 spu_write_pc (struct regcache *regcache, CORE_ADDR pc)
1138 {
1139 /* Keep interrupt enabled state unchanged. */
1140 ULONGEST old_pc;
1141
1142 regcache_cooked_read_unsigned (regcache, SPU_PC_REGNUM, &old_pc);
1143 regcache_cooked_write_unsigned (regcache, SPU_PC_REGNUM,
1144 (SPUADDR_ADDR (pc) & -4) | (old_pc & 3));
1145 }
1146
1147
1148 /* Cell/B.E. cross-architecture unwinder support. */
1149
/* Unwind cache for the SPU-to-PPU cross-architecture unwinder: the
   frame ID of the boundary frame plus a snapshot of the registers to
   resume unwinding from.  */
struct spu2ppu_cache
{
  struct frame_id frame_id;	/* ID of the boundary frame.  */
  struct regcache *regcache;	/* Saved registers; owned by the cache
				   (freed in spu2ppu_dealloc_cache).  */
};
1155
1156 static struct gdbarch *
1157 spu2ppu_prev_arch (struct frame_info *this_frame, void **this_cache)
1158 {
1159 struct spu2ppu_cache *cache = *this_cache;
1160 return get_regcache_arch (cache->regcache);
1161 }
1162
1163 static void
1164 spu2ppu_this_id (struct frame_info *this_frame,
1165 void **this_cache, struct frame_id *this_id)
1166 {
1167 struct spu2ppu_cache *cache = *this_cache;
1168 *this_id = cache->frame_id;
1169 }
1170
1171 static struct value *
1172 spu2ppu_prev_register (struct frame_info *this_frame,
1173 void **this_cache, int regnum)
1174 {
1175 struct spu2ppu_cache *cache = *this_cache;
1176 struct gdbarch *gdbarch = get_regcache_arch (cache->regcache);
1177 gdb_byte *buf;
1178
1179 buf = alloca (register_size (gdbarch, regnum));
1180 regcache_cooked_read (cache->regcache, regnum, buf);
1181 return frame_unwind_got_bytes (this_frame, regnum, buf);
1182 }
1183
/* Sniffer for the SPU-to-PPU cross-architecture unwinder.  Applies in
   combined Cell/B.E. debugging when we hit the outermost SPU frame
   (back chain of zero); captures a register snapshot so unwinding can
   continue into the other architecture's frames.  */
static int
spu2ppu_sniffer (const struct frame_unwind *self,
		 struct frame_info *this_frame, void **this_prologue_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  CORE_ADDR base, func, backchain;
  gdb_byte buf[4];

  /* Only in combined debugging, i.e. when the main target architecture
     is not itself SPU.  */
  if (gdbarch_bfd_arch_info (target_gdbarch ())->arch == bfd_arch_spu)
    return 0;

  base = get_frame_sp (this_frame);
  func = get_frame_pc (this_frame);
  if (target_read_memory (base, buf, 4))
    return 0;
  backchain = extract_unsigned_integer (buf, 4, byte_order);

  /* A zero back chain marks the outermost SPU frame.  */
  if (!backchain)
    {
      struct frame_info *fi;

      struct spu2ppu_cache *cache
	= FRAME_OBSTACK_CALLOC (1, struct spu2ppu_cache);

      cache->frame_id = frame_id_build (base + 16, func);

      /* Find the innermost non-SPU frame further up the stack, if any.  */
      for (fi = get_next_frame (this_frame); fi; fi = get_next_frame (fi))
	if (gdbarch_bfd_arch_info (get_frame_arch (fi))->arch != bfd_arch_spu)
	  break;

      if (fi)
	{
	  /* Snapshot that frame's registers.  */
	  cache->regcache = frame_save_as_regcache (fi);
	  *this_prologue_cache = cache;
	  return 1;
	}
      else
	{
	  /* No such frame: snapshot the thread's current registers in
	     the main target architecture instead.  */
	  struct regcache *regcache;
	  regcache = get_thread_arch_regcache (inferior_ptid, target_gdbarch ());
	  cache->regcache = regcache_dup (regcache);
	  *this_prologue_cache = cache;
	  return 1;
	}
    }

  return 0;
}
1233
1234 static void
1235 spu2ppu_dealloc_cache (struct frame_info *self, void *this_cache)
1236 {
1237 struct spu2ppu_cache *cache = this_cache;
1238 regcache_xfree (cache->regcache);
1239 }
1240
/* Cross-architecture unwinder connecting the outermost SPU frame to
   the frames of the other architecture below it.  */
static const struct frame_unwind spu2ppu_unwind = {
  ARCH_FRAME,
  default_frame_unwind_stop_reason,
  spu2ppu_this_id,
  spu2ppu_prev_register,
  NULL,				/* unwind_data */
  spu2ppu_sniffer,
  spu2ppu_dealloc_cache,
  spu2ppu_prev_arch,
};
1251
1252
1253 /* Function calling convention. */
1254
1255 static CORE_ADDR
1256 spu_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
1257 {
1258 return sp & ~15;
1259 }
1260
1261 static CORE_ADDR
1262 spu_push_dummy_code (struct gdbarch *gdbarch, CORE_ADDR sp, CORE_ADDR funaddr,
1263 struct value **args, int nargs, struct type *value_type,
1264 CORE_ADDR *real_pc, CORE_ADDR *bp_addr,
1265 struct regcache *regcache)
1266 {
1267 /* Allocate space sufficient for a breakpoint, keeping the stack aligned. */
1268 sp = (sp - 4) & ~15;
1269 /* Store the address of that breakpoint */
1270 *bp_addr = sp;
1271 /* The call starts at the callee's entry point. */
1272 *real_pc = funaddr;
1273
1274 return sp;
1275 }
1276
1277 static int
1278 spu_scalar_value_p (struct type *type)
1279 {
1280 switch (TYPE_CODE (type))
1281 {
1282 case TYPE_CODE_INT:
1283 case TYPE_CODE_ENUM:
1284 case TYPE_CODE_RANGE:
1285 case TYPE_CODE_CHAR:
1286 case TYPE_CODE_BOOL:
1287 case TYPE_CODE_PTR:
1288 case TYPE_CODE_REF:
1289 return TYPE_LENGTH (type) <= 16;
1290
1291 default:
1292 return 0;
1293 }
1294 }
1295
1296 static void
1297 spu_value_to_regcache (struct regcache *regcache, int regnum,
1298 struct type *type, const gdb_byte *in)
1299 {
1300 int len = TYPE_LENGTH (type);
1301
1302 if (spu_scalar_value_p (type))
1303 {
1304 int preferred_slot = len < 4 ? 4 - len : 0;
1305 regcache_cooked_write_part (regcache, regnum, preferred_slot, len, in);
1306 }
1307 else
1308 {
1309 while (len >= 16)
1310 {
1311 regcache_cooked_write (regcache, regnum++, in);
1312 in += 16;
1313 len -= 16;
1314 }
1315
1316 if (len > 0)
1317 regcache_cooked_write_part (regcache, regnum, 0, len, in);
1318 }
1319 }
1320
1321 static void
1322 spu_regcache_to_value (struct regcache *regcache, int regnum,
1323 struct type *type, gdb_byte *out)
1324 {
1325 int len = TYPE_LENGTH (type);
1326
1327 if (spu_scalar_value_p (type))
1328 {
1329 int preferred_slot = len < 4 ? 4 - len : 0;
1330 regcache_cooked_read_part (regcache, regnum, preferred_slot, len, out);
1331 }
1332 else
1333 {
1334 while (len >= 16)
1335 {
1336 regcache_cooked_read (regcache, regnum++, out);
1337 out += 16;
1338 len -= 16;
1339 }
1340
1341 if (len > 0)
1342 regcache_cooked_read_part (regcache, regnum, 0, len, out);
1343 }
1344 }
1345
/* Implement the "push_dummy_call" gdbarch method.  Sets up registers
   and stack for an inferior function call: return address in $lr,
   arguments in $r3 upward (overflow on the stack), a 32-byte frame
   header with back chain, and returns the new stack pointer.  */
static CORE_ADDR
spu_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
		     struct regcache *regcache, CORE_ADDR bp_addr,
		     int nargs, struct value **args, CORE_ADDR sp,
		     int struct_return, CORE_ADDR struct_addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  CORE_ADDR sp_delta;
  int i;
  int regnum = SPU_ARG1_REGNUM;
  int stack_arg = -1;		/* Index of first stack-passed arg, or -1.  */
  gdb_byte buf[16];

  /* Set the return address.  */
  memset (buf, 0, sizeof buf);
  store_unsigned_integer (buf, 4, byte_order, SPUADDR_ADDR (bp_addr));
  regcache_cooked_write (regcache, SPU_LR_REGNUM, buf);

  /* If STRUCT_RETURN is true, then the struct return address (in
     STRUCT_ADDR) will consume the first argument-passing register.
     Both adjust the register count and store that value.  */
  if (struct_return)
    {
      memset (buf, 0, sizeof buf);
      store_unsigned_integer (buf, 4, byte_order, SPUADDR_ADDR (struct_addr));
      regcache_cooked_write (regcache, regnum++, buf);
    }

  /* Fill in argument registers.  */
  for (i = 0; i < nargs; i++)
    {
      struct value *arg = args[i];
      struct type *type = check_typedef (value_type (arg));
      const gdb_byte *contents = value_contents (arg);
      /* Each argument occupies a whole number of 16-byte registers.  */
      int n_regs = align_up (TYPE_LENGTH (type), 16) / 16;

      /* If the argument doesn't wholly fit into registers, it and
	 all subsequent arguments go to the stack.  */
      if (regnum + n_regs - 1 > SPU_ARGN_REGNUM)
	{
	  stack_arg = i;
	  break;
	}

      spu_value_to_regcache (regcache, regnum, type, contents);
      regnum += n_regs;
    }

  /* Overflow arguments go to the stack.  */
  if (stack_arg != -1)
    {
      CORE_ADDR ap;

      /* Allocate all required stack size.  */
      for (i = stack_arg; i < nargs; i++)
	{
	  struct type *type = check_typedef (value_type (args[i]));
	  sp -= align_up (TYPE_LENGTH (type), 16);
	}

      /* Fill in stack arguments.  */
      ap = sp;
      for (i = stack_arg; i < nargs; i++)
	{
	  struct value *arg = args[i];
	  struct type *type = check_typedef (value_type (arg));
	  int len = TYPE_LENGTH (type);
	  int preferred_slot;

	  /* Scalars are right-aligned within the first word of their
	     16-byte slot, same as in registers.  */
	  if (spu_scalar_value_p (type))
	    preferred_slot = len < 4 ? 4 - len : 0;
	  else
	    preferred_slot = 0;

	  target_write_memory (ap + preferred_slot, value_contents (arg), len);
	  ap += align_up (TYPE_LENGTH (type), 16);
	}
    }

  /* Allocate stack frame header.  */
  sp -= 32;

  /* Store stack back chain.  */
  regcache_cooked_read (regcache, SPU_RAW_SP_REGNUM, buf);
  target_write_memory (sp, buf, 16);

  /* Finally, update all slots of the SP register.  The raw SP holds
     four word slots; shift each by the same delta as the first.  */
  sp_delta = sp - extract_unsigned_integer (buf, 4, byte_order);
  for (i = 0; i < 4; i++)
    {
      CORE_ADDR sp_slot = extract_unsigned_integer (buf + 4*i, 4, byte_order);
      store_unsigned_integer (buf + 4*i, 4, byte_order, sp_slot + sp_delta);
    }
  regcache_cooked_write (regcache, SPU_RAW_SP_REGNUM, buf);

  return sp;
}
1443
1444 static struct frame_id
1445 spu_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
1446 {
1447 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1448 CORE_ADDR pc = get_frame_register_unsigned (this_frame, SPU_PC_REGNUM);
1449 CORE_ADDR sp = get_frame_register_unsigned (this_frame, SPU_SP_REGNUM);
1450 return frame_id_build (SPUADDR (tdep->id, sp), SPUADDR (tdep->id, pc & -4));
1451 }
1452
1453 /* Function return value access. */
1454
/* Implement the "return_value" gdbarch method.  Values that fit in
   the argument register area are returned in registers starting at
   $r3; larger values use the struct-return convention.  2-byte
   OpenCL vectors are special-cased to bytes 2..3 of the first word.
   When IN is set, write a return value into the inferior; when OUT
   is set, read the return value out.  */
static enum return_value_convention
spu_return_value (struct gdbarch *gdbarch, struct value *function,
		  struct type *type, struct regcache *regcache,
		  gdb_byte *out, const gdb_byte *in)
{
  struct type *func_type = function ? value_type (function) : NULL;
  enum return_value_convention rvc;
  int opencl_vector = 0;

  /* Detect IBM OpenCL vector return values via the function type's
     calling-convention attribute.  */
  if (func_type)
    {
      func_type = check_typedef (func_type);

      if (TYPE_CODE (func_type) == TYPE_CODE_PTR)
	func_type = check_typedef (TYPE_TARGET_TYPE (func_type));

      if (TYPE_CODE (func_type) == TYPE_CODE_FUNC
	  && TYPE_CALLING_CONVENTION (func_type) == DW_CC_GDB_IBM_OpenCL
	  && TYPE_CODE (type) == TYPE_CODE_ARRAY
	  && TYPE_VECTOR (type))
	opencl_vector = 1;
    }

  if (TYPE_LENGTH (type) <= (SPU_ARGN_REGNUM - SPU_ARG1_REGNUM + 1) * 16)
    rvc = RETURN_VALUE_REGISTER_CONVENTION;
  else
    rvc = RETURN_VALUE_STRUCT_CONVENTION;

  if (in)
    {
      /* Store a new return value into the inferior.  */
      switch (rvc)
	{
	case RETURN_VALUE_REGISTER_CONVENTION:
	  if (opencl_vector && TYPE_LENGTH (type) == 2)
	    regcache_cooked_write_part (regcache, SPU_ARG1_REGNUM, 2, 2, in);
	  else
	    spu_value_to_regcache (regcache, SPU_ARG1_REGNUM, type, in);
	  break;

	case RETURN_VALUE_STRUCT_CONVENTION:
	  error (_("Cannot set function return value."));
	  break;
	}
    }
  else if (out)
    {
      /* Fetch the return value from the inferior.  */
      switch (rvc)
	{
	case RETURN_VALUE_REGISTER_CONVENTION:
	  if (opencl_vector && TYPE_LENGTH (type) == 2)
	    regcache_cooked_read_part (regcache, SPU_ARG1_REGNUM, 2, 2, out);
	  else
	    spu_regcache_to_value (regcache, SPU_ARG1_REGNUM, type, out);
	  break;

	case RETURN_VALUE_STRUCT_CONVENTION:
	  error (_("Function return value unknown."));
	  break;
	}
    }

  return rvc;
}
1518
1519
1520 /* Breakpoints. */
1521
1522 static const gdb_byte *
1523 spu_breakpoint_from_pc (struct gdbarch *gdbarch,
1524 CORE_ADDR * pcptr, int *lenptr)
1525 {
1526 static const gdb_byte breakpoint[] = { 0x00, 0x00, 0x3f, 0xff };
1527
1528 *lenptr = sizeof breakpoint;
1529 return breakpoint;
1530 }
1531
/* Implement the "memory_remove_breakpoint" gdbarch method, with a
   fork-related workaround described below.  */
static int
spu_memory_remove_breakpoint (struct gdbarch *gdbarch,
			      struct bp_target_info *bp_tgt)
{
  /* We work around a problem in combined Cell/B.E. debugging here.  Consider
     that in a combined application, we have some breakpoints inserted in SPU
     code, and now the application forks (on the PPU side).  GDB common code
     will assume that the fork system call copied all breakpoints into the new
     process' address space, and that all those copies now need to be removed
     (see breakpoint.c:detach_breakpoints).

     While this is certainly true for PPU side breakpoints, it is not true
     for SPU side breakpoints.  fork will clone the SPU context file
     descriptors, so that all the existing SPU contexts are in accessible
     in the new process.  However, the contents of the SPU contexts themselves
     are *not* cloned.  Therefore the effect of detach_breakpoints is to
     remove SPU breakpoints from the *original* SPU context's local store
     -- this is not the correct behaviour.

     The workaround is to check whether the PID we are asked to remove this
     breakpoint from (i.e. ptid_get_pid (inferior_ptid)) is different from the
     PID of the current inferior (i.e. current_inferior ()->pid).  This is only
     true in the context of detach_breakpoints.  If so, we simply do nothing.
     [ Note that for the fork child process, it does not matter if breakpoints
     remain inserted, because those SPU contexts are not runnable anyway --
     the Linux kernel allows only the original process to invoke spu_run.  */

  if (ptid_get_pid (inferior_ptid) != current_inferior ()->pid)
    return 0;

  return default_memory_remove_breakpoint (gdbarch, bp_tgt);
}
1564
1565
1566 /* Software single-stepping support. */
1567
/* Implement software single-stepping: place breakpoints at the next
   sequential instruction and, for branches, at the branch target.
   All addresses are wrapped to the local store limit (LSLR).  */
static int
spu_software_single_step (struct frame_info *frame)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct address_space *aspace = get_frame_address_space (frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  CORE_ADDR pc, next_pc;
  unsigned int insn;
  int offset, reg;
  gdb_byte buf[4];
  ULONGEST lslr;

  pc = get_frame_pc (frame);

  if (target_read_memory (pc, buf, 4))
    return 1;
  insn = extract_unsigned_integer (buf, 4, byte_order);

  /* Get local store limit.  */
  lslr = get_frame_register_unsigned (frame, SPU_LSLR_REGNUM);
  if (!lslr)
    lslr = (ULONGEST) -1;

  /* Next sequential instruction is at PC + 4, except if the current
     instruction is a PPE-assisted call, in which case it is at PC + 8.
     Wrap around LS limit to be on the safe side.  */
  if ((insn & 0xffffff00) == 0x00002100)
    next_pc = (SPUADDR_ADDR (pc) + 8) & lslr;
  else
    next_pc = (SPUADDR_ADDR (pc) + 4) & lslr;

  insert_single_step_breakpoint (gdbarch,
				 aspace, SPUADDR (SPUADDR_SPU (pc), next_pc));

  if (is_branch (insn, &offset, &reg))
    {
      CORE_ADDR target = offset;

      /* REG is the base of the branch target: the PC for PC-relative
	 branches, a general register for indirect branches, or -1 for
	 absolute branches (no base).  */
      if (reg == SPU_PC_REGNUM)
	target += SPUADDR_ADDR (pc);
      else if (reg != -1)
	{
	  int optim, unavail;

	  if (get_frame_register_bytes (frame, reg, 0, 4, buf,
					&optim, &unavail))
	    target += extract_unsigned_integer (buf, 4, byte_order) & -4;
	  else
	    {
	      if (optim)
		throw_error (OPTIMIZED_OUT_ERROR,
			     _("Could not determine address of "
			       "single-step breakpoint."));
	      if (unavail)
		throw_error (NOT_AVAILABLE_ERROR,
			     _("Could not determine address of "
			       "single-step breakpoint."));
	    }
	}

      /* Avoid a duplicate breakpoint when the branch target equals
	 the next sequential address.  */
      target = target & lslr;
      if (target != next_pc)
	insert_single_step_breakpoint (gdbarch, aspace,
				       SPUADDR (SPUADDR_SPU (pc), target));
    }

  return 1;
}
1636
1637
1638 /* Longjmp support. */
1639
1640 static int
1641 spu_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
1642 {
1643 struct gdbarch *gdbarch = get_frame_arch (frame);
1644 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1645 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1646 gdb_byte buf[4];
1647 CORE_ADDR jb_addr;
1648 int optim, unavail;
1649
1650 /* Jump buffer is pointed to by the argument register $r3. */
1651 if (!get_frame_register_bytes (frame, SPU_ARG1_REGNUM, 0, 4, buf,
1652 &optim, &unavail))
1653 return 0;
1654
1655 jb_addr = extract_unsigned_integer (buf, 4, byte_order);
1656 if (target_read_memory (SPUADDR (tdep->id, jb_addr), buf, 4))
1657 return 0;
1658
1659 *pc = extract_unsigned_integer (buf, 4, byte_order);
1660 *pc = SPUADDR (tdep->id, *pc);
1661 return 1;
1662 }
1663
1664
1665 /* Disassembler. */
1666
/* Callback data for the disassembler: the architecture plus the SPU
   context ID to re-attach to printed addresses.  */
struct spu_dis_asm_data
{
  struct gdbarch *gdbarch;
  int id;			/* SPU context ID from the address's high bits.  */
};
1672
1673 static void
1674 spu_dis_asm_print_address (bfd_vma addr, struct disassemble_info *info)
1675 {
1676 struct spu_dis_asm_data *data = info->application_data;
1677 print_address (data->gdbarch, SPUADDR (data->id, addr), info->stream);
1678 }
1679
1680 static int
1681 gdb_print_insn_spu (bfd_vma memaddr, struct disassemble_info *info)
1682 {
1683 /* The opcodes disassembler does 18-bit address arithmetic. Make
1684 sure the SPU ID encoded in the high bits is added back when we
1685 call print_address. */
1686 struct disassemble_info spu_info = *info;
1687 struct spu_dis_asm_data data;
1688 data.gdbarch = info->application_data;
1689 data.id = SPUADDR_SPU (memaddr);
1690
1691 spu_info.application_data = &data;
1692 spu_info.print_address_func = spu_dis_asm_print_address;
1693 return print_insn_spu (memaddr, &spu_info);
1694 }
1695
1696
1697 /* Target overlays for the SPU overlay manager.
1698
1699 See the documentation of simple_overlay_update for how the
1700 interface is supposed to work.
1701
1702 Data structures used by the overlay manager:
1703
1704 struct ovly_table
1705 {
1706 u32 vma;
1707 u32 size;
1708 u32 pos;
1709 u32 buf;
1710 } _ovly_table[]; -- one entry per overlay section
1711
1712 struct ovly_buf_table
1713 {
1714 u32 mapped;
1715 } _ovly_buf_table[]; -- one entry per overlay buffer
1716
1717 _ovly_table should never change.
1718
1719 Both tables are aligned to a 16-byte boundary, the symbols
1720 _ovly_table and _ovly_buf_table are of type STT_OBJECT and their
1721 size set to the size of the respective array. buf in _ovly_table is
1722 an index into _ovly_buf_table.
1723
1724 mapped is an index into _ovly_table. Both the mapped and buf indices start
1725 from one to reference the first entry in their respective tables. */
1726
1727 /* Using the per-objfile private data mechanism, we store for each
1728 objfile an array of "struct spu_overlay_table" structures, one
1729 for each obj_section of the objfile. This structure holds two
1730 fields, MAPPED_PTR and MAPPED_VAL. If MAPPED_PTR is zero, this
1731 is *not* an overlay section. If it is non-zero, it represents
1732 a target address. The overlay section is mapped iff the target
1733 integer at this location equals MAPPED_VAL. */
1734
/* Key for the per-objfile overlay table (see comment above).  */
static const struct objfile_data *spu_overlay_data;

/* Overlay state for one obj_section.  MAPPED_PTR is zero for
   non-overlay sections; otherwise it is the target address whose
   value equals MAPPED_VAL exactly when the section is mapped.  */
struct spu_overlay_table
{
  CORE_ADDR mapped_ptr;
  CORE_ADDR mapped_val;
};
1742
/* Retrieve the overlay table for OBJFILE.  If not already cached, read
   the _ovly_table data structure from the target and initialize the
   spu_overlay_table data structure from it.  Returns NULL if OBJFILE
   has no overlay manager symbols.  */
static struct spu_overlay_table *
spu_get_overlay_table (struct objfile *objfile)
{
  enum bfd_endian byte_order = bfd_big_endian (objfile->obfd)?
		   BFD_ENDIAN_BIG : BFD_ENDIAN_LITTLE;
  struct bound_minimal_symbol ovly_table_msym, ovly_buf_table_msym;
  CORE_ADDR ovly_table_base, ovly_buf_table_base;
  unsigned ovly_table_size, ovly_buf_table_size;
  struct spu_overlay_table *tbl;
  struct obj_section *osect;
  gdb_byte *ovly_table;
  int i;

  /* Return the cached table if we have already built one.  */
  tbl = objfile_data (objfile, spu_overlay_data);
  if (tbl)
    return tbl;

  ovly_table_msym = lookup_minimal_symbol ("_ovly_table", NULL, objfile);
  if (!ovly_table_msym.minsym)
    return NULL;

  ovly_buf_table_msym = lookup_minimal_symbol ("_ovly_buf_table",
					       NULL, objfile);
  if (!ovly_buf_table_msym.minsym)
    return NULL;

  ovly_table_base = BMSYMBOL_VALUE_ADDRESS (ovly_table_msym);
  ovly_table_size = MSYMBOL_SIZE (ovly_table_msym.minsym);

  ovly_buf_table_base = BMSYMBOL_VALUE_ADDRESS (ovly_buf_table_msym);
  ovly_buf_table_size = MSYMBOL_SIZE (ovly_buf_table_msym.minsym);

  /* Read the whole _ovly_table from the target.  */
  ovly_table = xmalloc (ovly_table_size);
  read_memory (ovly_table_base, ovly_table, ovly_table_size);

  /* One table entry per obj_section, zero-initialized (mapped_ptr == 0
     marks "not an overlay section").  */
  tbl = OBSTACK_CALLOC (&objfile->objfile_obstack,
			objfile->sections_end - objfile->sections,
			struct spu_overlay_table);

  /* Each _ovly_table entry is 16 bytes: vma, size, pos, buf.  */
  for (i = 0; i < ovly_table_size / 16; i++)
    {
      CORE_ADDR vma  = extract_unsigned_integer (ovly_table + 16*i + 0,
						 4, byte_order);
      CORE_ADDR size = extract_unsigned_integer (ovly_table + 16*i + 4,
						 4, byte_order);
      CORE_ADDR pos  = extract_unsigned_integer (ovly_table + 16*i + 8,
						 4, byte_order);
      CORE_ADDR buf  = extract_unsigned_integer (ovly_table + 16*i + 12,
						 4, byte_order);

      /* BUF is a one-based index into _ovly_buf_table; skip invalid
	 entries.  */
      if (buf == 0 || (buf - 1) * 4 >= ovly_buf_table_size)
	continue;

      /* Match this entry to an obj_section by VMA and file position.  */
      ALL_OBJFILE_OSECTIONS (objfile, osect)
	if (vma == bfd_section_vma (objfile->obfd, osect->the_bfd_section)
	    && pos == osect->the_bfd_section->filepos)
	  {
	    int ndx = osect - objfile->sections;
	    tbl[ndx].mapped_ptr = ovly_buf_table_base + (buf - 1) * 4;
	    tbl[ndx].mapped_val = i + 1;
	    break;
	  }
    }

  xfree (ovly_table);
  set_objfile_data (objfile, spu_overlay_data, tbl);
  return tbl;
}
1814
1815 /* Read _ovly_buf_table entry from the target to dermine whether
1816 OSECT is currently mapped, and update the mapped state. */
1817 static void
1818 spu_overlay_update_osect (struct obj_section *osect)
1819 {
1820 enum bfd_endian byte_order = bfd_big_endian (osect->objfile->obfd)?
1821 BFD_ENDIAN_BIG : BFD_ENDIAN_LITTLE;
1822 struct spu_overlay_table *ovly_table;
1823 CORE_ADDR id, val;
1824
1825 ovly_table = spu_get_overlay_table (osect->objfile);
1826 if (!ovly_table)
1827 return;
1828
1829 ovly_table += osect - osect->objfile->sections;
1830 if (ovly_table->mapped_ptr == 0)
1831 return;
1832
1833 id = SPUADDR_SPU (obj_section_addr (osect));
1834 val = read_memory_unsigned_integer (SPUADDR (id, ovly_table->mapped_ptr),
1835 4, byte_order);
1836 osect->ovly_mapped = (val == ovly_table->mapped_val);
1837 }
1838
1839 /* If OSECT is NULL, then update all sections' mapped state.
1840 If OSECT is non-NULL, then update only OSECT's mapped state. */
1841 static void
1842 spu_overlay_update (struct obj_section *osect)
1843 {
1844 /* Just one section. */
1845 if (osect)
1846 spu_overlay_update_osect (osect);
1847
1848 /* All sections. */
1849 else
1850 {
1851 struct objfile *objfile;
1852
1853 ALL_OBJSECTIONS (objfile, osect)
1854 if (section_is_overlay (osect))
1855 spu_overlay_update_osect (osect);
1856 }
1857 }
1858
/* Whenever a new objfile is loaded, read the target's _ovly_table.
   If there is one, go through all sections and make sure for non-
   overlay sections LMA equals VMA, while for overlay sections LMA
   is larger than SPU_OVERLAY_LMA.  */
static void
spu_overlay_new_objfile (struct objfile *objfile)
{
  struct spu_overlay_table *ovly_table;
  struct obj_section *osect;

  /* If we've already touched this file, do nothing.  */
  if (!objfile || objfile_data (objfile, spu_overlay_data) != NULL)
    return;

  /* Consider only SPU objfiles.  */
  if (bfd_get_arch (objfile->obfd) != bfd_arch_spu)
    return;

  /* Check if this objfile has overlays.  */
  ovly_table = spu_get_overlay_table (objfile);
  if (!ovly_table)
    return;

  /* Now go and fiddle with all the LMAs.  Note that bfd_section_lma
     is usable as an lvalue in this BFD version.  */
  ALL_OBJFILE_OSECTIONS (objfile, osect)
    {
      bfd *obfd = objfile->obfd;
      asection *bsect = osect->the_bfd_section;
      int ndx = osect - objfile->sections;

      if (ovly_table[ndx].mapped_ptr == 0)
	bfd_section_lma (obfd, bsect) = bfd_section_vma (obfd, bsect);
      else
	bfd_section_lma (obfd, bsect) = SPU_OVERLAY_LMA + bsect->filepos;
    }
}
1895
1896
/* Insert temporary breakpoint on "main" function of newly loaded
   SPE context OBJFILE.  Only active when "set spu stop-on-load on"
   is in effect.  */
static void
spu_catch_start (struct objfile *objfile)
{
  struct bound_minimal_symbol minsym;
  struct symtab *symtab;
  CORE_ADDR pc;
  char buf[32];

  /* Do this only if requested by "set spu stop-on-load on".  */
  if (!spu_stop_on_load_p)
    return;

  /* Consider only SPU objfiles.  */
  if (!objfile || bfd_get_arch (objfile->obfd) != bfd_arch_spu)
    return;

  /* The main objfile is handled differently.  */
  if (objfile == symfile_objfile)
    return;

  /* There can be multiple symbols named "main".  Search for the
     "main" in *this* objfile.  */
  minsym = lookup_minimal_symbol ("main", NULL, objfile);
  if (!minsym.minsym)
    return;

  /* If we have debugging information, try to use it -- this
     will allow us to properly skip the prologue.  */
  pc = BMSYMBOL_VALUE_ADDRESS (minsym);
  symtab = find_pc_sect_symtab (pc, MSYMBOL_OBJ_SECTION (minsym.objfile,
							 minsym.minsym));
  if (symtab != NULL)
    {
      struct blockvector *bv = BLOCKVECTOR (symtab);
      struct block *block = BLOCKVECTOR_BLOCK (bv, GLOBAL_BLOCK);
      struct symbol *sym;
      struct symtab_and_line sal;

      sym = lookup_block_symbol (block, "main", VAR_DOMAIN);
      if (sym)
	{
	  fixup_symbol_section (sym, objfile);
	  /* Skip past the prologue to the first user-code line.  */
	  sal = find_function_start_sal (sym, 1);
	  pc = sal.pc;
	}
    }

  /* Use a numerical address for the set_breakpoint command to avoid having
     the breakpoint re-set incorrectly.  */
  xsnprintf (buf, sizeof buf, "*%s", core_addr_to_string (pc));
  create_breakpoint (get_objfile_arch (objfile), buf /* arg */,
		     NULL /* cond_string */, -1 /* thread */,
		     NULL /* extra_string */,
		     0 /* parse_condition_and_thread */, 1 /* tempflag */,
		     bp_breakpoint /* type_wanted */,
		     0 /* ignore_count */,
		     AUTO_BOOLEAN_FALSE /* pending_break_support */,
		     &bkpt_breakpoint_ops /* ops */, 0 /* from_tty */,
		     1 /* enabled */, 0 /* internal */, 0);
}
1959
1960
1961 /* Look up OBJFILE loaded into FRAME's SPU context. */
1962 static struct objfile *
1963 spu_objfile_from_frame (struct frame_info *frame)
1964 {
1965 struct gdbarch *gdbarch = get_frame_arch (frame);
1966 struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1967 struct objfile *obj;
1968
1969 if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
1970 return NULL;
1971
1972 ALL_OBJFILES (obj)
1973 {
1974 if (obj->sections != obj->sections_end
1975 && SPUADDR_SPU (obj_section_addr (obj->sections)) == tdep->id)
1976 return obj;
1977 }
1978
1979 return NULL;
1980 }
1981
1982 /* Flush cache for ea pointer access if available. */
1983 static void
1984 flush_ea_cache (void)
1985 {
1986 struct bound_minimal_symbol msymbol;
1987 struct objfile *obj;
1988
1989 if (!has_stack_frames ())
1990 return;
1991
1992 obj = spu_objfile_from_frame (get_current_frame ());
1993 if (obj == NULL)
1994 return;
1995
1996 /* Lookup inferior function __cache_flush. */
1997 msymbol = lookup_minimal_symbol ("__cache_flush", NULL, obj);
1998 if (msymbol.minsym != NULL)
1999 {
2000 struct type *type;
2001 CORE_ADDR addr;
2002
2003 type = objfile_type (obj)->builtin_void;
2004 type = lookup_function_type (type);
2005 type = lookup_pointer_type (type);
2006 addr = BMSYMBOL_VALUE_ADDRESS (msymbol);
2007
2008 call_function_by_hand (value_from_pointer (type, addr), 0, NULL);
2009 }
2010 }
2011
/* This handler is called when the inferior has stopped.  If it is stopped in
   SPU architecture then flush the ea cache if used.  Controlled by the
   spu_auto_flush_cache_p setting.  */
static void
spu_attach_normal_stop (struct bpstats *bs, int print_frame)
{
  if (!spu_auto_flush_cache_p)
    return;

  /* Temporarily reset spu_auto_flush_cache_p to avoid recursively
     re-entering this function when __cache_flush stops.  */
  spu_auto_flush_cache_p = 0;
  flush_ea_cache ();
  spu_auto_flush_cache_p = 1;
}
2026
2027
2028 /* "info spu" commands. */
2029
/* "info spu event" command: display the SPU event status and event
   mask, read as hex text from the target's SPU object (spufs).  */
static void
info_spu_event_command (char *args, int from_tty)
{
  struct frame_info *frame = get_selected_frame (NULL);
  ULONGEST event_status = 0;
  ULONGEST event_mask = 0;
  struct cleanup *chain;
  gdb_byte buf[100];
  char annex[32];
  LONGEST len;
  int id;

  if (gdbarch_bfd_arch_info (get_frame_arch (frame))->arch != bfd_arch_spu)
    error (_("\"info spu\" is only supported on the SPU architecture."));

  id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);

  /* Read "<id>/event_status" as a NUL-terminated hex string.  */
  xsnprintf (annex, sizeof annex, "%d/event_status", id);
  len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
		     buf, 0, (sizeof (buf) - 1));
  if (len <= 0)
    error (_("Could not read event_status."));
  buf[len] = '\0';
  event_status = strtoulst ((char *) buf, NULL, 16);
 
  /* Likewise for "<id>/event_mask".  */
  xsnprintf (annex, sizeof annex, "%d/event_mask", id);
  len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
		     buf, 0, (sizeof (buf) - 1));
  if (len <= 0)
    error (_("Could not read event_mask."));
  buf[len] = '\0';
  event_mask = strtoulst ((char *) buf, NULL, 16);
 
  chain = make_cleanup_ui_out_tuple_begin_end (current_uiout, "SPUInfoEvent");

  /* MI consumers get raw fields; the CLI gets formatted text.  */
  if (ui_out_is_mi_like_p (current_uiout))
    {
      ui_out_field_fmt (current_uiout, "event_status",
			"0x%s", phex_nz (event_status, 4));
      ui_out_field_fmt (current_uiout, "event_mask",
			"0x%s", phex_nz (event_mask, 4));
    }
  else
    {
      printf_filtered (_("Event Status 0x%s\n"), phex (event_status, 4));
      printf_filtered (_("Event Mask   0x%s\n"), phex (event_mask, 4));
    }

  do_cleanups (chain);
}
2080
/* "info spu signal" command: display the two SPU signal-notification
   channels -- pending control words and channel types (Or vs.
   Overwrite) -- read from the target's SPU object (spufs).  */
static void
info_spu_signal_command (char *args, int from_tty)
{
  struct frame_info *frame = get_selected_frame (NULL);
  struct gdbarch *gdbarch = get_frame_arch (frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  ULONGEST signal1 = 0;
  ULONGEST signal1_type = 0;
  int signal1_pending = 0;
  ULONGEST signal2 = 0;
  ULONGEST signal2_type = 0;
  int signal2_pending = 0;
  struct cleanup *chain;
  char annex[32];
  gdb_byte buf[100];
  LONGEST len;
  int id;

  if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
    error (_("\"info spu\" is only supported on the SPU architecture."));

  id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);

  /* Reading "<id>/signal1" yields 4 binary bytes if a signal is
     pending, and nothing otherwise.  */
  xsnprintf (annex, sizeof annex, "%d/signal1", id);
  len = target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 4);
  if (len < 0)
    error (_("Could not read signal1."));
  else if (len == 4)
    {
      signal1 = extract_unsigned_integer (buf, 4, byte_order);
      signal1_pending = 1;
    }

  /* The channel type is read as a NUL-terminated hex string.  */
  xsnprintf (annex, sizeof annex, "%d/signal1_type", id);
  len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
		     buf, 0, (sizeof (buf) - 1));
  if (len <= 0)
    error (_("Could not read signal1_type."));
  buf[len] = '\0';
  signal1_type = strtoulst ((char *) buf, NULL, 16);

  /* Same again for the second signal channel.  */
  xsnprintf (annex, sizeof annex, "%d/signal2", id);
  len = target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 4);
  if (len < 0)
    error (_("Could not read signal2."));
  else if (len == 4)
    {
      signal2 = extract_unsigned_integer (buf, 4, byte_order);
      signal2_pending = 1;
    }

  xsnprintf (annex, sizeof annex, "%d/signal2_type", id);
  len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
		     buf, 0, (sizeof (buf) - 1));
  if (len <= 0)
    error (_("Could not read signal2_type."));
  buf[len] = '\0';
  signal2_type = strtoulst ((char *) buf, NULL, 16);

  chain = make_cleanup_ui_out_tuple_begin_end (current_uiout, "SPUInfoSignal");

  /* MI consumers get raw fields; the CLI gets formatted text.  */
  if (ui_out_is_mi_like_p (current_uiout))
    {
      ui_out_field_int (current_uiout, "signal1_pending", signal1_pending);
      ui_out_field_fmt (current_uiout, "signal1", "0x%s", phex_nz (signal1, 4));
      ui_out_field_int (current_uiout, "signal1_type", signal1_type);
      ui_out_field_int (current_uiout, "signal2_pending", signal2_pending);
      ui_out_field_fmt (current_uiout, "signal2", "0x%s", phex_nz (signal2, 4));
      ui_out_field_int (current_uiout, "signal2_type", signal2_type);
    }
  else
    {
      if (signal1_pending)
	printf_filtered (_("Signal 1 control word 0x%s "), phex (signal1, 4));
      else
	printf_filtered (_("Signal 1 not pending "));

      if (signal1_type)
	printf_filtered (_("(Type Or)\n"));
      else
	printf_filtered (_("(Type Overwrite)\n"));

      if (signal2_pending)
	printf_filtered (_("Signal 2 control word 0x%s "), phex (signal2, 4));
      else
	printf_filtered (_("Signal 2 not pending "));

      if (signal2_type)
	printf_filtered (_("(Type Or)\n"));
      else
	printf_filtered (_("(Type Overwrite)\n"));
    }

  do_cleanups (chain);
}
2176
2177 static void
2178 info_spu_mailbox_list (gdb_byte *buf, int nr, enum bfd_endian byte_order,
2179 const char *field, const char *msg)
2180 {
2181 struct cleanup *chain;
2182 int i;
2183
2184 if (nr <= 0)
2185 return;
2186
2187 chain = make_cleanup_ui_out_table_begin_end (current_uiout, 1, nr, "mbox");
2188
2189 ui_out_table_header (current_uiout, 32, ui_left, field, msg);
2190 ui_out_table_body (current_uiout);
2191
2192 for (i = 0; i < nr; i++)
2193 {
2194 struct cleanup *val_chain;
2195 ULONGEST val;
2196 val_chain = make_cleanup_ui_out_tuple_begin_end (current_uiout, "mbox");
2197 val = extract_unsigned_integer (buf + 4*i, 4, byte_order);
2198 ui_out_field_fmt (current_uiout, field, "0x%s", phex (val, 4));
2199 do_cleanups (val_chain);
2200
2201 if (!ui_out_is_mi_like_p (current_uiout))
2202 printf_filtered ("\n");
2203 }
2204
2205 do_cleanups (chain);
2206 }
2207
2208 static void
2209 info_spu_mailbox_command (char *args, int from_tty)
2210 {
2211 struct frame_info *frame = get_selected_frame (NULL);
2212 struct gdbarch *gdbarch = get_frame_arch (frame);
2213 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2214 struct cleanup *chain;
2215 char annex[32];
2216 gdb_byte buf[1024];
2217 LONGEST len;
2218 int id;
2219
2220 if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
2221 error (_("\"info spu\" is only supported on the SPU architecture."));
2222
2223 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
2224
2225 chain = make_cleanup_ui_out_tuple_begin_end (current_uiout, "SPUInfoMailbox");
2226
2227 xsnprintf (annex, sizeof annex, "%d/mbox_info", id);
2228 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2229 buf, 0, sizeof buf);
2230 if (len < 0)
2231 error (_("Could not read mbox_info."));
2232
2233 info_spu_mailbox_list (buf, len / 4, byte_order,
2234 "mbox", "SPU Outbound Mailbox");
2235
2236 xsnprintf (annex, sizeof annex, "%d/ibox_info", id);
2237 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2238 buf, 0, sizeof buf);
2239 if (len < 0)
2240 error (_("Could not read ibox_info."));
2241
2242 info_spu_mailbox_list (buf, len / 4, byte_order,
2243 "ibox", "SPU Outbound Interrupt Mailbox");
2244
2245 xsnprintf (annex, sizeof annex, "%d/wbox_info", id);
2246 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2247 buf, 0, sizeof buf);
2248 if (len < 0)
2249 error (_("Could not read wbox_info."));
2250
2251 info_spu_mailbox_list (buf, len / 4, byte_order,
2252 "wbox", "SPU Inbound Mailbox");
2253
2254 do_cleanups (chain);
2255 }
2256
2257 static ULONGEST
2258 spu_mfc_get_bitfield (ULONGEST word, int first, int last)
2259 {
2260 ULONGEST mask = ~(~(ULONGEST)0 << (last - first + 1));
2261 return (word >> (63 - last)) & mask;
2262 }
2263
static void
info_spu_dma_cmdlist (gdb_byte *buf, int nr, enum bfd_endian byte_order)
{
  /* MFC command mnemonics, indexed by the 8-bit command opcode.
     NULL entries have no mnemonic; the raw opcode number is printed
     for those.  */
  static char *spu_mfc_opcode[256] =
    {
    /* 00 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* 10 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* 20 */ "put", "putb", "putf", NULL, "putl", "putlb", "putlf", NULL,
             "puts", "putbs", "putfs", NULL, NULL, NULL, NULL, NULL,
    /* 30 */ "putr", "putrb", "putrf", NULL, "putrl", "putrlb", "putrlf", NULL,
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* 40 */ "get", "getb", "getf", NULL, "getl", "getlb", "getlf", NULL,
             "gets", "getbs", "getfs", NULL, NULL, NULL, NULL, NULL,
    /* 50 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* 60 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* 70 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* 80 */ "sdcrt", "sdcrtst", NULL, NULL, NULL, NULL, NULL, NULL,
             NULL, "sdcrz", NULL, NULL, NULL, "sdcrst", NULL, "sdcrf",
    /* 90 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* a0 */ "sndsig", "sndsigb", "sndsigf", NULL, NULL, NULL, NULL, NULL,
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* b0 */ "putlluc", NULL, NULL, NULL, "putllc", NULL, NULL, NULL,
             "putqlluc", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* c0 */ "barrier", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
             "mfceieio", NULL, NULL, NULL, "mfcsync", NULL, NULL, NULL,
    /* d0 */ "getllar", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* e0 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    /* f0 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    };

  /* SEQ maps display position to queue-entry index; DONE is a bitmask
     of entries already scheduled, with bit (nr - 1 - j) standing for
     queue entry J.  BUF holds NR entries of 32 bytes each.  */
  int *seq = alloca (nr * sizeof (int));
  int done = 0;
  struct cleanup *chain;
  int i, j;


  /* Determine sequence in which to display (valid) entries.  */
  for (i = 0; i < nr; i++)
    {
      /* Search for the first valid entry all of whose
	 dependencies are met.  */
      for (j = 0; j < nr; j++)
	{
	  ULONGEST mfc_cq_dw3;
	  ULONGEST dependencies;

	  if (done & (1 << (nr - 1 - j)))
	    continue;

	  mfc_cq_dw3
	    = extract_unsigned_integer (buf + 32*j + 24,8, byte_order);
	  /* Bit 16 of doubleword 3 marks a valid queue entry; skip
	     invalid slots entirely.  */
	  if (!spu_mfc_get_bitfield (mfc_cq_dw3, 16, 16))
	    continue;

	  /* The top NR bits of doubleword 3 encode dependencies on
	     other queue entries; show this one only after all of its
	     dependencies have been scheduled.  */
	  dependencies = spu_mfc_get_bitfield (mfc_cq_dw3, 0, nr - 1);
	  if ((dependencies & done) != dependencies)
	    continue;

	  seq[i] = j;
	  done |= 1 << (nr - 1 - j);
	  break;
	}

      /* No schedulable entry remained; stop early.  */
      if (j == nr)
	break;
    }

  /* Only the I entries actually scheduled are displayed.  */
  nr = i;


  chain = make_cleanup_ui_out_table_begin_end (current_uiout, 10, nr,
					       "dma_cmd");

  ui_out_table_header (current_uiout, 7, ui_left, "opcode", "Opcode");
  ui_out_table_header (current_uiout, 3, ui_left, "tag", "Tag");
  ui_out_table_header (current_uiout, 3, ui_left, "tid", "TId");
  ui_out_table_header (current_uiout, 3, ui_left, "rid", "RId");
  ui_out_table_header (current_uiout, 18, ui_left, "ea", "EA");
  ui_out_table_header (current_uiout, 7, ui_left, "lsa", "LSA");
  ui_out_table_header (current_uiout, 7, ui_left, "size", "Size");
  ui_out_table_header (current_uiout, 7, ui_left, "lstaddr", "LstAddr");
  ui_out_table_header (current_uiout, 7, ui_left, "lstsize", "LstSize");
  ui_out_table_header (current_uiout, 1, ui_left, "error_p", "E");

  ui_out_table_body (current_uiout);

  for (i = 0; i < nr; i++)
    {
      struct cleanup *cmd_chain;
      ULONGEST mfc_cq_dw0;
      ULONGEST mfc_cq_dw1;
      ULONGEST mfc_cq_dw2;
      int mfc_cmd_opcode, mfc_cmd_tag, rclass_id, tclass_id;
      int list_lsa, list_size, mfc_lsa, mfc_size;
      ULONGEST mfc_ea;
      int list_valid_p, noop_valid_p, qw_valid_p, ea_valid_p, cmd_error_p;

      /* Decode contents of MFC Command Queue Context Save/Restore Registers.
	 See "Cell Broadband Engine Registers V1.3", section 3.3.2.1.  */

      mfc_cq_dw0
	= extract_unsigned_integer (buf + 32*seq[i], 8, byte_order);
      mfc_cq_dw1
	= extract_unsigned_integer (buf + 32*seq[i] + 8, 8, byte_order);
      mfc_cq_dw2
	= extract_unsigned_integer (buf + 32*seq[i] + 16, 8, byte_order);

      list_lsa = spu_mfc_get_bitfield (mfc_cq_dw0, 0, 14);
      list_size = spu_mfc_get_bitfield (mfc_cq_dw0, 15, 26);
      mfc_cmd_opcode = spu_mfc_get_bitfield (mfc_cq_dw0, 27, 34);
      mfc_cmd_tag = spu_mfc_get_bitfield (mfc_cq_dw0, 35, 39);
      list_valid_p = spu_mfc_get_bitfield (mfc_cq_dw0, 40, 40);
      rclass_id = spu_mfc_get_bitfield (mfc_cq_dw0, 41, 43);
      tclass_id = spu_mfc_get_bitfield (mfc_cq_dw0, 44, 46);

      /* The effective address is split across doublewords 1 and 2:
	 the high bits come from dw1, the low 12 bits from dw2.  */
      mfc_ea = spu_mfc_get_bitfield (mfc_cq_dw1, 0, 51) << 12
	       | spu_mfc_get_bitfield (mfc_cq_dw2, 25, 36);

      mfc_lsa = spu_mfc_get_bitfield (mfc_cq_dw2, 0, 13);
      mfc_size = spu_mfc_get_bitfield (mfc_cq_dw2, 14, 24);
      noop_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 37, 37);
      qw_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 38, 38);
      ea_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 39, 39);
      cmd_error_p = spu_mfc_get_bitfield (mfc_cq_dw2, 40, 40);

      cmd_chain = make_cleanup_ui_out_tuple_begin_end (current_uiout, "cmd");

      /* Show the mnemonic if one is known, else the raw opcode.  */
      if (spu_mfc_opcode[mfc_cmd_opcode])
	ui_out_field_string (current_uiout, "opcode", spu_mfc_opcode[mfc_cmd_opcode]);
      else
	ui_out_field_int (current_uiout, "opcode", mfc_cmd_opcode);

      ui_out_field_int (current_uiout, "tag", mfc_cmd_tag);
      ui_out_field_int (current_uiout, "tid", tclass_id);
      ui_out_field_int (current_uiout, "rid", rclass_id);

      if (ea_valid_p)
	ui_out_field_fmt (current_uiout, "ea", "0x%s", phex (mfc_ea, 8));
      else
	ui_out_field_skip (current_uiout, "ea");

      /* LSA and (when qw_valid_p) size are stored scaled down by 16;
	 list address/size are scaled down by 8.  */
      ui_out_field_fmt (current_uiout, "lsa", "0x%05x", mfc_lsa << 4);
      if (qw_valid_p)
	ui_out_field_fmt (current_uiout, "size", "0x%05x", mfc_size << 4);
      else
	ui_out_field_fmt (current_uiout, "size", "0x%05x", mfc_size);

      if (list_valid_p)
	{
	  ui_out_field_fmt (current_uiout, "lstaddr", "0x%05x", list_lsa << 3);
	  ui_out_field_fmt (current_uiout, "lstsize", "0x%05x", list_size << 3);
	}
      else
	{
	  ui_out_field_skip (current_uiout, "lstaddr");
	  ui_out_field_skip (current_uiout, "lstsize");
	}

      if (cmd_error_p)
	ui_out_field_string (current_uiout, "error_p", "*");
      else
	ui_out_field_skip (current_uiout, "error_p");

      do_cleanups (cmd_chain);

      if (!ui_out_is_mi_like_p (current_uiout))
	printf_filtered ("\n");
    }

  do_cleanups (chain);
}
2444
2445 static void
2446 info_spu_dma_command (char *args, int from_tty)
2447 {
2448 struct frame_info *frame = get_selected_frame (NULL);
2449 struct gdbarch *gdbarch = get_frame_arch (frame);
2450 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2451 ULONGEST dma_info_type;
2452 ULONGEST dma_info_mask;
2453 ULONGEST dma_info_status;
2454 ULONGEST dma_info_stall_and_notify;
2455 ULONGEST dma_info_atomic_command_status;
2456 struct cleanup *chain;
2457 char annex[32];
2458 gdb_byte buf[1024];
2459 LONGEST len;
2460 int id;
2461
2462 if (gdbarch_bfd_arch_info (get_frame_arch (frame))->arch != bfd_arch_spu)
2463 error (_("\"info spu\" is only supported on the SPU architecture."));
2464
2465 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
2466
2467 xsnprintf (annex, sizeof annex, "%d/dma_info", id);
2468 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2469 buf, 0, 40 + 16 * 32);
2470 if (len <= 0)
2471 error (_("Could not read dma_info."));
2472
2473 dma_info_type
2474 = extract_unsigned_integer (buf, 8, byte_order);
2475 dma_info_mask
2476 = extract_unsigned_integer (buf + 8, 8, byte_order);
2477 dma_info_status
2478 = extract_unsigned_integer (buf + 16, 8, byte_order);
2479 dma_info_stall_and_notify
2480 = extract_unsigned_integer (buf + 24, 8, byte_order);
2481 dma_info_atomic_command_status
2482 = extract_unsigned_integer (buf + 32, 8, byte_order);
2483
2484 chain = make_cleanup_ui_out_tuple_begin_end (current_uiout, "SPUInfoDMA");
2485
2486 if (ui_out_is_mi_like_p (current_uiout))
2487 {
2488 ui_out_field_fmt (current_uiout, "dma_info_type", "0x%s",
2489 phex_nz (dma_info_type, 4));
2490 ui_out_field_fmt (current_uiout, "dma_info_mask", "0x%s",
2491 phex_nz (dma_info_mask, 4));
2492 ui_out_field_fmt (current_uiout, "dma_info_status", "0x%s",
2493 phex_nz (dma_info_status, 4));
2494 ui_out_field_fmt (current_uiout, "dma_info_stall_and_notify", "0x%s",
2495 phex_nz (dma_info_stall_and_notify, 4));
2496 ui_out_field_fmt (current_uiout, "dma_info_atomic_command_status", "0x%s",
2497 phex_nz (dma_info_atomic_command_status, 4));
2498 }
2499 else
2500 {
2501 const char *query_msg = _("no query pending");
2502
2503 if (dma_info_type & 4)
2504 switch (dma_info_type & 3)
2505 {
2506 case 1: query_msg = _("'any' query pending"); break;
2507 case 2: query_msg = _("'all' query pending"); break;
2508 default: query_msg = _("undefined query type"); break;
2509 }
2510
2511 printf_filtered (_("Tag-Group Status 0x%s\n"),
2512 phex (dma_info_status, 4));
2513 printf_filtered (_("Tag-Group Mask 0x%s (%s)\n"),
2514 phex (dma_info_mask, 4), query_msg);
2515 printf_filtered (_("Stall-and-Notify 0x%s\n"),
2516 phex (dma_info_stall_and_notify, 4));
2517 printf_filtered (_("Atomic Cmd Status 0x%s\n"),
2518 phex (dma_info_atomic_command_status, 4));
2519 printf_filtered ("\n");
2520 }
2521
2522 info_spu_dma_cmdlist (buf + 40, 16, byte_order);
2523 do_cleanups (chain);
2524 }
2525
2526 static void
2527 info_spu_proxydma_command (char *args, int from_tty)
2528 {
2529 struct frame_info *frame = get_selected_frame (NULL);
2530 struct gdbarch *gdbarch = get_frame_arch (frame);
2531 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2532 ULONGEST dma_info_type;
2533 ULONGEST dma_info_mask;
2534 ULONGEST dma_info_status;
2535 struct cleanup *chain;
2536 char annex[32];
2537 gdb_byte buf[1024];
2538 LONGEST len;
2539 int id;
2540
2541 if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
2542 error (_("\"info spu\" is only supported on the SPU architecture."));
2543
2544 id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
2545
2546 xsnprintf (annex, sizeof annex, "%d/proxydma_info", id);
2547 len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2548 buf, 0, 24 + 8 * 32);
2549 if (len <= 0)
2550 error (_("Could not read proxydma_info."));
2551
2552 dma_info_type = extract_unsigned_integer (buf, 8, byte_order);
2553 dma_info_mask = extract_unsigned_integer (buf + 8, 8, byte_order);
2554 dma_info_status = extract_unsigned_integer (buf + 16, 8, byte_order);
2555
2556 chain = make_cleanup_ui_out_tuple_begin_end (current_uiout,
2557 "SPUInfoProxyDMA");
2558
2559 if (ui_out_is_mi_like_p (current_uiout))
2560 {
2561 ui_out_field_fmt (current_uiout, "proxydma_info_type", "0x%s",
2562 phex_nz (dma_info_type, 4));
2563 ui_out_field_fmt (current_uiout, "proxydma_info_mask", "0x%s",
2564 phex_nz (dma_info_mask, 4));
2565 ui_out_field_fmt (current_uiout, "proxydma_info_status", "0x%s",
2566 phex_nz (dma_info_status, 4));
2567 }
2568 else
2569 {
2570 const char *query_msg;
2571
2572 switch (dma_info_type & 3)
2573 {
2574 case 0: query_msg = _("no query pending"); break;
2575 case 1: query_msg = _("'any' query pending"); break;
2576 case 2: query_msg = _("'all' query pending"); break;
2577 default: query_msg = _("undefined query type"); break;
2578 }
2579
2580 printf_filtered (_("Tag-Group Status 0x%s\n"),
2581 phex (dma_info_status, 4));
2582 printf_filtered (_("Tag-Group Mask 0x%s (%s)\n"),
2583 phex (dma_info_mask, 4), query_msg);
2584 printf_filtered ("\n");
2585 }
2586
2587 info_spu_dma_cmdlist (buf + 24, 8, byte_order);
2588 do_cleanups (chain);
2589 }
2590
/* "info spu" prefix command: invoked without a subcommand, print a
   usage message and the list of available "info spu" facilities.  */
static void
info_spu_command (char *args, int from_tty)
{
  printf_unfiltered (_("\"info spu\" must be followed by "
		       "the name of an SPU facility.\n"));
  help_list (infospucmdlist, "info spu ", -1, gdb_stdout);
}
2598
2599
2600 /* Root of all "set spu "/"show spu " commands. */
2601
/* "show spu" prefix command: list all "show spu" subcommands.  */
static void
show_spu_command (char *args, int from_tty)
{
  help_list (showspucmdlist, "show spu ", all_commands, gdb_stdout);
}
2607
/* "set spu" prefix command: list all "set spu" subcommands.  */
static void
set_spu_command (char *args, int from_tty)
{
  help_list (setspucmdlist, "set spu ", all_commands, gdb_stdout);
}
2613
/* "show spu stop-on-load" callback: report whether GDB stops for new
   SPE threads.  VALUE is the pre-rendered setting string supplied by
   the set/show machinery.  */
static void
show_spu_stop_on_load (struct ui_file *file, int from_tty,
		       struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Stopping for new SPE threads is %s.\n"),
		    value);
}
2621
/* "show spu auto-flush-cache" callback: report whether the
   software-managed cache is flushed automatically.  VALUE is the
   pre-rendered setting string supplied by the set/show machinery.  */
static void
show_spu_auto_flush_cache (struct ui_file *file, int from_tty,
			   struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Automatic software-cache flush is %s.\n"),
		    value);
}
2629
2630
/* Set up gdbarch struct.  A separate SPU gdbarch is kept per spufs
   context ID: the requested ID is taken from INFO.tdep_info, or
   decoded from the solib BFD filename, and recorded in tdep->id so
   that the lookup below can distinguish otherwise-identical SPU
   architectures.  */

static struct gdbarch *
spu_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
{
  struct gdbarch *gdbarch;
  struct gdbarch_tdep *tdep;
  int id = -1;

  /* Which spufs ID was requested as address space?  */
  if (info.tdep_info)
    id = *(int *)info.tdep_info;
  /* For objfile architectures of SPU solibs, decode the ID from the name.
     This assumes the filename convention employed by solib-spu.c.
     If the pattern does not match, ID stays -1.  */
  else if (info.abfd)
    {
      char *name = strrchr (info.abfd->filename, '@');
      if (name)
	sscanf (name, "@0x%*x <%d>", &id);
    }

  /* Find a candidate among extant architectures.  */
  for (arches = gdbarch_list_lookup_by_info (arches, &info);
       arches != NULL;
       arches = gdbarch_list_lookup_by_info (arches->next, &info))
    {
      tdep = gdbarch_tdep (arches->gdbarch);
      if (tdep && tdep->id == id)
	return arches->gdbarch;
    }

  /* None found, so create a new architecture.  */
  tdep = XCNEW (struct gdbarch_tdep);
  tdep->id = id;
  gdbarch = gdbarch_alloc (&info, tdep);

  /* Disassembler.  */
  set_gdbarch_print_insn (gdbarch, gdb_print_insn_spu);

  /* Registers.  */
  set_gdbarch_num_regs (gdbarch, SPU_NUM_REGS);
  set_gdbarch_num_pseudo_regs (gdbarch, SPU_NUM_PSEUDO_REGS);
  set_gdbarch_sp_regnum (gdbarch, SPU_SP_REGNUM);
  set_gdbarch_pc_regnum (gdbarch, SPU_PC_REGNUM);
  set_gdbarch_read_pc (gdbarch, spu_read_pc);
  set_gdbarch_write_pc (gdbarch, spu_write_pc);
  set_gdbarch_register_name (gdbarch, spu_register_name);
  set_gdbarch_register_type (gdbarch, spu_register_type);
  set_gdbarch_pseudo_register_read (gdbarch, spu_pseudo_register_read);
  set_gdbarch_pseudo_register_write (gdbarch, spu_pseudo_register_write);
  set_gdbarch_value_from_register (gdbarch, spu_value_from_register);
  set_gdbarch_register_reggroup_p (gdbarch, spu_register_reggroup_p);

  /* Data types.  */
  set_gdbarch_char_signed (gdbarch, 0);
  set_gdbarch_ptr_bit (gdbarch, 32);
  set_gdbarch_addr_bit (gdbarch, 32);
  set_gdbarch_short_bit (gdbarch, 16);
  set_gdbarch_int_bit (gdbarch, 32);
  set_gdbarch_long_bit (gdbarch, 32);
  set_gdbarch_long_long_bit (gdbarch, 64);
  set_gdbarch_float_bit (gdbarch, 32);
  set_gdbarch_double_bit (gdbarch, 64);
  set_gdbarch_long_double_bit (gdbarch, 64);
  set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
  set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
  set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);

  /* Address handling.  */
  set_gdbarch_address_to_pointer (gdbarch, spu_address_to_pointer);
  set_gdbarch_pointer_to_address (gdbarch, spu_pointer_to_address);
  set_gdbarch_integer_to_address (gdbarch, spu_integer_to_address);
  set_gdbarch_address_class_type_flags (gdbarch, spu_address_class_type_flags);
  set_gdbarch_address_class_type_flags_to_name
    (gdbarch, spu_address_class_type_flags_to_name);
  set_gdbarch_address_class_name_to_type_flags
    (gdbarch, spu_address_class_name_to_type_flags);


  /* Inferior function calls.  */
  set_gdbarch_call_dummy_location (gdbarch, ON_STACK);
  set_gdbarch_frame_align (gdbarch, spu_frame_align);
  set_gdbarch_frame_red_zone_size (gdbarch, 2000);
  set_gdbarch_push_dummy_code (gdbarch, spu_push_dummy_code);
  set_gdbarch_push_dummy_call (gdbarch, spu_push_dummy_call);
  set_gdbarch_dummy_id (gdbarch, spu_dummy_id);
  set_gdbarch_return_value (gdbarch, spu_return_value);

  /* Frame handling.  */
  set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
  frame_unwind_append_unwinder (gdbarch, &spu_frame_unwind);
  frame_base_set_default (gdbarch, &spu_frame_base);
  set_gdbarch_unwind_pc (gdbarch, spu_unwind_pc);
  set_gdbarch_unwind_sp (gdbarch, spu_unwind_sp);
  set_gdbarch_virtual_frame_pointer (gdbarch, spu_virtual_frame_pointer);
  set_gdbarch_frame_args_skip (gdbarch, 0);
  set_gdbarch_skip_prologue (gdbarch, spu_skip_prologue);
  set_gdbarch_in_function_epilogue_p (gdbarch, spu_in_function_epilogue_p);

  /* Cell/B.E. cross-architecture unwinder support.  */
  frame_unwind_prepend_unwinder (gdbarch, &spu2ppu_unwind);

  /* Breakpoints.  */
  set_gdbarch_decr_pc_after_break (gdbarch, 4);
  set_gdbarch_breakpoint_from_pc (gdbarch, spu_breakpoint_from_pc);
  set_gdbarch_memory_remove_breakpoint (gdbarch, spu_memory_remove_breakpoint);
  set_gdbarch_cannot_step_breakpoint (gdbarch, 1);
  set_gdbarch_software_single_step (gdbarch, spu_software_single_step);
  set_gdbarch_get_longjmp_target (gdbarch, spu_get_longjmp_target);

  /* Overlays.  */
  set_gdbarch_overlay_update (gdbarch, spu_overlay_update);

  return gdbarch;
}
2746
/* Provide a prototype to silence -Wmissing-prototypes.  */
extern initialize_file_ftype _initialize_spu_tdep;

/* Module initialization: register the SPU architecture, attach the
   overlay / stop-on-load / normal-stop observers, and install the
   "set spu", "show spu", and "info spu" command trees.  */
void
_initialize_spu_tdep (void)
{
  register_gdbarch_init (bfd_arch_spu, spu_gdbarch_init);

  /* Add ourselves to objfile event chain.  */
  observer_attach_new_objfile (spu_overlay_new_objfile);
  spu_overlay_data = register_objfile_data ();

  /* Install spu stop-on-load handler.  */
  observer_attach_new_objfile (spu_catch_start);

  /* Add ourselves to normal_stop event chain.  */
  observer_attach_normal_stop (spu_attach_normal_stop);

  /* Add root prefix command for all "set spu"/"show spu" commands.  */
  add_prefix_cmd ("spu", no_class, set_spu_command,
		  _("Various SPU specific commands."),
		  &setspucmdlist, "set spu ", 0, &setlist);
  add_prefix_cmd ("spu", no_class, show_spu_command,
		  _("Various SPU specific commands."),
		  &showspucmdlist, "show spu ", 0, &showlist);

  /* Toggle whether or not to add a temporary breakpoint at the "main"
     function of new SPE contexts.  */
  add_setshow_boolean_cmd ("stop-on-load", class_support,
			   &spu_stop_on_load_p, _("\
Set whether to stop for new SPE threads."),
			   _("\
Show whether to stop for new SPE threads."),
			   _("\
Use \"on\" to give control to the user when a new SPE thread\n\
enters its \"main\" function.\n\
Use \"off\" to disable stopping for new SPE threads."),
			   NULL,
			   show_spu_stop_on_load,
			   &setspucmdlist, &showspucmdlist);

  /* Toggle whether or not to automatically flush the software-managed
     cache whenever SPE execution stops.  */
  add_setshow_boolean_cmd ("auto-flush-cache", class_support,
			   &spu_auto_flush_cache_p, _("\
Set whether to automatically flush the software-managed cache."),
			   _("\
Show whether to automatically flush the software-managed cache."),
			   _("\
Use \"on\" to automatically flush the software-managed cache\n\
whenever SPE execution stops.\n\
Use \"off\" to never automatically flush the software-managed cache."),
			   NULL,
			   show_spu_auto_flush_cache,
			   &setspucmdlist, &showspucmdlist);

  /* Add root prefix command for all "info spu" commands.  */
  add_prefix_cmd ("spu", class_info, info_spu_command,
		  _("Various SPU specific commands."),
		  &infospucmdlist, "info spu ", 0, &infolist);

  /* Add various "info spu" commands.  */
  add_cmd ("event", class_info, info_spu_event_command,
	   _("Display SPU event facility status.\n"),
	   &infospucmdlist);
  add_cmd ("signal", class_info, info_spu_signal_command,
	   _("Display SPU signal notification facility status.\n"),
	   &infospucmdlist);
  add_cmd ("mailbox", class_info, info_spu_mailbox_command,
	   _("Display SPU mailbox facility status.\n"),
	   &infospucmdlist);
  add_cmd ("dma", class_info, info_spu_dma_command,
	   _("Display MFC DMA status.\n"),
	   &infospucmdlist);
  add_cmd ("proxydma", class_info, info_spu_proxydma_command,
	   _("Display MFC Proxy-DMA status.\n"),
	   &infospucmdlist);
}