gdb/spu-multiarch.c
/* Cell SPU GNU/Linux multi-architecture debugging support.
   Copyright (C) 2009-2017 Free Software Foundation, Inc.

   Contributed by Ulrich Weigand <uweigand@de.ibm.com>.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "gdbcore.h"
#include "gdbcmd.h"
#include "arch-utils.h"
#include "observer.h"
#include "inferior.h"
#include "regcache.h"
#include "symfile.h"
#include "objfiles.h"
#include "solib.h"
#include "solist.h"

#include "ppc-tdep.h"
#include "ppc-linux-tdep.h"
#include "spu-tdep.h"

/* This module's target vector.  */
static struct target_ops spu_ops;

/* Number of SPE objects loaded into the current inferior.  */
static int spu_nr_solib;

/* Stand-alone SPE executable?  */
#define spu_standalone_p() \
  (symfile_objfile && symfile_objfile->obfd \
   && bfd_get_arch (symfile_objfile->obfd) == bfd_arch_spu)

/* PPU side system calls.  */
#define INSTR_SC	0x44000002
#define NR_spu_run	0x0116
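
/* INSTR_SC is the encoding of the PowerPC "sc" (system call) instruction;
   NR_spu_run (0x116, i.e. 278 decimal) is the spu_run system call number
   on powerpc-linux.  parse_spufs_run below matches both to recognize a
   PPU thread stopped at an spu_run call.  */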

/* If the PPU thread is currently stopped on a spu_run system call,
   return to FD and ADDR the file handle and NPC parameter address
   used with the system call.  Return non-zero if successful.  */
static int
parse_spufs_run (ptid_t ptid, int *fd, CORE_ADDR *addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
  struct gdbarch_tdep *tdep;
  struct regcache *regcache;
  gdb_byte buf[4];
  ULONGEST regval;

  /* If we're not on PPU, there's nothing to detect.  */
  if (gdbarch_bfd_arch_info (target_gdbarch ())->arch != bfd_arch_powerpc)
    return 0;

  /* If we're called too early (e.g. after fork), we cannot
     access the inferior yet.  */
  if (find_inferior_ptid (ptid) == NULL)
    return 0;

  /* Get PPU-side registers.  */
  regcache = get_thread_arch_regcache (ptid, target_gdbarch ());
  tdep = gdbarch_tdep (target_gdbarch ());

  /* Fetch instruction preceding current NIP.  */
  {
    scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
    inferior_ptid = ptid;
    regval = target_read_memory (regcache_read_pc (regcache) - 4, buf, 4);
  }
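  /* target_read_memory returns zero on success; REGVAL here is just the
     status of that read, while the instruction bytes are in BUF.  */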
  if (regval != 0)
    return 0;
  /* It should be a "sc" instruction.  */
  if (extract_unsigned_integer (buf, 4, byte_order) != INSTR_SC)
    return 0;
  /* System call number should be NR_spu_run.  */
  regcache_cooked_read_unsigned (regcache, tdep->ppc_gp0_regnum, &regval);
  if (regval != NR_spu_run)
    return 0;

  /* Register 3 contains fd, register 4 the NPC param pointer.  */
  regcache_cooked_read_unsigned (regcache, PPC_ORIG_R3_REGNUM, &regval);
  *fd = (int) regval;
  regcache_cooked_read_unsigned (regcache, tdep->ppc_gp0_regnum + 4, &regval);
  *addr = (CORE_ADDR) regval;
  return 1;
}

/* Find gdbarch for SPU context SPUFS_FD.  */
static struct gdbarch *
spu_gdbarch (int spufs_fd)
{
  struct gdbarch_info info;
  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_lookup_arch (bfd_arch_spu, bfd_mach_spu);
  info.byte_order = BFD_ENDIAN_BIG;
  info.osabi = GDB_OSABI_LINUX;
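  /* Passing the context fd as the "id" lets spu-tdep.c hand back a
     distinct gdbarch per SPU context.  */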
  info.id = &spufs_fd;
  return gdbarch_find_by_info (info);
}

/* Override the to_thread_architecture routine.  */
static struct gdbarch *
spu_thread_architecture (struct target_ops *ops, ptid_t ptid)
{
  int spufs_fd;
  CORE_ADDR spufs_addr;

  if (parse_spufs_run (ptid, &spufs_fd, &spufs_addr))
    return spu_gdbarch (spufs_fd);

  return target_gdbarch ();
}

/* Override the to_region_ok_for_hw_watchpoint routine.  */
static int
spu_region_ok_for_hw_watchpoint (struct target_ops *self,
                                 CORE_ADDR addr, int len)
{
  struct target_ops *ops_beneath = find_target_beneath (self);

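  /* SPUADDR_SPU (see spu-tdep.h) extracts the spufs context fd encoded in
     the upper bits of an SPU local-store address; it is -1 for an
     ordinary PowerPC address.  */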
  /* We cannot watch SPU local store.  */
  if (SPUADDR_SPU (addr) != -1)
    return 0;

  return ops_beneath->to_region_ok_for_hw_watchpoint (ops_beneath, addr, len);
}

/* Override the to_fetch_registers routine.  */
static void
spu_fetch_registers (struct target_ops *ops,
                     struct regcache *regcache, int regno)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct target_ops *ops_beneath = find_target_beneath (ops);
  int spufs_fd;
  CORE_ADDR spufs_addr;

  /* Since we use functions that rely on inferior_ptid, we need to set and
     restore it.  */
  scoped_restore save_ptid
    = make_scoped_restore (&inferior_ptid, regcache_get_ptid (regcache));

  /* This version applies only if we're currently in spu_run.  */
  if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
    {
      ops_beneath->to_fetch_registers (ops_beneath, regcache, regno);
      return;
    }

  /* We must be stopped on a spu_run system call.  */
  if (!parse_spufs_run (inferior_ptid, &spufs_fd, &spufs_addr))
    return;

  /* The ID register holds the spufs file handle.  */
  if (regno == -1 || regno == SPU_ID_REGNUM)
    {
      gdb_byte buf[4];
      store_unsigned_integer (buf, 4, byte_order, spufs_fd);
      regcache_raw_supply (regcache, SPU_ID_REGNUM, buf);
    }

  /* The NPC register is found in PPC memory at SPUFS_ADDR.  */
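  /* SPUFS_ADDR is the address of the npc argument the PPU passed to
     spu_run, so reading it here yields the SPU program counter while the
     context is stopped.  */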
  if (regno == -1 || regno == SPU_PC_REGNUM)
    {
      gdb_byte buf[4];

      if (target_read (ops_beneath, TARGET_OBJECT_MEMORY, NULL,
                       buf, spufs_addr, sizeof buf) == sizeof buf)
        regcache_raw_supply (regcache, SPU_PC_REGNUM, buf);
    }

  /* The GPRs are found in the "regs" spufs file.  */
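  /* Each SPU general-purpose register is 16 bytes wide, so the whole
     register file is transferred as a single 16 * SPU_NUM_GPRS byte
     block.  */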
  if (regno == -1 || (regno >= 0 && regno < SPU_NUM_GPRS))
    {
      gdb_byte buf[16 * SPU_NUM_GPRS];
      char annex[32];
      int i;

      xsnprintf (annex, sizeof annex, "%d/regs", spufs_fd);
      if (target_read (ops_beneath, TARGET_OBJECT_SPU, annex,
                       buf, 0, sizeof buf) == sizeof buf)
        for (i = 0; i < SPU_NUM_GPRS; i++)
          regcache_raw_supply (regcache, i, buf + i*16);
    }
}

/* Override the to_store_registers routine.  */
static void
spu_store_registers (struct target_ops *ops,
                     struct regcache *regcache, int regno)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  struct target_ops *ops_beneath = find_target_beneath (ops);
  int spufs_fd;
  CORE_ADDR spufs_addr;

  /* Since we use functions that rely on inferior_ptid, we need to set and
     restore it.  */
  scoped_restore save_ptid
    = make_scoped_restore (&inferior_ptid, regcache_get_ptid (regcache));

  /* This version applies only if we're currently in spu_run.  */
  if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
    {
      ops_beneath->to_store_registers (ops_beneath, regcache, regno);
      return;
    }

  /* We must be stopped on a spu_run system call.  */
  if (!parse_spufs_run (inferior_ptid, &spufs_fd, &spufs_addr))
    return;

  /* The NPC register is found in PPC memory at SPUFS_ADDR.  */
  if (regno == -1 || regno == SPU_PC_REGNUM)
    {
      gdb_byte buf[4];
      regcache_raw_collect (regcache, SPU_PC_REGNUM, buf);

      target_write (ops_beneath, TARGET_OBJECT_MEMORY, NULL,
                    buf, spufs_addr, sizeof buf);
    }

  /* The GPRs are found in the "regs" spufs file.  */
  if (regno == -1 || (regno >= 0 && regno < SPU_NUM_GPRS))
    {
      gdb_byte buf[16 * SPU_NUM_GPRS];
      char annex[32];
      int i;

      for (i = 0; i < SPU_NUM_GPRS; i++)
        regcache_raw_collect (regcache, i, buf + i*16);

      xsnprintf (annex, sizeof annex, "%d/regs", spufs_fd);
      target_write (ops_beneath, TARGET_OBJECT_SPU, annex,
                    buf, 0, sizeof buf);
    }
}

/* Override the to_xfer_partial routine.  */
static enum target_xfer_status
spu_xfer_partial (struct target_ops *ops, enum target_object object,
                  const char *annex, gdb_byte *readbuf,
                  const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
                  ULONGEST *xfered_len)
{
  struct target_ops *ops_beneath = find_target_beneath (ops);

  /* Use the "mem" spufs file to access SPU local store.  */
  if (object == TARGET_OBJECT_MEMORY)
    {
      int fd = SPUADDR_SPU (offset);
      CORE_ADDR addr = SPUADDR_ADDR (offset);
      char mem_annex[32], lslr_annex[32];
      gdb_byte buf[32];
      ULONGEST lslr;
      enum target_xfer_status ret;

      if (fd >= 0)
        {
          xsnprintf (mem_annex, sizeof mem_annex, "%d/mem", fd);
          ret = ops_beneath->to_xfer_partial (ops_beneath, TARGET_OBJECT_SPU,
                                              mem_annex, readbuf, writebuf,
                                              addr, len, xfered_len);
          if (ret == TARGET_XFER_OK)
            return ret;

          /* SPU local store access wraps the address around at the
             local store limit.  We emulate this here.  To avoid needing
             an extra access to retrieve the LSLR, we only do that after
             trying the original address first, and getting end-of-file.  */
          xsnprintf (lslr_annex, sizeof lslr_annex, "%d/lslr", fd);
          memset (buf, 0, sizeof buf);
          if (ops_beneath->to_xfer_partial (ops_beneath, TARGET_OBJECT_SPU,
                                            lslr_annex, buf, NULL,
                                            0, sizeof buf, xfered_len)
              != TARGET_XFER_OK)
            return ret;

          lslr = strtoulst ((char *) buf, NULL, 16);
          return ops_beneath->to_xfer_partial (ops_beneath, TARGET_OBJECT_SPU,
                                               mem_annex, readbuf, writebuf,
                                               addr & lslr, len, xfered_len);
        }
    }

  return ops_beneath->to_xfer_partial (ops_beneath, object, annex,
                                       readbuf, writebuf, offset, len, xfered_len);
}

/* Override the to_search_memory routine.  */
static int
spu_search_memory (struct target_ops* ops,
                   CORE_ADDR start_addr, ULONGEST search_space_len,
                   const gdb_byte *pattern, ULONGEST pattern_len,
                   CORE_ADDR *found_addrp)
{
  struct target_ops *ops_beneath = find_target_beneath (ops);

  /* For SPU local store, always fall back to the simple method.  */
  if (SPUADDR_SPU (start_addr) >= 0)
    return simple_search_memory (ops,
                                 start_addr, search_space_len,
                                 pattern, pattern_len, found_addrp);

  return ops_beneath->to_search_memory (ops_beneath,
                                        start_addr, search_space_len,
                                        pattern, pattern_len, found_addrp);
}


/* Push and pop the SPU multi-architecture support target.  */
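/* The "spu" target sits at arch_stratum on top of the regular process
   target.  It is pushed when debugging a stand-alone SPE executable or
   when the first SPE shared object is loaded into a PowerPC inferior,
   and popped again when the last such object is unloaded or the
   inferior is mourned.  */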

static void
spu_multiarch_activate (void)
{
  /* If GDB was configured without SPU architecture support,
     we cannot install SPU multi-architecture support either.  */
  if (spu_gdbarch (-1) == NULL)
    return;

  push_target (&spu_ops);

  /* Make sure the thread architecture is re-evaluated.  */
  registers_changed ();
}

static void
spu_multiarch_deactivate (void)
{
  unpush_target (&spu_ops);

  /* Make sure the thread architecture is re-evaluated.  */
  registers_changed ();
}

static void
spu_multiarch_inferior_created (struct target_ops *ops, int from_tty)
{
  if (spu_standalone_p ())
    spu_multiarch_activate ();
}

static void
spu_multiarch_solib_loaded (struct so_list *so)
{
  if (!spu_standalone_p ())
    if (so->abfd && bfd_get_arch (so->abfd) == bfd_arch_spu)
      if (spu_nr_solib++ == 0)
        spu_multiarch_activate ();
}

static void
spu_multiarch_solib_unloaded (struct so_list *so)
{
  if (!spu_standalone_p ())
    if (so->abfd && bfd_get_arch (so->abfd) == bfd_arch_spu)
      if (--spu_nr_solib == 0)
        spu_multiarch_deactivate ();
}

static void
spu_mourn_inferior (struct target_ops *ops)
{
  struct target_ops *ops_beneath = find_target_beneath (ops);

  ops_beneath->to_mourn_inferior (ops_beneath);
  spu_multiarch_deactivate ();
}


/* Initialize the SPU multi-architecture support target.  */

static void
init_spu_ops (void)
{
  spu_ops.to_shortname = "spu";
  spu_ops.to_longname = "SPU multi-architecture support.";
  spu_ops.to_doc = "SPU multi-architecture support.";
  spu_ops.to_mourn_inferior = spu_mourn_inferior;
  spu_ops.to_fetch_registers = spu_fetch_registers;
  spu_ops.to_store_registers = spu_store_registers;
  spu_ops.to_xfer_partial = spu_xfer_partial;
  spu_ops.to_search_memory = spu_search_memory;
  spu_ops.to_region_ok_for_hw_watchpoint = spu_region_ok_for_hw_watchpoint;
  spu_ops.to_thread_architecture = spu_thread_architecture;
  spu_ops.to_stratum = arch_stratum;
  spu_ops.to_magic = OPS_MAGIC;
}

void
_initialize_spu_multiarch (void)
{
  /* Install ourselves on the target stack.  */
  init_spu_ops ();
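  /* complete_target_initialization fills in default implementations for
     any target_ops methods that init_spu_ops left unset.  */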
  complete_target_initialization (&spu_ops);

  /* Install observers to watch for SPU objects.  */
  observer_attach_inferior_created (spu_multiarch_inferior_created);
  observer_attach_solib_loaded (spu_multiarch_solib_loaded);
  observer_attach_solib_unloaded (spu_multiarch_solib_unloaded);
}