/* Cell SPU GNU/Linux multi-architecture debugging support.
   Copyright (C) 2009-2019 Free Software Foundation, Inc.

   Contributed by Ulrich Weigand <uweigand@de.ibm.com>.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "gdbcore.h"
#include "gdbcmd.h"
#include "arch-utils.h"
#include "observable.h"
#include "inferior.h"
#include "regcache.h"
#include "symfile.h"
#include "objfiles.h"
#include "solib.h"
#include "solist.h"

#include "ppc-tdep.h"
#include "ppc-linux-tdep.h"
#include "spu-tdep.h"

/* The SPU multi-architecture support target.  */

static const target_info spu_multiarch_target_info = {
  "spu",
  N_("SPU multi-architecture support."),
  N_("SPU multi-architecture support.")
};

struct spu_multiarch_target final : public target_ops
{
  const target_info &info () const override
  { return spu_multiarch_target_info; }

  strata stratum () const override { return arch_stratum; }

  void mourn_inferior () override;

  void fetch_registers (struct regcache *, int) override;
  void store_registers (struct regcache *, int) override;

  enum target_xfer_status xfer_partial (enum target_object object,
                                        const char *annex,
                                        gdb_byte *readbuf,
                                        const gdb_byte *writebuf,
                                        ULONGEST offset, ULONGEST len,
                                        ULONGEST *xfered_len) override;

  int search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
                     const gdb_byte *pattern, ULONGEST pattern_len,
                     CORE_ADDR *found_addrp) override;

  int region_ok_for_hw_watchpoint (CORE_ADDR, int) override;

  struct gdbarch *thread_architecture (ptid_t) override;
};

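/* The single instance of the SPU multi-architecture target; it is
   pushed and popped by spu_multiarch_activate and
   spu_multiarch_deactivate below.  */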
static spu_multiarch_target spu_ops;

/* Number of SPE objects loaded into the current inferior.  */
static int spu_nr_solib;

/* Stand-alone SPE executable?  */
#define spu_standalone_p() \
  (symfile_objfile && symfile_objfile->obfd \
   && bfd_get_arch (symfile_objfile->obfd) == bfd_arch_spu)

/* PPU-side system calls.  INSTR_SC is the encoding of the PowerPC "sc"
   instruction; NR_spu_run is the spu_run system call number.  */
#define INSTR_SC	0x44000002
#define NR_spu_run	0x0116

/* If the PPU thread is currently stopped on a spu_run system call,
   return in FD and ADDR the file handle and NPC parameter address
   used with the system call.  Return non-zero if successful.  */
static int
parse_spufs_run (ptid_t ptid, int *fd, CORE_ADDR *addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
  struct gdbarch_tdep *tdep;
  struct regcache *regcache;
  gdb_byte buf[4];
  ULONGEST regval;

  /* If we're not on PPU, there's nothing to detect.  */
  if (gdbarch_bfd_arch_info (target_gdbarch ())->arch != bfd_arch_powerpc)
    return 0;

  /* If we're called too early (e.g. after fork), we cannot
     access the inferior yet.  */
  if (find_inferior_ptid (ptid) == NULL)
    return 0;

  /* Get PPU-side registers.  */
  regcache = get_thread_arch_regcache (ptid, target_gdbarch ());
  tdep = gdbarch_tdep (target_gdbarch ());

  /* Fetch instruction preceding current NIP.  */
  {
    scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
    inferior_ptid = ptid;
    regval = target_read_memory (regcache_read_pc (regcache) - 4, buf, 4);
  }
  if (regval != 0)
    return 0;
  /* It should be a "sc" instruction.  */
  if (extract_unsigned_integer (buf, 4, byte_order) != INSTR_SC)
    return 0;
  /* System call number should be NR_spu_run.  */
  regcache_cooked_read_unsigned (regcache, tdep->ppc_gp0_regnum, &regval);
  if (regval != NR_spu_run)
    return 0;

  /* Register 3 contains fd, register 4 the NPC param pointer.  */
  regcache_cooked_read_unsigned (regcache, PPC_ORIG_R3_REGNUM, &regval);
  *fd = (int) regval;
  regcache_cooked_read_unsigned (regcache, tdep->ppc_gp0_regnum + 4, &regval);
  *addr = (CORE_ADDR) regval;
  return 1;
}

/* Find gdbarch for SPU context SPUFS_FD.  */
static struct gdbarch *
spu_gdbarch (int spufs_fd)
{
  struct gdbarch_info info;
  gdbarch_info_init (&info);
  info.bfd_arch_info = bfd_lookup_arch (bfd_arch_spu, bfd_mach_spu);
  info.byte_order = BFD_ENDIAN_BIG;
  info.osabi = GDB_OSABI_LINUX;
  info.id = &spufs_fd;
  return gdbarch_find_by_info (info);
}

/* Override the to_thread_architecture routine.  */
struct gdbarch *
spu_multiarch_target::thread_architecture (ptid_t ptid)
{
  int spufs_fd;
  CORE_ADDR spufs_addr;

  if (parse_spufs_run (ptid, &spufs_fd, &spufs_addr))
    return spu_gdbarch (spufs_fd);

  return beneath ()->thread_architecture (ptid);
}

/* Override the to_region_ok_for_hw_watchpoint routine.  */

int
spu_multiarch_target::region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
{
  /* We cannot watch SPU local store.  */
  if (SPUADDR_SPU (addr) != -1)
    return 0;

  return beneath ()->region_ok_for_hw_watchpoint (addr, len);
}

/* Override the to_fetch_registers routine.  */

void
spu_multiarch_target::fetch_registers (struct regcache *regcache, int regno)
{
  struct gdbarch *gdbarch = regcache->arch ();
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int spufs_fd;
  CORE_ADDR spufs_addr;

  /* Since we use functions that rely on inferior_ptid, we need to set and
     restore it.  */
  scoped_restore save_ptid
    = make_scoped_restore (&inferior_ptid, regcache->ptid ());

  /* This version applies only if we're currently in spu_run.  */
  if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
    {
      beneath ()->fetch_registers (regcache, regno);
      return;
    }

  /* We must be stopped on a spu_run system call.  */
  if (!parse_spufs_run (inferior_ptid, &spufs_fd, &spufs_addr))
    return;

  /* The ID register holds the spufs file handle.  */
  if (regno == -1 || regno == SPU_ID_REGNUM)
    {
      gdb_byte buf[4];
      store_unsigned_integer (buf, 4, byte_order, spufs_fd);
      regcache->raw_supply (SPU_ID_REGNUM, buf);
    }

  /* The NPC register is found in PPC memory at SPUFS_ADDR.  */
  if (regno == -1 || regno == SPU_PC_REGNUM)
    {
      gdb_byte buf[4];

      if (target_read (beneath (), TARGET_OBJECT_MEMORY, NULL,
		       buf, spufs_addr, sizeof buf) == sizeof buf)
	regcache->raw_supply (SPU_PC_REGNUM, buf);
    }

  /* The GPRs are found in the "regs" spufs file.  */
  if (regno == -1 || (regno >= 0 && regno < SPU_NUM_GPRS))
    {
      gdb_byte buf[16 * SPU_NUM_GPRS];
      char annex[32];
      int i;

      xsnprintf (annex, sizeof annex, "%d/regs", spufs_fd);
      if (target_read (beneath (), TARGET_OBJECT_SPU, annex,
		       buf, 0, sizeof buf) == sizeof buf)
	for (i = 0; i < SPU_NUM_GPRS; i++)
	  regcache->raw_supply (i, buf + i*16);
    }
}

/* Override the to_store_registers routine.  */

void
spu_multiarch_target::store_registers (struct regcache *regcache, int regno)
{
  struct gdbarch *gdbarch = regcache->arch ();
  int spufs_fd;
  CORE_ADDR spufs_addr;

  /* Since we use functions that rely on inferior_ptid, we need to set and
     restore it.  */
  scoped_restore save_ptid
    = make_scoped_restore (&inferior_ptid, regcache->ptid ());

  /* This version applies only if we're currently in spu_run.  */
  if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
    {
      beneath ()->store_registers (regcache, regno);
      return;
    }

  /* We must be stopped on a spu_run system call.  */
  if (!parse_spufs_run (inferior_ptid, &spufs_fd, &spufs_addr))
    return;

  /* The NPC register is found in PPC memory at SPUFS_ADDR.  */
  if (regno == -1 || regno == SPU_PC_REGNUM)
    {
      gdb_byte buf[4];
      regcache->raw_collect (SPU_PC_REGNUM, buf);

      target_write (beneath (), TARGET_OBJECT_MEMORY, NULL,
		    buf, spufs_addr, sizeof buf);
    }

  /* The GPRs are found in the "regs" spufs file.  */
  if (regno == -1 || (regno >= 0 && regno < SPU_NUM_GPRS))
    {
      gdb_byte buf[16 * SPU_NUM_GPRS];
      char annex[32];
      int i;

      for (i = 0; i < SPU_NUM_GPRS; i++)
	regcache->raw_collect (i, buf + i*16);

      xsnprintf (annex, sizeof annex, "%d/regs", spufs_fd);
      target_write (beneath (), TARGET_OBJECT_SPU, annex,
		    buf, 0, sizeof buf);
    }
}

/* Override the to_xfer_partial routine.  */

enum target_xfer_status
spu_multiarch_target::xfer_partial (enum target_object object,
				    const char *annex, gdb_byte *readbuf,
				    const gdb_byte *writebuf, ULONGEST offset,
				    ULONGEST len, ULONGEST *xfered_len)
{
  struct target_ops *ops_beneath = this->beneath ();

  /* Use the "mem" spufs file to access SPU local store.  */
  if (object == TARGET_OBJECT_MEMORY)
    {
      int fd = SPUADDR_SPU (offset);
      CORE_ADDR addr = SPUADDR_ADDR (offset);
      char mem_annex[32], lslr_annex[32];
      gdb_byte buf[32];
      ULONGEST lslr;
      enum target_xfer_status ret;

      if (fd >= 0)
	{
	  xsnprintf (mem_annex, sizeof mem_annex, "%d/mem", fd);
	  ret = ops_beneath->xfer_partial (TARGET_OBJECT_SPU,
					   mem_annex, readbuf, writebuf,
					   addr, len, xfered_len);
	  if (ret == TARGET_XFER_OK)
	    return ret;

	  /* SPU local store access wraps the address around at the
	     local store limit.  We emulate this here.  To avoid needing
	     an extra access to retrieve the LSLR, we only do that after
	     trying the original address first, and getting end-of-file.  */
	  xsnprintf (lslr_annex, sizeof lslr_annex, "%d/lslr", fd);
	  memset (buf, 0, sizeof buf);
	  if (ops_beneath->xfer_partial (TARGET_OBJECT_SPU,
					 lslr_annex, buf, NULL,
					 0, sizeof buf, xfered_len)
	      != TARGET_XFER_OK)
	    return ret;

	  lslr = strtoulst ((char *) buf, NULL, 16);
	  return ops_beneath->xfer_partial (TARGET_OBJECT_SPU,
					    mem_annex, readbuf, writebuf,
					    addr & lslr, len, xfered_len);
	}
    }

  return ops_beneath->xfer_partial (object, annex,
				    readbuf, writebuf, offset, len, xfered_len);
}

/* Override the to_search_memory routine.  */
int
spu_multiarch_target::search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
				     const gdb_byte *pattern, ULONGEST pattern_len,
				     CORE_ADDR *found_addrp)
{
  /* For SPU local store, always fall back to the simple method.  */
  if (SPUADDR_SPU (start_addr) >= 0)
    return simple_search_memory (this, start_addr, search_space_len,
				 pattern, pattern_len, found_addrp);

  return beneath ()->search_memory (start_addr, search_space_len,
				    pattern, pattern_len, found_addrp);
}


/* Push and pop the SPU multi-architecture support target.  */

static void
spu_multiarch_activate (void)
{
  /* If GDB was configured without SPU architecture support,
     we cannot install SPU multi-architecture support either.  */
  if (spu_gdbarch (-1) == NULL)
    return;

  push_target (&spu_ops);

  /* Make sure the thread architecture is re-evaluated.  */
  registers_changed ();
}

static void
spu_multiarch_deactivate (void)
{
  unpush_target (&spu_ops);

  /* Make sure the thread architecture is re-evaluated.  */
  registers_changed ();
}

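/* Observer callback: a new inferior was created.  If it is a stand-alone
   SPE executable, activate SPU multi-architecture support right away.  */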
static void
spu_multiarch_inferior_created (struct target_ops *ops, int from_tty)
{
  if (spu_standalone_p ())
    spu_multiarch_activate ();
}

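/* Observer callback: a shared object was loaded.  Activate SPU support
   when the first SPE object appears in a PPU-side inferior.  */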
static void
spu_multiarch_solib_loaded (struct so_list *so)
{
  if (!spu_standalone_p ())
    if (so->abfd && bfd_get_arch (so->abfd) == bfd_arch_spu)
      if (spu_nr_solib++ == 0)
	spu_multiarch_activate ();
}

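/* Observer callback: a shared object was unloaded.  Deactivate SPU support
   once the last SPE object is gone.  */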
static void
spu_multiarch_solib_unloaded (struct so_list *so)
{
  if (!spu_standalone_p ())
    if (so->abfd && bfd_get_arch (so->abfd) == bfd_arch_spu)
      if (--spu_nr_solib == 0)
	spu_multiarch_deactivate ();
}

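/* Override the to_mourn_inferior routine.  Deactivate SPU support
   when the inferior goes away.  */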
void
spu_multiarch_target::mourn_inferior ()
{
  beneath ()->mourn_inferior ();
  spu_multiarch_deactivate ();
}

void
_initialize_spu_multiarch (void)
{
  /* Install observers to watch for SPU objects.  */
  gdb::observers::inferior_created.attach (spu_multiarch_inferior_created);
  gdb::observers::solib_loaded.attach (spu_multiarch_solib_loaded);
  gdb::observers::solib_unloaded.attach (spu_multiarch_solib_unloaded);
}