/* Copyright (C) 2010-2014 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program. If not, see <http://www.gnu.org/licenses/>. */

#include "defs.h"
#include "ia64-tdep.h"
#include "inferior.h"
#include "inf-ttrace.h"
#include "regcache.h"
#include "solib-ia64-hpux.h"

#include <ia64/sys/uregs.h>
#include <sys/ttrace.h>

/* The offsets used with ttrace to read the value of the raw registers. */
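/* A value of -1 below marks a register that cannot be transferred
   through this interface; such registers are either synthesized by
   GDB (r0, f0, f1), fetched by other means (RNAT), or simply skipped. */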

static int u_offsets[] =
{ /* Static General Registers. */
  -1, __r1, __r2, __r3, __r4, __r5, __r6, __r7,
  __r8, __r9, __r10, __r11, __r12, __r13, __r14, __r15,
  __r16, __r17, __r18, __r19, __r20, __r21, __r22, __r23,
  __r24, __r25, __r26, __r27, __r28, __r29, __r30, __r31,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,

  /* Static Floating-Point Registers. */
  -1, -1, __f2, __f3, __f4, __f5, __f6, __f7,
  __f8, __f9, __f10, __f11, __f12, __f13, __f14, __f15,
  __f16, __f17, __f18, __f19, __f20, __f21, __f22, __f23,
  __f24, __f25, __f26, __f27, __f28, __f29, __f30, __f31,
  __f32, __f33, __f34, __f35, __f36, __f37, __f38, __f39,
  __f40, __f41, __f42, __f43, __f44, __f45, __f46, __f47,
  __f48, __f49, __f50, __f51, __f52, __f53, __f54, __f55,
  __f56, __f57, __f58, __f59, __f60, __f61, __f62, __f63,
  __f64, __f65, __f66, __f67, __f68, __f69, __f70, __f71,
  __f72, __f73, __f74, __f75, __f76, __f77, __f78, __f79,
  __f80, __f81, __f82, __f83, __f84, __f85, __f86, __f87,
  __f88, __f89, __f90, __f91, __f92, __f93, __f94, __f95,
  __f96, __f97, __f98, __f99, __f100, __f101, __f102, __f103,
  __f104, __f105, __f106, __f107, __f108, __f109, __f110, __f111,
  __f112, __f113, __f114, __f115, __f116, __f117, __f118, __f119,
  __f120, __f121, __f122, __f123, __f124, __f125, __f126, __f127,

  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,

  /* Branch Registers. */
  __b0, __b1, __b2, __b3, __b4, __b5, __b6, __b7,

  /* Virtual frame pointer and virtual return address pointer. */
  -1, -1,

  /* Other registers. */
  __pr, __ip, __cr_ipsr, __cfm,

  /* Kernel registers. */
  -1, -1, -1, -1,
  -1, -1, -1, -1,

  -1, -1, -1, -1, -1, -1, -1, -1,

  /* Some application registers. */
  __ar_rsc, __ar_bsp, __ar_bspstore, __ar_rnat,

  -1,
  -1, /* Not available: FCR, IA32 floating control register. */
  -1, -1,

  -1, /* Not available: EFLAG. */
  -1, /* Not available: CSD. */
  -1, /* Not available: SSD. */
  -1, /* Not available: CFLG. */
  -1, /* Not available: FSR. */
  -1, /* Not available: FIR. */
  -1, /* Not available: FDR. */
  -1,
  __ar_ccv, -1, -1, -1, __ar_unat, -1, -1, -1,
  __ar_fpsr, -1, -1, -1,
  -1, /* Not available: ITC. */
  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1,
  __ar_pfs, __ar_lc, __ar_ec,
  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
  -1
  /* All following registers, starting with nat0, are handled as
     pseudo registers, and hence are handled separately. */
};

/* Some registers have a fixed value and cannot be modified.
   Store their value in static constant buffers that can be used
   later to fill the register cache. */
static const char r0_value[8] = {0x00, 0x00, 0x00, 0x00,
                                 0x00, 0x00, 0x00, 0x00};
static const char f0_value[16] = {0x00, 0x00, 0x00, 0x00,
                                  0x00, 0x00, 0x00, 0x00,
                                  0x00, 0x00, 0x00, 0x00,
                                  0x00, 0x00, 0x00, 0x00};
static const char f1_value[16] = {0x00, 0x00, 0x00, 0x00,
                                  0x00, 0x00, 0xff, 0xff,
                                  0x80, 0x00, 0x00, 0x00,
                                  0x00, 0x00, 0x00, 0x00};
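/* Note: the f1_value bytes above are the IA-64 register-format encoding
   of 1.0 (sign 0, biased exponent 0xffff, significand 0x8000000000000000),
   stored big-endian in a 16-byte container. */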

/* The "to_wait" routine from the "inf-ttrace" layer. */

static ptid_t (*super_to_wait) (struct target_ops *, ptid_t,
                                struct target_waitstatus *, int);

/* The "to_wait" target_ops routine for ia64-hpux. */

static ptid_t
ia64_hpux_wait (struct target_ops *ops, ptid_t ptid,
                struct target_waitstatus *ourstatus, int options)
{
  ptid_t new_ptid;

  new_ptid = super_to_wait (ops, ptid, ourstatus, options);

  /* If this is a DLD event (hard-coded breakpoint instruction
     that was activated by the solib-ia64-hpux module), we need to
     process it, and then resume the execution as if the event did
     not happen. */
  if (ourstatus->kind == TARGET_WAITKIND_STOPPED
      && ourstatus->value.sig == GDB_SIGNAL_TRAP
      && ia64_hpux_at_dld_breakpoint_p (new_ptid))
    {
      ia64_hpux_handle_dld_breakpoint (new_ptid);

      target_resume (new_ptid, 0, GDB_SIGNAL_0);
      ourstatus->kind = TARGET_WAITKIND_IGNORE;
    }

  return new_ptid;
}

/* Fetch the RNAT register and supply it to the REGCACHE. */

static void
ia64_hpux_fetch_rnat_register (struct regcache *regcache)
{
  CORE_ADDR addr;
  gdb_byte buf[8];
  int status;

  /* The value of RNAT is stored at bsp|0x1f8, and must be read using
     TT_LWP_RDRSEBS. */
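  /* (Background: the RSE spills an RNAT collection dword into the
     backing store at every address whose bits 3..8 are all ones, i.e.
     whenever (address & 0x1f8) == 0x1f8.  ORing 0x1f8 into BSP thus
     yields the address of the RNAT collection slot associated with
     the current BSP.) */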

  regcache_raw_read_unsigned (regcache, IA64_BSP_REGNUM, &addr);
  addr |= 0x1f8;

  status = ttrace (TT_LWP_RDRSEBS, ptid_get_pid (inferior_ptid),
                   ptid_get_lwp (inferior_ptid), addr, sizeof (buf),
                   (uintptr_t) buf);
  if (status < 0)
    error (_("failed to read RNAT register at %s"),
           paddress (get_regcache_arch (regcache), addr));

  regcache_raw_supply (regcache, IA64_RNAT_REGNUM, buf);
}

/* Read the value of the register saved at OFFSET in the save_state_t
   structure, and store its value in BUF. LEN is the size of the register
   to be read. */

static int
ia64_hpux_read_register_from_save_state_t (int offset, gdb_byte *buf, int len)
{
  int status;

  status = ttrace (TT_LWP_RUREGS, ptid_get_pid (inferior_ptid),
                   ptid_get_lwp (inferior_ptid), offset, len, (uintptr_t) buf);

  return status;
}

/* Fetch register REGNUM from the inferior. */

static void
ia64_hpux_fetch_register (struct regcache *regcache, int regnum)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  int offset, len, status;
  gdb_byte *buf;

  if (regnum == IA64_GR0_REGNUM)
    {
      /* r0 is always 0. */
      regcache_raw_supply (regcache, regnum, r0_value);
      return;
    }

  if (regnum == IA64_FR0_REGNUM)
    {
      /* f0 is always 0.0. */
      regcache_raw_supply (regcache, regnum, f0_value);
      return;
    }

  if (regnum == IA64_FR1_REGNUM)
    {
      /* f1 is always 1.0. */
      regcache_raw_supply (regcache, regnum, f1_value);
      return;
    }

  if (regnum == IA64_RNAT_REGNUM)
    {
      ia64_hpux_fetch_rnat_register (regcache);
      return;
    }

  /* Get the register location. If the register cannot be fetched,
     then return now. */
  offset = u_offsets[regnum];
  if (offset == -1)
    return;

  len = register_size (gdbarch, regnum);
  buf = alloca (len * sizeof (gdb_byte));
  status = ia64_hpux_read_register_from_save_state_t (offset, buf, len);
  if (status < 0)
    warning (_("Failed to read register value for %s."),
             gdbarch_register_name (gdbarch, regnum));

  regcache_raw_supply (regcache, regnum, buf);
}

/* The "to_fetch_registers" target_ops routine for ia64-hpux. */

static void
ia64_hpux_fetch_registers (struct target_ops *ops,
                           struct regcache *regcache, int regnum)
{
  if (regnum == -1)
    for (regnum = 0;
         regnum < gdbarch_num_regs (get_regcache_arch (regcache));
         regnum++)
      ia64_hpux_fetch_register (regcache, regnum);
  else
    ia64_hpux_fetch_register (regcache, regnum);
}

/* Write the value stored in BUF to the register saved at OFFSET in the
   save_state_t structure. LEN is the size of the register in bytes.

   Return the value from the corresponding ttrace call (a negative value
   means that the operation failed). */

static int
ia64_hpux_write_register_to_saved_state_t (int offset, gdb_byte *buf, int len)
{
  return ttrace (TT_LWP_WUREGS, ptid_get_pid (inferior_ptid),
                 ptid_get_lwp (inferior_ptid), offset, len, (uintptr_t) buf);
}

/* Store register REGNUM into the inferior. */

static void
ia64_hpux_store_register (const struct regcache *regcache, int regnum)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  int offset = u_offsets[regnum];
  gdb_byte *buf;
  int len, status;

  /* If the register cannot be stored, then return now. */
  if (offset == -1)
    return;

  /* I don't know how to store that register for now. So just ignore any
     request to store it, to avoid an internal error. */
  if (regnum == IA64_PSR_REGNUM)
    return;

  len = register_size (gdbarch, regnum);
  buf = alloca (len * sizeof (gdb_byte));
  regcache_raw_collect (regcache, regnum, buf);

  status = ia64_hpux_write_register_to_saved_state_t (offset, buf, len);

  if (status < 0)
    error (_("failed to write register value for %s."),
           gdbarch_register_name (gdbarch, regnum));
}

/* The "to_store_registers" target_ops routine for ia64-hpux. */

static void
ia64_hpux_store_registers (struct target_ops *ops,
                           struct regcache *regcache, int regnum)
{
  if (regnum == -1)
    for (regnum = 0;
         regnum < gdbarch_num_regs (get_regcache_arch (regcache));
         regnum++)
      ia64_hpux_store_register (regcache, regnum);
  else
    ia64_hpux_store_register (regcache, regnum);
}

/* The "xfer_partial" routine from the "inf-ttrace" target layer.
   Ideally, we would like to use this routine for all transfer
   requests, but this platform has a lot of special cases that
   need to be handled manually. So we override this routine and
   delegate back if we detect that we are not in a special case. */

static target_xfer_partial_ftype *super_xfer_partial;

/* The "xfer_partial" routine for a memory region that is completely
   outside of the backing-store region. */

static LONGEST
ia64_hpux_xfer_memory_no_bs (struct target_ops *ops, const char *annex,
                             gdb_byte *readbuf, const gdb_byte *writebuf,
                             CORE_ADDR addr, LONGEST len)
{
  /* Memory writes need to be aligned on 16-byte boundaries, at least
     when writing in the text section. On the other hand, the size
     of the buffer does not need to be a multiple of 16 bytes.

     No such restriction when performing memory reads. */
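  /* For example, a 4-byte write at an address ending in 0x6 becomes:
     read the 6 bytes from the 16-byte aligned address up to ADDR,
     append the 4 new bytes, and write the resulting 10 bytes back
     starting at the aligned address. */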

  if (writebuf && addr & 0x0f)
    {
      const CORE_ADDR aligned_addr = addr & ~0x0f;
      const int aligned_len = len + (addr - aligned_addr);
      gdb_byte *aligned_buf = alloca (aligned_len * sizeof (gdb_byte));
      LONGEST status;

      /* Read the portion of memory between ALIGNED_ADDR and ADDR, so
         that we can write it back during our aligned memory write. */
      status = super_xfer_partial (ops, TARGET_OBJECT_MEMORY, annex,
                                   aligned_buf /* read */,
                                   NULL /* write */,
                                   aligned_addr, addr - aligned_addr);
      if (status <= 0)
        return 0;
      memcpy (aligned_buf + (addr - aligned_addr), writebuf, len);

      return super_xfer_partial (ops, TARGET_OBJECT_MEMORY, annex,
                                 NULL /* read */, aligned_buf /* write */,
                                 aligned_addr, aligned_len);
    }
  else
    /* Memory read or properly aligned memory write. */
    return super_xfer_partial (ops, TARGET_OBJECT_MEMORY, annex, readbuf,
                               writebuf, addr, len);
}

/* Read LEN bytes at ADDR from memory, and store them in BUF. This memory
   region is assumed to be inside the backing store.

   Return zero if the operation failed. */
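/* The loop below accesses the backing store in aligned 8-byte units
   (TT_LWP_RDRSEBS), copying out only the bytes that fall inside
   [ADDR, ADDR + LEN): SKIP_LO bytes are trimmed from the start of the
   first dword and SKIP_HI bytes from the end of the last one. */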

static int
ia64_hpux_read_memory_bs (gdb_byte *buf, CORE_ADDR addr, int len)
{
  gdb_byte tmp_buf[8];
  CORE_ADDR tmp_addr = addr & ~0x7;

  while (tmp_addr < addr + len)
    {
      int status;
      int skip_lo = 0;
      int skip_hi = 0;

      status = ttrace (TT_LWP_RDRSEBS, ptid_get_pid (inferior_ptid),
                       ptid_get_lwp (inferior_ptid), tmp_addr,
                       sizeof (tmp_buf), (uintptr_t) tmp_buf);
      if (status < 0)
        return 0;

      if (tmp_addr < addr)
        skip_lo = addr - tmp_addr;

      if (tmp_addr + sizeof (tmp_buf) > addr + len)
        skip_hi = (tmp_addr + sizeof (tmp_buf)) - (addr + len);

      memcpy (buf + (tmp_addr + skip_lo - addr),
              tmp_buf + skip_lo,
              sizeof (tmp_buf) - skip_lo - skip_hi);

      tmp_addr += sizeof (tmp_buf);
    }

  return 1;
}

/* Write LEN bytes from BUF to memory at ADDR. This memory region is assumed
   to be inside the backing store.

   Return zero if the operation failed. */
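/* Like the read case, this works one aligned dword at a time: a dword
   that is only partially covered by [ADDR, ADDR + LEN) is first read
   back so that the bytes outside the requested range are preserved,
   then the whole dword is rewritten with TT_LWP_WRRSEBS. */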

static int
ia64_hpux_write_memory_bs (const gdb_byte *buf, CORE_ADDR addr, int len)
{
  gdb_byte tmp_buf[8];
  CORE_ADDR tmp_addr = addr & ~0x7;

  while (tmp_addr < addr + len)
    {
      int status;
      int lo = 0;
      int hi = 7;

      if (tmp_addr < addr || tmp_addr + sizeof (tmp_buf) > addr + len)
        /* Part of the 8-byte region pointed to by tmp_addr needs to be
           preserved. So read it in before we copy the data that needs
           to be changed. */
        if (!ia64_hpux_read_memory_bs (tmp_buf, tmp_addr, sizeof (tmp_buf)))
          return 0;

      if (tmp_addr < addr)
        lo = addr - tmp_addr;

      if (tmp_addr + sizeof (tmp_buf) > addr + len)
        hi = addr - tmp_addr + len - 1;

      memcpy (tmp_buf + lo, buf + tmp_addr - addr + lo, hi - lo + 1);

      status = ttrace (TT_LWP_WRRSEBS, ptid_get_pid (inferior_ptid),
                       ptid_get_lwp (inferior_ptid), tmp_addr,
                       sizeof (tmp_buf), (uintptr_t) tmp_buf);
      if (status < 0)
        return 0;

      tmp_addr += sizeof (tmp_buf);
    }

  return 1;
}

/* The "xfer_partial" routine for a memory region that is completely
   inside of the backing-store region. */

static LONGEST
ia64_hpux_xfer_memory_bs (struct target_ops *ops, const char *annex,
                          gdb_byte *readbuf, const gdb_byte *writebuf,
                          CORE_ADDR addr, LONGEST len)
{
  int success;

  if (readbuf)
    success = ia64_hpux_read_memory_bs (readbuf, addr, len);
  else
    success = ia64_hpux_write_memory_bs (writebuf, addr, len);

  if (success)
    return len;
  else
    return 0;
}

/* Get a register value as an unsigned value directly from the system,
   instead of going through the regcache.

   This function is meant to be used when inferior_ptid is not
   a thread/process known to GDB. */
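/* The raw bytes are interpreted as big-endian (BFD_ENDIAN_BIG below),
   matching the big-endian environment used by HP-UX on ia64. */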

static ULONGEST
ia64_hpux_get_register_from_save_state_t (int regnum, int reg_size)
{
  gdb_byte *buf = alloca (reg_size);
  int offset = u_offsets[regnum];
  int status;

  /* The register is assumed to be available for fetching. */
  gdb_assert (offset != -1);

  status = ia64_hpux_read_register_from_save_state_t (offset, buf, reg_size);
  if (status < 0)
    {
      /* This really should not happen. If it does, emit a warning
         and pretend the register value is zero. Not exactly the best
         error recovery mechanism, but better than nothing. We will
         try to do better if we can demonstrate that this can happen
         under normal circumstances. */
      warning (_("Failed to read value of register number %d."), regnum);
      return 0;
    }

  return extract_unsigned_integer (buf, reg_size, BFD_ENDIAN_BIG);
}

/* The "xfer_partial" target_ops routine for ia64-hpux, in the case
   where the requested object is TARGET_OBJECT_MEMORY. */

static LONGEST
ia64_hpux_xfer_memory (struct target_ops *ops, const char *annex,
                       gdb_byte *readbuf, const gdb_byte *writebuf,
                       CORE_ADDR addr, ULONGEST len)
{
  CORE_ADDR bsp, bspstore;
  CORE_ADDR start_addr, short_len;
  int status = 0;

  /* The backing-store region cannot be read/written by the standard memory
     read/write operations. So we handle the memory region piecemeal:
       (1) and (2) The regions before and after the backing-store region,
           which can be treated as normal memory;
       (3) The region inside the backing-store, which needs to be
           read/written specially. */
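  /* Schematically, with addresses increasing to the right:

       .. normal memory .. | BSPSTORE .. backing store .. BSP | .. normal memory ..
              (1)                         (3)                          (2)

     Regions (1) and (2) are handled by ia64_hpux_xfer_memory_no_bs,
     region (3) by ia64_hpux_xfer_memory_bs. */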

  if (in_inferior_list (ptid_get_pid (inferior_ptid)))
    {
      struct regcache *regcache = get_current_regcache ();

      regcache_raw_read_unsigned (regcache, IA64_BSP_REGNUM, &bsp);
      regcache_raw_read_unsigned (regcache, IA64_BSPSTORE_REGNUM, &bspstore);
    }
  else
    {
      /* This is probably a child of our inferior created by a fork.
         Because this process has not been added to our inferior list
         (we are probably in the process of handling that child
         process), we do not have a regcache to read the registers
         from. So get those values directly from the kernel. */
      bsp = ia64_hpux_get_register_from_save_state_t (IA64_BSP_REGNUM, 8);
      bspstore =
        ia64_hpux_get_register_from_save_state_t (IA64_BSPSTORE_REGNUM, 8);
    }

  /* 1. Memory region before BSPSTORE. */

  if (addr < bspstore)
    {
      short_len = len;
      if (addr + len > bspstore)
        short_len = bspstore - addr;

      status = ia64_hpux_xfer_memory_no_bs (ops, annex, readbuf, writebuf,
                                            addr, short_len);
      if (status <= 0)
        return 0;
    }

  /* 2. Memory region after BSP. */

  if (addr + len > bsp)
    {
      start_addr = addr;
      if (start_addr < bsp)
        start_addr = bsp;
      short_len = len + addr - start_addr;

      status = ia64_hpux_xfer_memory_no_bs
        (ops, annex,
         readbuf ? readbuf + (start_addr - addr) : NULL,
         writebuf ? writebuf + (start_addr - addr) : NULL,
         start_addr, short_len);
      if (status <= 0)
        return 0;
    }

  /* 3. Memory region between BSPSTORE and BSP. */

  if (bspstore != bsp
      && ((addr < bspstore && addr + len > bspstore)
          || (addr >= bspstore && addr < bsp)))
    {
      start_addr = addr;
      if (addr < bspstore)
        start_addr = bspstore;
      short_len = len + addr - start_addr;

      if (start_addr + short_len > bsp)
        short_len = bsp - start_addr;

      gdb_assert (short_len > 0);

      status = ia64_hpux_xfer_memory_bs
        (ops, annex,
         readbuf ? readbuf + (start_addr - addr) : NULL,
         writebuf ? writebuf + (start_addr - addr) : NULL,
         start_addr, short_len);
      if (status < 0)
        return 0;
    }

  return len;
}

/* Handle the transfer of TARGET_OBJECT_HPUX_UREGS objects on ia64-hpux.
   ANNEX is currently ignored.

   The current implementation does not support write transfers (because
   we do not currently need these transfers), and will raise
   a failed assertion if WRITEBUF is not NULL. */
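/* OFFSET is interpreted as an offset into the inferior's save_state_t
   area (the same offsets as in u_offsets above), and LEN as the size
   in bytes of the register stored there. */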
621
622static LONGEST
623ia64_hpux_xfer_uregs (struct target_ops *ops, const char *annex,
624 gdb_byte *readbuf, const gdb_byte *writebuf,
625 ULONGEST offset, LONGEST len)
626{
627 int status;
628
629 gdb_assert (writebuf == NULL);
630
631 status = ia64_hpux_read_register_from_save_state_t (offset, readbuf, len);
632 if (status < 0)
633 return -1;
634 return len;
635}
636
c4de7027
JB
637/* Handle the transfer of TARGET_OBJECT_HPUX_SOLIB_GOT objects on ia64-hpux.
638
639 The current implementation does not support write transfers (because
640 we do not currently do not need these transfers), and will raise
641 a failed assertion if WRITEBUF is not NULL. */
642
643static LONGEST
644ia64_hpux_xfer_solib_got (struct target_ops *ops, const char *annex,
645 gdb_byte *readbuf, const gdb_byte *writebuf,
b55e14c7 646 ULONGEST offset, ULONGEST len)
c4de7027
JB
647{
648 CORE_ADDR fun_addr;
649 /* The linkage pointer. We use a uint64_t to make sure that the size
650 of the object we are returning is always 64 bits long, as explained
651 in the description of the TARGET_OBJECT_HPUX_SOLIB_GOT object.
652 This is probably paranoia, but we do not use a CORE_ADDR because
653 it could conceivably be larger than uint64_t. */
654 uint64_t got;
655
656 gdb_assert (writebuf == NULL);
657
658 if (offset > sizeof (got))
659 return 0;
660
661 fun_addr = string_to_core_addr (annex);
662 got = ia64_hpux_get_solib_linkage_addr (fun_addr);
663
664 if (len > sizeof (got) - offset)
665 len = sizeof (got) - offset;
  memcpy (readbuf, (gdb_byte *) &got + offset, len);

  return len;
}

/* The "to_xfer_partial" target_ops routine for ia64-hpux. */

static LONGEST
ia64_hpux_xfer_partial (struct target_ops *ops, enum target_object object,
                        const char *annex, gdb_byte *readbuf,
                        const gdb_byte *writebuf, ULONGEST offset, ULONGEST len)
{
  LONGEST val;

  if (object == TARGET_OBJECT_MEMORY)
    val = ia64_hpux_xfer_memory (ops, annex, readbuf, writebuf, offset, len);
  else if (object == TARGET_OBJECT_HPUX_UREGS)
    val = ia64_hpux_xfer_uregs (ops, annex, readbuf, writebuf, offset, len);
  else if (object == TARGET_OBJECT_HPUX_SOLIB_GOT)
    val = ia64_hpux_xfer_solib_got (ops, annex, readbuf, writebuf, offset,
                                    len);
  else
    val = super_xfer_partial (ops, object, annex, readbuf, writebuf, offset,
                              len);

  return val;
}

/* The "to_can_use_hw_breakpoint" target_ops routine for ia64-hpux. */

static int
ia64_hpux_can_use_hw_breakpoint (int type, int cnt, int othertype)
{
  /* No hardware watchpoint/breakpoint support yet. */
  return 0;
}

/* The "to_mourn_inferior" routine from the "inf-ttrace" target_ops layer. */

static void (*super_mourn_inferior) (struct target_ops *);

/* The "to_mourn_inferior" target_ops routine for ia64-hpux. */

static void
ia64_hpux_mourn_inferior (struct target_ops *ops)
{
  const int pid = ptid_get_pid (inferior_ptid);
  int status;

  super_mourn_inferior (ops);

  /* On this platform, the process still exists even after we received
     an exit event. Detaching from the process isn't sufficient either,
     as it only turns the process into a zombie. So the only solution
     we found is to kill it. */
  ttrace (TT_PROC_EXIT, pid, 0, 0, 0, 0);
  wait (&status);
}

/* Prevent warning from -Wmissing-prototypes. */
void _initialize_ia64_hpux_nat (void);

void
_initialize_ia64_hpux_nat (void)
{
  struct target_ops *t;

  t = inf_ttrace_target ();
  super_to_wait = t->to_wait;
  super_xfer_partial = t->to_xfer_partial;
  super_mourn_inferior = t->to_mourn_inferior;

  t->to_wait = ia64_hpux_wait;
  t->to_fetch_registers = ia64_hpux_fetch_registers;
  t->to_store_registers = ia64_hpux_store_registers;
  t->to_xfer_partial = ia64_hpux_xfer_partial;
  t->to_can_use_hw_breakpoint = ia64_hpux_can_use_hw_breakpoint;
  t->to_mourn_inferior = ia64_hpux_mourn_inferior;
  t->to_attach_no_wait = 1;

  add_target (t);
}