1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2012 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include "gdb_string.h"
25 #include "target.h"
26 #include "gdbcmd.h"
27 #include "symtab.h"
28 #include "inferior.h"
29 #include "bfd.h"
30 #include "symfile.h"
31 #include "objfiles.h"
32 #include "dcache.h"
33 #include <signal.h>
34 #include "regcache.h"
35 #include "gdb_assert.h"
36 #include "gdbcore.h"
37 #include "exceptions.h"
38 #include "target-descriptions.h"
39 #include "gdbthread.h"
40 #include "solib.h"
41 #include "exec.h"
42 #include "inline-frame.h"
43 #include "tracepoint.h"
44 #include "gdb/fileio.h"
45 #include "agent.h"
46
47 static void target_info (char *, int);
48
49 static void default_terminal_info (char *, int);
50
51 static int default_watchpoint_addr_within_range (struct target_ops *,
52 CORE_ADDR, CORE_ADDR, int);
53
54 static int default_region_ok_for_hw_watchpoint (CORE_ADDR, int);
55
56 static void tcomplain (void) ATTRIBUTE_NORETURN;
57
58 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
59
60 static int return_zero (void);
61
62 static int return_one (void);
63
64 static int return_minus_one (void);
65
66 void target_ignore (void);
67
68 static void target_command (char *, int);
69
70 static struct target_ops *find_default_run_target (char *);
71
72 static LONGEST default_xfer_partial (struct target_ops *ops,
73 enum target_object object,
74 const char *annex, gdb_byte *readbuf,
75 const gdb_byte *writebuf,
76 ULONGEST offset, LONGEST len);
77
78 static LONGEST current_xfer_partial (struct target_ops *ops,
79 enum target_object object,
80 const char *annex, gdb_byte *readbuf,
81 const gdb_byte *writebuf,
82 ULONGEST offset, LONGEST len);
83
84 static LONGEST target_xfer_partial (struct target_ops *ops,
85 enum target_object object,
86 const char *annex,
87 void *readbuf, const void *writebuf,
88 ULONGEST offset, LONGEST len);
89
90 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
91 ptid_t ptid);
92
93 static void init_dummy_target (void);
94
95 static struct target_ops debug_target;
96
97 static void debug_to_open (char *, int);
98
99 static void debug_to_prepare_to_store (struct regcache *);
100
101 static void debug_to_files_info (struct target_ops *);
102
103 static int debug_to_insert_breakpoint (struct gdbarch *,
104 struct bp_target_info *);
105
106 static int debug_to_remove_breakpoint (struct gdbarch *,
107 struct bp_target_info *);
108
109 static int debug_to_can_use_hw_breakpoint (int, int, int);
110
111 static int debug_to_insert_hw_breakpoint (struct gdbarch *,
112 struct bp_target_info *);
113
114 static int debug_to_remove_hw_breakpoint (struct gdbarch *,
115 struct bp_target_info *);
116
117 static int debug_to_insert_watchpoint (CORE_ADDR, int, int,
118 struct expression *);
119
120 static int debug_to_remove_watchpoint (CORE_ADDR, int, int,
121 struct expression *);
122
123 static int debug_to_stopped_by_watchpoint (void);
124
125 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
126
127 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
128 CORE_ADDR, CORE_ADDR, int);
129
130 static int debug_to_region_ok_for_hw_watchpoint (CORE_ADDR, int);
131
132 static int debug_to_can_accel_watchpoint_condition (CORE_ADDR, int, int,
133 struct expression *);
134
135 static void debug_to_terminal_init (void);
136
137 static void debug_to_terminal_inferior (void);
138
139 static void debug_to_terminal_ours_for_output (void);
140
141 static void debug_to_terminal_save_ours (void);
142
143 static void debug_to_terminal_ours (void);
144
145 static void debug_to_terminal_info (char *, int);
146
147 static void debug_to_load (char *, int);
148
149 static int debug_to_can_run (void);
150
151 static void debug_to_stop (ptid_t);
152
153 /* Pointer to array of target architecture structures; the size of the
154 array; the current index into the array; the allocated size of the
155 array. */
156 struct target_ops **target_structs;
157 unsigned target_struct_size;
158 unsigned target_struct_index;
159 unsigned target_struct_allocsize;
160 #define DEFAULT_ALLOCSIZE 10
161
162 /* The initial current target, so that there is always a semi-valid
163 current target. */
164
165 static struct target_ops dummy_target;
166
167 /* Top of target stack. */
168
169 static struct target_ops *target_stack;
170
171 /* The target structure we are currently using to talk to a process
172 or file or whatever "inferior" we have. */
173
174 struct target_ops current_target;
175
176 /* Command list for target. */
177
178 static struct cmd_list_element *targetlist = NULL;
179
180 /* Nonzero if we should trust readonly sections from the
181 executable when reading memory. */
182
183 static int trust_readonly = 0;
184
185 /* Nonzero if we should show true memory content including
186        memory breakpoints inserted by GDB.  */
187
188 static int show_memory_breakpoints = 0;
189
190 /* These globals control whether GDB attempts to perform these
191 operations; they are useful for targets that need to prevent
192    inadvertent disruption, such as in non-stop mode.  */
193
194 int may_write_registers = 1;
195
196 int may_write_memory = 1;
197
198 int may_insert_breakpoints = 1;
199
200 int may_insert_tracepoints = 1;
201
202 int may_insert_fast_tracepoints = 1;
203
204 int may_stop = 1;
205
206 /* Nonzero if we want to see a trace of target-level operations.  */
207
208 static int targetdebug = 0;
209 static void
210 show_targetdebug (struct ui_file *file, int from_tty,
211 struct cmd_list_element *c, const char *value)
212 {
213 fprintf_filtered (file, _("Target debugging is %s.\n"), value);
214 }
215
216 static void setup_target_debug (void);
217
218 /* The option sets this. */
219 static int stack_cache_enabled_p_1 = 1;
220 /* And set_stack_cache_enabled_p updates this.
221 The reason for the separation is so that we don't flush the cache for
222 on->on transitions. */
223 static int stack_cache_enabled_p = 1;
224
225 /* This is called *after* the stack-cache has been set.
226 Flush the cache for off->on and on->off transitions.
227 There's no real need to flush the cache for on->off transitions,
228 except cleanliness. */
229
230 static void
231 set_stack_cache_enabled_p (char *args, int from_tty,
232 struct cmd_list_element *c)
233 {
234 if (stack_cache_enabled_p != stack_cache_enabled_p_1)
235 target_dcache_invalidate ();
236
237 stack_cache_enabled_p = stack_cache_enabled_p_1;
238 }
239
240 static void
241 show_stack_cache_enabled_p (struct ui_file *file, int from_tty,
242 struct cmd_list_element *c, const char *value)
243 {
244 fprintf_filtered (file, _("Cache use for stack accesses is %s.\n"), value);
245 }
246
247 /* Cache of memory operations, to speed up remote access. */
248 static DCACHE *target_dcache;
249
250 /* Invalidate the target dcache. */
251
252 void
253 target_dcache_invalidate (void)
254 {
255 dcache_invalidate (target_dcache);
256 }
257
258 /* The user just typed 'target' without the name of a target. */
259
260 static void
261 target_command (char *arg, int from_tty)
262 {
263 fputs_filtered ("Argument required (target name). Try `help target'\n",
264 gdb_stdout);
265 }
266
267 /* Default target_has_* methods for process_stratum targets. */
268
269 int
270 default_child_has_all_memory (struct target_ops *ops)
271 {
272 /* If no inferior selected, then we can't read memory here. */
273 if (ptid_equal (inferior_ptid, null_ptid))
274 return 0;
275
276 return 1;
277 }
278
279 int
280 default_child_has_memory (struct target_ops *ops)
281 {
282 /* If no inferior selected, then we can't read memory here. */
283 if (ptid_equal (inferior_ptid, null_ptid))
284 return 0;
285
286 return 1;
287 }
288
289 int
290 default_child_has_stack (struct target_ops *ops)
291 {
292 /* If no inferior selected, there's no stack. */
293 if (ptid_equal (inferior_ptid, null_ptid))
294 return 0;
295
296 return 1;
297 }
298
299 int
300 default_child_has_registers (struct target_ops *ops)
301 {
302 /* Can't read registers from no inferior. */
303 if (ptid_equal (inferior_ptid, null_ptid))
304 return 0;
305
306 return 1;
307 }
308
309 int
310 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
311 {
312 /* If there's no thread selected, then we can't make it run through
313 hoops. */
314 if (ptid_equal (the_ptid, null_ptid))
315 return 0;
316
317 return 1;
318 }
319
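/* Illustration (a sketch; the "foo" names are hypothetical): a native,
   process_stratum target typically wires these defaults straight into
   its vector, so that the target_has_* checks below reflect whether an
   inferior is currently selected:

     static struct target_ops foo_ops;

     static void
     init_foo_ops (void)
     {
       foo_ops.to_has_all_memory = default_child_has_all_memory;
       foo_ops.to_has_memory = default_child_has_memory;
       foo_ops.to_has_stack = default_child_has_stack;
       foo_ops.to_has_registers = default_child_has_registers;
       foo_ops.to_has_execution = default_child_has_execution;
     }
*/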
320
321 int
322 target_has_all_memory_1 (void)
323 {
324 struct target_ops *t;
325
326 for (t = current_target.beneath; t != NULL; t = t->beneath)
327 if (t->to_has_all_memory (t))
328 return 1;
329
330 return 0;
331 }
332
333 int
334 target_has_memory_1 (void)
335 {
336 struct target_ops *t;
337
338 for (t = current_target.beneath; t != NULL; t = t->beneath)
339 if (t->to_has_memory (t))
340 return 1;
341
342 return 0;
343 }
344
345 int
346 target_has_stack_1 (void)
347 {
348 struct target_ops *t;
349
350 for (t = current_target.beneath; t != NULL; t = t->beneath)
351 if (t->to_has_stack (t))
352 return 1;
353
354 return 0;
355 }
356
357 int
358 target_has_registers_1 (void)
359 {
360 struct target_ops *t;
361
362 for (t = current_target.beneath; t != NULL; t = t->beneath)
363 if (t->to_has_registers (t))
364 return 1;
365
366 return 0;
367 }
368
369 int
370 target_has_execution_1 (ptid_t the_ptid)
371 {
372 struct target_ops *t;
373
374 for (t = current_target.beneath; t != NULL; t = t->beneath)
375 if (t->to_has_execution (t, the_ptid))
376 return 1;
377
378 return 0;
379 }
380
381 int
382 target_has_execution_current (void)
383 {
384 return target_has_execution_1 (inferior_ptid);
385 }
386
387 /* Add a possible target architecture to the list. */
388
389 void
390 add_target (struct target_ops *t)
391 {
392 /* Provide default values for all "must have" methods. */
393 if (t->to_xfer_partial == NULL)
394 t->to_xfer_partial = default_xfer_partial;
395
396 if (t->to_has_all_memory == NULL)
397 t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
398
399 if (t->to_has_memory == NULL)
400 t->to_has_memory = (int (*) (struct target_ops *)) return_zero;
401
402 if (t->to_has_stack == NULL)
403 t->to_has_stack = (int (*) (struct target_ops *)) return_zero;
404
405 if (t->to_has_registers == NULL)
406 t->to_has_registers = (int (*) (struct target_ops *)) return_zero;
407
408 if (t->to_has_execution == NULL)
409 t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;
410
411 if (!target_structs)
412 {
413 target_struct_allocsize = DEFAULT_ALLOCSIZE;
414 target_structs = (struct target_ops **) xmalloc
415 (target_struct_allocsize * sizeof (*target_structs));
416 }
417 if (target_struct_size >= target_struct_allocsize)
418 {
419 target_struct_allocsize *= 2;
420 target_structs = (struct target_ops **)
421 xrealloc ((char *) target_structs,
422 target_struct_allocsize * sizeof (*target_structs));
423 }
424 target_structs[target_struct_size++] = t;
425
426 if (targetlist == NULL)
427 add_prefix_cmd ("target", class_run, target_command, _("\
428 Connect to a target machine or process.\n\
429 The first argument is the type or protocol of the target machine.\n\
430 Remaining arguments are interpreted by the target protocol. For more\n\
431 information on the arguments for a particular protocol, type\n\
432 `help target ' followed by the protocol name."),
433 &targetlist, "target ", 0, &cmdlist);
434 add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc, &targetlist);
435 }
436
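/* Illustration (a sketch; the "foo" target, foo_open and
   _initialize_foo are hypothetical): a backend usually fills in a
   file-scope target_ops, including to_magic, and registers it from its
   _initialize routine so that "target foo" becomes available:

     static struct target_ops foo_ops;

     static void
     init_foo_ops (void)
     {
       foo_ops.to_shortname = "foo";
       foo_ops.to_longname = "Foo debugging interface";
       foo_ops.to_doc = "Debug a program over the foo protocol.";
       foo_ops.to_open = foo_open;
       foo_ops.to_stratum = process_stratum;
       foo_ops.to_magic = OPS_MAGIC;
     }

     void
     _initialize_foo (void)
     {
       init_foo_ops ();
       add_target (&foo_ops);
     }
*/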
437 /* Stub functions */
438
439 void
440 target_ignore (void)
441 {
442 }
443
444 void
445 target_kill (void)
446 {
447 struct target_ops *t;
448
449 for (t = current_target.beneath; t != NULL; t = t->beneath)
450 if (t->to_kill != NULL)
451 {
452 if (targetdebug)
453 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
454
455 t->to_kill (t);
456 return;
457 }
458
459 noprocess ();
460 }
461
462 void
463 target_load (char *arg, int from_tty)
464 {
465 target_dcache_invalidate ();
466 (*current_target.to_load) (arg, from_tty);
467 }
468
469 void
470 target_create_inferior (char *exec_file, char *args,
471 char **env, int from_tty)
472 {
473 struct target_ops *t;
474
475 for (t = current_target.beneath; t != NULL; t = t->beneath)
476 {
477 if (t->to_create_inferior != NULL)
478 {
479 t->to_create_inferior (t, exec_file, args, env, from_tty);
480 if (targetdebug)
481 fprintf_unfiltered (gdb_stdlog,
482 "target_create_inferior (%s, %s, xxx, %d)\n",
483 exec_file, args, from_tty);
484 return;
485 }
486 }
487
488 internal_error (__FILE__, __LINE__,
489 _("could not find a target to create inferior"));
490 }
491
492 void
493 target_terminal_inferior (void)
494 {
495 /* A background resume (``run&'') should leave GDB in control of the
496 terminal. Use target_can_async_p, not target_is_async_p, since at
497 this point the target is not async yet. However, if sync_execution
498 is not set, we know it will become async prior to resume. */
499 if (target_can_async_p () && !sync_execution)
500 return;
501
502 /* If GDB is resuming the inferior in the foreground, install
503 inferior's terminal modes. */
504 (*current_target.to_terminal_inferior) ();
505 }
506
507 static int
508 nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
509 struct target_ops *t)
510 {
511 errno = EIO; /* Can't read/write this location. */
512 return 0; /* No bytes handled. */
513 }
514
515 static void
516 tcomplain (void)
517 {
518 error (_("You can't do that when your target is `%s'"),
519 current_target.to_shortname);
520 }
521
522 void
523 noprocess (void)
524 {
525 error (_("You can't do that without a process to debug."));
526 }
527
528 static void
529 default_terminal_info (char *args, int from_tty)
530 {
531 printf_unfiltered (_("No saved terminal information.\n"));
532 }
533
534 /* A default implementation for the to_get_ada_task_ptid target method.
535
536 This function builds the PTID by using both LWP and TID as part of
537 the PTID lwp and tid elements. The pid used is the pid of the
538 inferior_ptid. */
539
540 static ptid_t
541 default_get_ada_task_ptid (long lwp, long tid)
542 {
543 return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
544 }
545
546 static enum exec_direction_kind
547 default_execution_direction (void)
548 {
549 if (!target_can_execute_reverse)
550 return EXEC_FORWARD;
551 else if (!target_can_async_p ())
552 return EXEC_FORWARD;
553 else
554 gdb_assert_not_reached ("\
555 to_execution_direction must be implemented for reverse async");
556 }
557
558 /* Go through the target stack from top to bottom, copying over zero
559 entries in current_target, then filling in still empty entries. In
560 effect, we are doing class inheritance through the pushed target
561 vectors.
562
563 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
564 is currently implemented, is that it discards any knowledge of
565 which target an inherited method originally belonged to.
566    Consequently, new target methods should instead explicitly and
567 locally search the target stack for the target that can handle the
568 request. */
569
570 static void
571 update_current_target (void)
572 {
573 struct target_ops *t;
574
575 /* First, reset current's contents. */
576 memset (&current_target, 0, sizeof (current_target));
577
578 #define INHERIT(FIELD, TARGET) \
579 if (!current_target.FIELD) \
580 current_target.FIELD = (TARGET)->FIELD
581
582 for (t = target_stack; t; t = t->beneath)
583 {
584 INHERIT (to_shortname, t);
585 INHERIT (to_longname, t);
586 INHERIT (to_doc, t);
587 /* Do not inherit to_open. */
588 /* Do not inherit to_close. */
589 /* Do not inherit to_attach. */
590 INHERIT (to_post_attach, t);
591 INHERIT (to_attach_no_wait, t);
592 /* Do not inherit to_detach. */
593 /* Do not inherit to_disconnect. */
594 /* Do not inherit to_resume. */
595 /* Do not inherit to_wait. */
596 /* Do not inherit to_fetch_registers. */
597 /* Do not inherit to_store_registers. */
598 INHERIT (to_prepare_to_store, t);
599 INHERIT (deprecated_xfer_memory, t);
600 INHERIT (to_files_info, t);
601 INHERIT (to_insert_breakpoint, t);
602 INHERIT (to_remove_breakpoint, t);
603 INHERIT (to_can_use_hw_breakpoint, t);
604 INHERIT (to_insert_hw_breakpoint, t);
605 INHERIT (to_remove_hw_breakpoint, t);
606 /* Do not inherit to_ranged_break_num_registers. */
607 INHERIT (to_insert_watchpoint, t);
608 INHERIT (to_remove_watchpoint, t);
609 /* Do not inherit to_insert_mask_watchpoint. */
610 /* Do not inherit to_remove_mask_watchpoint. */
611 INHERIT (to_stopped_data_address, t);
612 INHERIT (to_have_steppable_watchpoint, t);
613 INHERIT (to_have_continuable_watchpoint, t);
614 INHERIT (to_stopped_by_watchpoint, t);
615 INHERIT (to_watchpoint_addr_within_range, t);
616 INHERIT (to_region_ok_for_hw_watchpoint, t);
617 INHERIT (to_can_accel_watchpoint_condition, t);
618 /* Do not inherit to_masked_watch_num_registers. */
619 INHERIT (to_terminal_init, t);
620 INHERIT (to_terminal_inferior, t);
621 INHERIT (to_terminal_ours_for_output, t);
622 INHERIT (to_terminal_ours, t);
623 INHERIT (to_terminal_save_ours, t);
624 INHERIT (to_terminal_info, t);
625 /* Do not inherit to_kill. */
626 INHERIT (to_load, t);
627       /* Do not inherit to_create_inferior.  */
628 INHERIT (to_post_startup_inferior, t);
629 INHERIT (to_insert_fork_catchpoint, t);
630 INHERIT (to_remove_fork_catchpoint, t);
631 INHERIT (to_insert_vfork_catchpoint, t);
632 INHERIT (to_remove_vfork_catchpoint, t);
633 /* Do not inherit to_follow_fork. */
634 INHERIT (to_insert_exec_catchpoint, t);
635 INHERIT (to_remove_exec_catchpoint, t);
636 INHERIT (to_set_syscall_catchpoint, t);
637 INHERIT (to_has_exited, t);
638 /* Do not inherit to_mourn_inferior. */
639 INHERIT (to_can_run, t);
640 /* Do not inherit to_pass_signals. */
641 /* Do not inherit to_program_signals. */
642 /* Do not inherit to_thread_alive. */
643 /* Do not inherit to_find_new_threads. */
644 /* Do not inherit to_pid_to_str. */
645 INHERIT (to_extra_thread_info, t);
646 INHERIT (to_thread_name, t);
647 INHERIT (to_stop, t);
648 /* Do not inherit to_xfer_partial. */
649 INHERIT (to_rcmd, t);
650 INHERIT (to_pid_to_exec_file, t);
651 INHERIT (to_log_command, t);
652 INHERIT (to_stratum, t);
653 /* Do not inherit to_has_all_memory. */
654 /* Do not inherit to_has_memory. */
655 /* Do not inherit to_has_stack. */
656 /* Do not inherit to_has_registers. */
657 /* Do not inherit to_has_execution. */
658 INHERIT (to_has_thread_control, t);
659 INHERIT (to_can_async_p, t);
660 INHERIT (to_is_async_p, t);
661 INHERIT (to_async, t);
662 INHERIT (to_find_memory_regions, t);
663 INHERIT (to_make_corefile_notes, t);
664 INHERIT (to_get_bookmark, t);
665 INHERIT (to_goto_bookmark, t);
666 /* Do not inherit to_get_thread_local_address. */
667 INHERIT (to_can_execute_reverse, t);
668 INHERIT (to_execution_direction, t);
669 INHERIT (to_thread_architecture, t);
670 /* Do not inherit to_read_description. */
671 INHERIT (to_get_ada_task_ptid, t);
672 /* Do not inherit to_search_memory. */
673 INHERIT (to_supports_multi_process, t);
674 INHERIT (to_supports_enable_disable_tracepoint, t);
675 INHERIT (to_supports_string_tracing, t);
676 INHERIT (to_trace_init, t);
677 INHERIT (to_download_tracepoint, t);
678 INHERIT (to_can_download_tracepoint, t);
679 INHERIT (to_download_trace_state_variable, t);
680 INHERIT (to_enable_tracepoint, t);
681 INHERIT (to_disable_tracepoint, t);
682 INHERIT (to_trace_set_readonly_regions, t);
683 INHERIT (to_trace_start, t);
684 INHERIT (to_get_trace_status, t);
685 INHERIT (to_get_tracepoint_status, t);
686 INHERIT (to_trace_stop, t);
687 INHERIT (to_trace_find, t);
688 INHERIT (to_get_trace_state_variable_value, t);
689 INHERIT (to_save_trace_data, t);
690 INHERIT (to_upload_tracepoints, t);
691 INHERIT (to_upload_trace_state_variables, t);
692 INHERIT (to_get_raw_trace_data, t);
693 INHERIT (to_get_min_fast_tracepoint_insn_len, t);
694 INHERIT (to_set_disconnected_tracing, t);
695 INHERIT (to_set_circular_trace_buffer, t);
696 INHERIT (to_set_trace_notes, t);
697 INHERIT (to_get_tib_address, t);
698 INHERIT (to_set_permissions, t);
699 INHERIT (to_static_tracepoint_marker_at, t);
700 INHERIT (to_static_tracepoint_markers_by_strid, t);
701 INHERIT (to_traceframe_info, t);
702 INHERIT (to_use_agent, t);
703 INHERIT (to_can_use_agent, t);
704 INHERIT (to_magic, t);
705 INHERIT (to_supports_evaluation_of_breakpoint_conditions, t);
706 /* Do not inherit to_memory_map. */
707 /* Do not inherit to_flash_erase. */
708 /* Do not inherit to_flash_done. */
709 }
710 #undef INHERIT
711
712 /* Clean up a target struct so it no longer has any zero pointers in
713      it.  Some entries are defaulted to a method that prints an error,
714 others are hard-wired to a standard recursive default. */
715
716 #define de_fault(field, value) \
717 if (!current_target.field) \
718 current_target.field = value
719
720 de_fault (to_open,
721 (void (*) (char *, int))
722 tcomplain);
723 de_fault (to_close,
724 (void (*) (int))
725 target_ignore);
726 de_fault (to_post_attach,
727 (void (*) (int))
728 target_ignore);
729 de_fault (to_prepare_to_store,
730 (void (*) (struct regcache *))
731 noprocess);
732 de_fault (deprecated_xfer_memory,
733 (int (*) (CORE_ADDR, gdb_byte *, int, int,
734 struct mem_attrib *, struct target_ops *))
735 nomemory);
736 de_fault (to_files_info,
737 (void (*) (struct target_ops *))
738 target_ignore);
739 de_fault (to_insert_breakpoint,
740 memory_insert_breakpoint);
741 de_fault (to_remove_breakpoint,
742 memory_remove_breakpoint);
743 de_fault (to_can_use_hw_breakpoint,
744 (int (*) (int, int, int))
745 return_zero);
746 de_fault (to_insert_hw_breakpoint,
747 (int (*) (struct gdbarch *, struct bp_target_info *))
748 return_minus_one);
749 de_fault (to_remove_hw_breakpoint,
750 (int (*) (struct gdbarch *, struct bp_target_info *))
751 return_minus_one);
752 de_fault (to_insert_watchpoint,
753 (int (*) (CORE_ADDR, int, int, struct expression *))
754 return_minus_one);
755 de_fault (to_remove_watchpoint,
756 (int (*) (CORE_ADDR, int, int, struct expression *))
757 return_minus_one);
758 de_fault (to_stopped_by_watchpoint,
759 (int (*) (void))
760 return_zero);
761 de_fault (to_stopped_data_address,
762 (int (*) (struct target_ops *, CORE_ADDR *))
763 return_zero);
764 de_fault (to_watchpoint_addr_within_range,
765 default_watchpoint_addr_within_range);
766 de_fault (to_region_ok_for_hw_watchpoint,
767 default_region_ok_for_hw_watchpoint);
768 de_fault (to_can_accel_watchpoint_condition,
769 (int (*) (CORE_ADDR, int, int, struct expression *))
770 return_zero);
771 de_fault (to_terminal_init,
772 (void (*) (void))
773 target_ignore);
774 de_fault (to_terminal_inferior,
775 (void (*) (void))
776 target_ignore);
777 de_fault (to_terminal_ours_for_output,
778 (void (*) (void))
779 target_ignore);
780 de_fault (to_terminal_ours,
781 (void (*) (void))
782 target_ignore);
783 de_fault (to_terminal_save_ours,
784 (void (*) (void))
785 target_ignore);
786 de_fault (to_terminal_info,
787 default_terminal_info);
788 de_fault (to_load,
789 (void (*) (char *, int))
790 tcomplain);
791 de_fault (to_post_startup_inferior,
792 (void (*) (ptid_t))
793 target_ignore);
794 de_fault (to_insert_fork_catchpoint,
795 (int (*) (int))
796 return_one);
797 de_fault (to_remove_fork_catchpoint,
798 (int (*) (int))
799 return_one);
800 de_fault (to_insert_vfork_catchpoint,
801 (int (*) (int))
802 return_one);
803 de_fault (to_remove_vfork_catchpoint,
804 (int (*) (int))
805 return_one);
806 de_fault (to_insert_exec_catchpoint,
807 (int (*) (int))
808 return_one);
809 de_fault (to_remove_exec_catchpoint,
810 (int (*) (int))
811 return_one);
812 de_fault (to_set_syscall_catchpoint,
813 (int (*) (int, int, int, int, int *))
814 return_one);
815 de_fault (to_has_exited,
816 (int (*) (int, int, int *))
817 return_zero);
818 de_fault (to_can_run,
819 return_zero);
820 de_fault (to_extra_thread_info,
821 (char *(*) (struct thread_info *))
822 return_zero);
823 de_fault (to_thread_name,
824 (char *(*) (struct thread_info *))
825 return_zero);
826 de_fault (to_stop,
827 (void (*) (ptid_t))
828 target_ignore);
829 current_target.to_xfer_partial = current_xfer_partial;
830 de_fault (to_rcmd,
831 (void (*) (char *, struct ui_file *))
832 tcomplain);
833 de_fault (to_pid_to_exec_file,
834 (char *(*) (int))
835 return_zero);
836 de_fault (to_async,
837 (void (*) (void (*) (enum inferior_event_type, void*), void*))
838 tcomplain);
839 de_fault (to_thread_architecture,
840 default_thread_architecture);
841 current_target.to_read_description = NULL;
842 de_fault (to_get_ada_task_ptid,
843 (ptid_t (*) (long, long))
844 default_get_ada_task_ptid);
845 de_fault (to_supports_multi_process,
846 (int (*) (void))
847 return_zero);
848 de_fault (to_supports_enable_disable_tracepoint,
849 (int (*) (void))
850 return_zero);
851 de_fault (to_supports_string_tracing,
852 (int (*) (void))
853 return_zero);
854 de_fault (to_trace_init,
855 (void (*) (void))
856 tcomplain);
857 de_fault (to_download_tracepoint,
858 (void (*) (struct bp_location *))
859 tcomplain);
860 de_fault (to_can_download_tracepoint,
861 (int (*) (void))
862 return_zero);
863 de_fault (to_download_trace_state_variable,
864 (void (*) (struct trace_state_variable *))
865 tcomplain);
866 de_fault (to_enable_tracepoint,
867 (void (*) (struct bp_location *))
868 tcomplain);
869 de_fault (to_disable_tracepoint,
870 (void (*) (struct bp_location *))
871 tcomplain);
872 de_fault (to_trace_set_readonly_regions,
873 (void (*) (void))
874 tcomplain);
875 de_fault (to_trace_start,
876 (void (*) (void))
877 tcomplain);
878 de_fault (to_get_trace_status,
879 (int (*) (struct trace_status *))
880 return_minus_one);
881 de_fault (to_get_tracepoint_status,
882 (void (*) (struct breakpoint *, struct uploaded_tp *))
883 tcomplain);
884 de_fault (to_trace_stop,
885 (void (*) (void))
886 tcomplain);
887 de_fault (to_trace_find,
888 (int (*) (enum trace_find_type, int, ULONGEST, ULONGEST, int *))
889 return_minus_one);
890 de_fault (to_get_trace_state_variable_value,
891 (int (*) (int, LONGEST *))
892 return_zero);
893 de_fault (to_save_trace_data,
894 (int (*) (const char *))
895 tcomplain);
896 de_fault (to_upload_tracepoints,
897 (int (*) (struct uploaded_tp **))
898 return_zero);
899 de_fault (to_upload_trace_state_variables,
900 (int (*) (struct uploaded_tsv **))
901 return_zero);
902 de_fault (to_get_raw_trace_data,
903 (LONGEST (*) (gdb_byte *, ULONGEST, LONGEST))
904 tcomplain);
905 de_fault (to_get_min_fast_tracepoint_insn_len,
906 (int (*) (void))
907 return_minus_one);
908 de_fault (to_set_disconnected_tracing,
909 (void (*) (int))
910 target_ignore);
911 de_fault (to_set_circular_trace_buffer,
912 (void (*) (int))
913 target_ignore);
914 de_fault (to_set_trace_notes,
915 (int (*) (char *, char *, char *))
916 return_zero);
917 de_fault (to_get_tib_address,
918 (int (*) (ptid_t, CORE_ADDR *))
919 tcomplain);
920 de_fault (to_set_permissions,
921 (void (*) (void))
922 target_ignore);
923 de_fault (to_static_tracepoint_marker_at,
924 (int (*) (CORE_ADDR, struct static_tracepoint_marker *))
925 return_zero);
926 de_fault (to_static_tracepoint_markers_by_strid,
927 (VEC(static_tracepoint_marker_p) * (*) (const char *))
928 tcomplain);
929 de_fault (to_traceframe_info,
930 (struct traceframe_info * (*) (void))
931 tcomplain);
932 de_fault (to_supports_evaluation_of_breakpoint_conditions,
933 (int (*) (void))
934 return_zero);
935 de_fault (to_use_agent,
936 (int (*) (int))
937 tcomplain);
938 de_fault (to_can_use_agent,
939 (int (*) (void))
940 return_zero);
941 de_fault (to_execution_direction, default_execution_direction);
942
943 #undef de_fault
944
945 /* Finally, position the target-stack beneath the squashed
946 "current_target". That way code looking for a non-inherited
947 target method can quickly and simply find it. */
948 current_target.beneath = target_stack;
949
950 if (targetdebug)
951 setup_target_debug ();
952 }
953
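/* For reference, the two helper macros above expand mechanically; for
   instance INHERIT (to_files_info, t) becomes

     if (!current_target.to_files_info)
       current_target.to_files_info = (t)->to_files_info;

   and de_fault (to_files_info, (void (*) (struct target_ops *)) target_ignore)
   becomes

     if (!current_target.to_files_info)
       current_target.to_files_info
         = (void (*) (struct target_ops *)) target_ignore;

   so each method is taken from the topmost pushed target that provides
   it, and falls back to the default only when no pushed target does.  */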
954 /* Push a new target type into the stack of the existing target accessors,
955 possibly superseding some of the existing accessors.
956
957 Rather than allow an empty stack, we always have the dummy target at
958 the bottom stratum, so we can call the function vectors without
959 checking them. */
960
961 void
962 push_target (struct target_ops *t)
963 {
964 struct target_ops **cur;
965
966 /* Check magic number. If wrong, it probably means someone changed
967 the struct definition, but not all the places that initialize one. */
968 if (t->to_magic != OPS_MAGIC)
969 {
970 fprintf_unfiltered (gdb_stderr,
971 "Magic number of %s target struct wrong\n",
972 t->to_shortname);
973 internal_error (__FILE__, __LINE__,
974 _("failed internal consistency check"));
975 }
976
977 /* Find the proper stratum to install this target in. */
978 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
979 {
980 if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
981 break;
982 }
983
984   /* If there are already targets at this stratum, remove them.  */
985 /* FIXME: cagney/2003-10-15: I think this should be popping all
986 targets to CUR, and not just those at this stratum level. */
987 while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
988 {
989 /* There's already something at this stratum level. Close it,
990 and un-hook it from the stack. */
991 struct target_ops *tmp = (*cur);
992
993 (*cur) = (*cur)->beneath;
994 tmp->beneath = NULL;
995 target_close (tmp, 0);
996 }
997
998   /* We have removed all targets in our stratum; now add the new one.  */
999 t->beneath = (*cur);
1000 (*cur) = t;
1001
1002 update_current_target ();
1003 }
1004
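/* Illustration (a sketch; the real pushes happen in exec.c and
   corelow.c): to_stratum orders the stack.  The exec target uses
   file_stratum and the core-file target uses process_stratum, so after,
   in effect,

     push_target (&exec_ops);
     push_target (&core_ops);

   the core target sits above the exec target, and memory that the core
   file does not contain can still be satisfied by the executable via
   the fall-through loop in memory_xfer_partial_1 below.  */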
1005 /* Remove a target_ops vector from the stack, wherever it may be.
1006 Return how many times it was removed (0 or 1). */
1007
1008 int
1009 unpush_target (struct target_ops *t)
1010 {
1011 struct target_ops **cur;
1012 struct target_ops *tmp;
1013
1014 if (t->to_stratum == dummy_stratum)
1015 internal_error (__FILE__, __LINE__,
1016 _("Attempt to unpush the dummy target"));
1017
1018 /* Look for the specified target. Note that we assume that a target
1019 can only occur once in the target stack. */
1020
1021 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1022 {
1023 if ((*cur) == t)
1024 break;
1025 }
1026
1027 /* If we don't find target_ops, quit. Only open targets should be
1028 closed. */
1029 if ((*cur) == NULL)
1030 return 0;
1031
1032 /* Unchain the target. */
1033 tmp = (*cur);
1034 (*cur) = (*cur)->beneath;
1035 tmp->beneath = NULL;
1036
1037 update_current_target ();
1038
1039 /* Finally close the target. Note we do this after unchaining, so
1040 any target method calls from within the target_close
1041 implementation don't end up in T anymore. */
1042 target_close (t, 0);
1043
1044 return 1;
1045 }
1046
1047 void
1048 pop_target (void)
1049 {
1050 target_close (target_stack, 0); /* Let it clean up. */
1051 if (unpush_target (target_stack) == 1)
1052 return;
1053
1054 fprintf_unfiltered (gdb_stderr,
1055 "pop_target couldn't find target %s\n",
1056 current_target.to_shortname);
1057 internal_error (__FILE__, __LINE__,
1058 _("failed internal consistency check"));
1059 }
1060
1061 void
1062 pop_all_targets_above (enum strata above_stratum, int quitting)
1063 {
1064 while ((int) (current_target.to_stratum) > (int) above_stratum)
1065 {
1066 target_close (target_stack, quitting);
1067 if (!unpush_target (target_stack))
1068 {
1069 fprintf_unfiltered (gdb_stderr,
1070 "pop_all_targets couldn't find target %s\n",
1071 target_stack->to_shortname);
1072 internal_error (__FILE__, __LINE__,
1073 _("failed internal consistency check"));
1074 break;
1075 }
1076 }
1077 }
1078
1079 void
1080 pop_all_targets (int quitting)
1081 {
1082 pop_all_targets_above (dummy_stratum, quitting);
1083 }
1084
1085 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
1086
1087 int
1088 target_is_pushed (struct target_ops *t)
1089 {
1090 struct target_ops **cur;
1091
1092 /* Check magic number. If wrong, it probably means someone changed
1093 the struct definition, but not all the places that initialize one. */
1094 if (t->to_magic != OPS_MAGIC)
1095 {
1096 fprintf_unfiltered (gdb_stderr,
1097 "Magic number of %s target struct wrong\n",
1098 t->to_shortname);
1099 internal_error (__FILE__, __LINE__,
1100 _("failed internal consistency check"));
1101 }
1102
1103 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1104 if (*cur == t)
1105 return 1;
1106
1107 return 0;
1108 }
1109
1110 /* Using the objfile specified in OBJFILE, find the address for the
1111 current thread's thread-local storage with offset OFFSET. */
1112 CORE_ADDR
1113 target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
1114 {
1115 volatile CORE_ADDR addr = 0;
1116 struct target_ops *target;
1117
1118 for (target = current_target.beneath;
1119 target != NULL;
1120 target = target->beneath)
1121 {
1122 if (target->to_get_thread_local_address != NULL)
1123 break;
1124 }
1125
1126 if (target != NULL
1127 && gdbarch_fetch_tls_load_module_address_p (target_gdbarch))
1128 {
1129 ptid_t ptid = inferior_ptid;
1130 volatile struct gdb_exception ex;
1131
1132 TRY_CATCH (ex, RETURN_MASK_ALL)
1133 {
1134 CORE_ADDR lm_addr;
1135
1136 /* Fetch the load module address for this objfile. */
1137 lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch,
1138 objfile);
1139 /* If it's 0, throw the appropriate exception. */
1140 if (lm_addr == 0)
1141 throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
1142 _("TLS load module not found"));
1143
1144 addr = target->to_get_thread_local_address (target, ptid,
1145 lm_addr, offset);
1146 }
1147 /* If an error occurred, print TLS related messages here. Otherwise,
1148 throw the error to some higher catcher. */
1149 if (ex.reason < 0)
1150 {
1151 int objfile_is_library = (objfile->flags & OBJF_SHARED);
1152
1153 switch (ex.error)
1154 {
1155 case TLS_NO_LIBRARY_SUPPORT_ERROR:
1156 error (_("Cannot find thread-local variables "
1157 "in this thread library."));
1158 break;
1159 case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
1160 if (objfile_is_library)
1161 error (_("Cannot find shared library `%s' in dynamic"
1162 " linker's load module list"), objfile->name);
1163 else
1164 error (_("Cannot find executable file `%s' in dynamic"
1165 " linker's load module list"), objfile->name);
1166 break;
1167 case TLS_NOT_ALLOCATED_YET_ERROR:
1168 if (objfile_is_library)
1169 error (_("The inferior has not yet allocated storage for"
1170 " thread-local variables in\n"
1171 "the shared library `%s'\n"
1172 "for %s"),
1173 objfile->name, target_pid_to_str (ptid));
1174 else
1175 error (_("The inferior has not yet allocated storage for"
1176 " thread-local variables in\n"
1177 "the executable `%s'\n"
1178 "for %s"),
1179 objfile->name, target_pid_to_str (ptid));
1180 break;
1181 case TLS_GENERIC_ERROR:
1182 if (objfile_is_library)
1183 error (_("Cannot find thread-local storage for %s, "
1184 "shared library %s:\n%s"),
1185 target_pid_to_str (ptid),
1186 objfile->name, ex.message);
1187 else
1188 error (_("Cannot find thread-local storage for %s, "
1189 "executable file %s:\n%s"),
1190 target_pid_to_str (ptid),
1191 objfile->name, ex.message);
1192 break;
1193 default:
1194 throw_exception (ex);
1195 break;
1196 }
1197 }
1198 }
1199 /* It wouldn't be wrong here to try a gdbarch method, too; finding
1200 TLS is an ABI-specific thing. But we don't do that yet. */
1201 else
1202 error (_("Cannot find thread-local variables on this target"));
1203
1204 return addr;
1205 }
1206
1207 #undef MIN
1208 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1209
1210 /* target_read_string -- read a null terminated string, up to LEN bytes,
1211 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
1212 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1213 is responsible for freeing it. Return the number of bytes successfully
1214 read. */
1215
1216 int
1217 target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
1218 {
1219 int tlen, origlen, offset, i;
1220 gdb_byte buf[4];
1221 int errcode = 0;
1222 char *buffer;
1223 int buffer_allocated;
1224 char *bufptr;
1225 unsigned int nbytes_read = 0;
1226
1227 gdb_assert (string);
1228
1229 /* Small for testing. */
1230 buffer_allocated = 4;
1231 buffer = xmalloc (buffer_allocated);
1232 bufptr = buffer;
1233
1234 origlen = len;
1235
1236 while (len > 0)
1237 {
1238 tlen = MIN (len, 4 - (memaddr & 3));
1239 offset = memaddr & 3;
1240
1241 errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
1242 if (errcode != 0)
1243 {
1244 /* The transfer request might have crossed the boundary to an
1245 unallocated region of memory. Retry the transfer, requesting
1246 a single byte. */
1247 tlen = 1;
1248 offset = 0;
1249 errcode = target_read_memory (memaddr, buf, 1);
1250 if (errcode != 0)
1251 goto done;
1252 }
1253
1254 if (bufptr - buffer + tlen > buffer_allocated)
1255 {
1256 unsigned int bytes;
1257
1258 bytes = bufptr - buffer;
1259 buffer_allocated *= 2;
1260 buffer = xrealloc (buffer, buffer_allocated);
1261 bufptr = buffer + bytes;
1262 }
1263
1264 for (i = 0; i < tlen; i++)
1265 {
1266 *bufptr++ = buf[i + offset];
1267 if (buf[i + offset] == '\000')
1268 {
1269 nbytes_read += i + 1;
1270 goto done;
1271 }
1272 }
1273
1274 memaddr += tlen;
1275 len -= tlen;
1276 nbytes_read += tlen;
1277 }
1278 done:
1279 *string = buffer;
1280 if (errnop != NULL)
1281 *errnop = errcode;
1282 return nbytes_read;
1283 }
1284
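/* Example usage (a sketch; ADDR stands in for caller context): read a
   NUL-terminated string of at most 200 bytes from the inferior and
   release the buffer when done:

     char *str;
     int errcode;
     int nread = target_read_string (addr, &str, 200, &errcode);

     if (errcode == 0 && nread > 0)
       printf_filtered ("%s\n", str);
     xfree (str);

   Note the buffer is malloc'd even on error, so it must always be
   freed.  */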
1285 struct target_section_table *
1286 target_get_section_table (struct target_ops *target)
1287 {
1288 struct target_ops *t;
1289
1290 if (targetdebug)
1291 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1292
1293 for (t = target; t != NULL; t = t->beneath)
1294 if (t->to_get_section_table != NULL)
1295 return (*t->to_get_section_table) (t);
1296
1297 return NULL;
1298 }
1299
1300 /* Find a section containing ADDR. */
1301
1302 struct target_section *
1303 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1304 {
1305 struct target_section_table *table = target_get_section_table (target);
1306 struct target_section *secp;
1307
1308 if (table == NULL)
1309 return NULL;
1310
1311 for (secp = table->sections; secp < table->sections_end; secp++)
1312 {
1313 if (addr >= secp->addr && addr < secp->endaddr)
1314 return secp;
1315 }
1316 return NULL;
1317 }
1318
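/* Example usage (a sketch; PC stands in for caller context): check
   whether an address is backed by a read-only section known to the
   target stack, mirroring the test used further below:

     struct target_section *sec
       = target_section_by_addr (&current_target, pc);
     int readonly
       = (sec != NULL
          && (bfd_get_section_flags (sec->bfd, sec->the_bfd_section)
              & SEC_READONLY) != 0);
*/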
1319 /* Read memory from the live target, even if currently inspecting a
1320 traceframe. The return is the same as that of target_read. */
1321
1322 static LONGEST
1323 target_read_live_memory (enum target_object object,
1324 ULONGEST memaddr, gdb_byte *myaddr, LONGEST len)
1325 {
1326 int ret;
1327 struct cleanup *cleanup;
1328
1329   /* Switch momentarily out of tfind mode so as to access live memory.
1330 Note that this must not clear global state, such as the frame
1331 cache, which must still remain valid for the previous traceframe.
1332 We may be _building_ the frame cache at this point. */
1333 cleanup = make_cleanup_restore_traceframe_number ();
1334 set_traceframe_number (-1);
1335
1336 ret = target_read (current_target.beneath, object, NULL,
1337 myaddr, memaddr, len);
1338
1339 do_cleanups (cleanup);
1340 return ret;
1341 }
1342
1343 /* Using the set of read-only target sections of OPS, read live
1344 read-only memory. Note that the actual reads start from the
1345 top-most target again.
1346
1347 For interface/parameters/return description see target.h,
1348 to_xfer_partial. */
1349
1350 static LONGEST
1351 memory_xfer_live_readonly_partial (struct target_ops *ops,
1352 enum target_object object,
1353 gdb_byte *readbuf, ULONGEST memaddr,
1354 LONGEST len)
1355 {
1356 struct target_section *secp;
1357 struct target_section_table *table;
1358
1359 secp = target_section_by_addr (ops, memaddr);
1360 if (secp != NULL
1361 && (bfd_get_section_flags (secp->bfd, secp->the_bfd_section)
1362 & SEC_READONLY))
1363 {
1364 struct target_section *p;
1365 ULONGEST memend = memaddr + len;
1366
1367 table = target_get_section_table (ops);
1368
1369 for (p = table->sections; p < table->sections_end; p++)
1370 {
1371 if (memaddr >= p->addr)
1372 {
1373 if (memend <= p->endaddr)
1374 {
1375 /* Entire transfer is within this section. */
1376 return target_read_live_memory (object, memaddr,
1377 readbuf, len);
1378 }
1379 else if (memaddr >= p->endaddr)
1380 {
1381 /* This section ends before the transfer starts. */
1382 continue;
1383 }
1384 else
1385 {
1386 /* This section overlaps the transfer. Just do half. */
1387 len = p->endaddr - memaddr;
1388 return target_read_live_memory (object, memaddr,
1389 readbuf, len);
1390 }
1391 }
1392 }
1393 }
1394
1395 return 0;
1396 }
1397
1398 /* Perform a partial memory transfer.
1399 For docs see target.h, to_xfer_partial. */
1400
1401 static LONGEST
1402 memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
1403 void *readbuf, const void *writebuf, ULONGEST memaddr,
1404 LONGEST len)
1405 {
1406 LONGEST res;
1407 int reg_len;
1408 struct mem_region *region;
1409 struct inferior *inf;
1410
1411 /* For accesses to unmapped overlay sections, read directly from
1412 files. Must do this first, as MEMADDR may need adjustment. */
1413 if (readbuf != NULL && overlay_debugging)
1414 {
1415 struct obj_section *section = find_pc_overlay (memaddr);
1416
1417 if (pc_in_unmapped_range (memaddr, section))
1418 {
1419 struct target_section_table *table
1420 = target_get_section_table (ops);
1421 const char *section_name = section->the_bfd_section->name;
1422
1423 memaddr = overlay_mapped_address (memaddr, section);
1424 return section_table_xfer_memory_partial (readbuf, writebuf,
1425 memaddr, len,
1426 table->sections,
1427 table->sections_end,
1428 section_name);
1429 }
1430 }
1431
1432 /* Try the executable files, if "trust-readonly-sections" is set. */
1433 if (readbuf != NULL && trust_readonly)
1434 {
1435 struct target_section *secp;
1436 struct target_section_table *table;
1437
1438 secp = target_section_by_addr (ops, memaddr);
1439 if (secp != NULL
1440 && (bfd_get_section_flags (secp->bfd, secp->the_bfd_section)
1441 & SEC_READONLY))
1442 {
1443 table = target_get_section_table (ops);
1444 return section_table_xfer_memory_partial (readbuf, writebuf,
1445 memaddr, len,
1446 table->sections,
1447 table->sections_end,
1448 NULL);
1449 }
1450 }
1451
1452 /* If reading unavailable memory in the context of traceframes, and
1453      this address falls within a read-only section, fall back to
1454 reading from live memory. */
1455 if (readbuf != NULL && get_traceframe_number () != -1)
1456 {
1457 VEC(mem_range_s) *available;
1458
1459 /* If we fail to get the set of available memory, then the
1460 target does not support querying traceframe info, and so we
1461 attempt reading from the traceframe anyway (assuming the
1462 target implements the old QTro packet then). */
1463 if (traceframe_available_memory (&available, memaddr, len))
1464 {
1465 struct cleanup *old_chain;
1466
1467 old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);
1468
1469 if (VEC_empty (mem_range_s, available)
1470 || VEC_index (mem_range_s, available, 0)->start != memaddr)
1471 {
1472 /* Don't read into the traceframe's available
1473 memory. */
1474 if (!VEC_empty (mem_range_s, available))
1475 {
1476 LONGEST oldlen = len;
1477
1478 len = VEC_index (mem_range_s, available, 0)->start - memaddr;
1479 gdb_assert (len <= oldlen);
1480 }
1481
1482 do_cleanups (old_chain);
1483
1484 /* This goes through the topmost target again. */
1485 res = memory_xfer_live_readonly_partial (ops, object,
1486 readbuf, memaddr, len);
1487 if (res > 0)
1488 return res;
1489
1490 /* No use trying further, we know some memory starting
1491 at MEMADDR isn't available. */
1492 return -1;
1493 }
1494
1495 /* Don't try to read more than how much is available, in
1496 case the target implements the deprecated QTro packet to
1497 cater for older GDBs (the target's knowledge of read-only
1498 sections may be outdated by now). */
1499 len = VEC_index (mem_range_s, available, 0)->length;
1500
1501 do_cleanups (old_chain);
1502 }
1503 }
1504
1505 /* Try GDB's internal data cache. */
1506 region = lookup_mem_region (memaddr);
1507 /* region->hi == 0 means there's no upper bound. */
1508 if (memaddr + len < region->hi || region->hi == 0)
1509 reg_len = len;
1510 else
1511 reg_len = region->hi - memaddr;
1512
1513 switch (region->attrib.mode)
1514 {
1515 case MEM_RO:
1516 if (writebuf != NULL)
1517 return -1;
1518 break;
1519
1520 case MEM_WO:
1521 if (readbuf != NULL)
1522 return -1;
1523 break;
1524
1525 case MEM_FLASH:
1526 /* We only support writing to flash during "load" for now. */
1527 if (writebuf != NULL)
1528 error (_("Writing to flash memory forbidden in this context"));
1529 break;
1530
1531 case MEM_NONE:
1532 return -1;
1533 }
1534
1535 if (!ptid_equal (inferior_ptid, null_ptid))
1536 inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
1537 else
1538 inf = NULL;
1539
1540 if (inf != NULL
1541 /* The dcache reads whole cache lines; that doesn't play well
1542 with reading from a trace buffer, because reading outside of
1543 the collected memory range fails. */
1544 && get_traceframe_number () == -1
1545 && (region->attrib.cache
1546 || (stack_cache_enabled_p && object == TARGET_OBJECT_STACK_MEMORY)))
1547 {
1548 if (readbuf != NULL)
1549 res = dcache_xfer_memory (ops, target_dcache, memaddr, readbuf,
1550 reg_len, 0);
1551 else
1552 /* FIXME drow/2006-08-09: If we're going to preserve const
1553 correctness dcache_xfer_memory should take readbuf and
1554 writebuf. */
1555 res = dcache_xfer_memory (ops, target_dcache, memaddr,
1556 (void *) writebuf,
1557 reg_len, 1);
1558 if (res <= 0)
1559 return -1;
1560 else
1561 return res;
1562 }
1563
1564 /* If none of those methods found the memory we wanted, fall back
1565 to a target partial transfer. Normally a single call to
1566 to_xfer_partial is enough; if it doesn't recognize an object
1567 it will call the to_xfer_partial of the next target down.
1568 But for memory this won't do. Memory is the only target
1569 object which can be read from more than one valid target.
1570 A core file, for instance, could have some of memory but
1571 delegate other bits to the target below it. So, we must
1572 manually try all targets. */
1573
1574 do
1575 {
1576 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1577 readbuf, writebuf, memaddr, reg_len);
1578 if (res > 0)
1579 break;
1580
1581 /* We want to continue past core files to executables, but not
1582 past a running target's memory. */
1583 if (ops->to_has_all_memory (ops))
1584 break;
1585
1586 ops = ops->beneath;
1587 }
1588 while (ops != NULL);
1589
1590   /* Make sure the cache gets updated no matter what if we are writing
1591      to the stack: even if this write is not tagged as such, we still
1592      need to update the cache.  */
1593
1594 if (res > 0
1595 && inf != NULL
1596 && writebuf != NULL
1597 && !region->attrib.cache
1598 && stack_cache_enabled_p
1599 && object != TARGET_OBJECT_STACK_MEMORY)
1600 {
1601 dcache_update (target_dcache, memaddr, (void *) writebuf, res);
1602 }
1603
1604 /* If we still haven't got anything, return the last error. We
1605 give up. */
1606 return res;
1607 }
1608
1609 /* Perform a partial memory transfer. For docs see target.h,
1610 to_xfer_partial. */
1611
1612 static LONGEST
1613 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1614 void *readbuf, const void *writebuf, ULONGEST memaddr,
1615 LONGEST len)
1616 {
1617 int res;
1618
1619 /* Zero length requests are ok and require no work. */
1620 if (len == 0)
1621 return 0;
1622
1623 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1624 breakpoint insns, thus hiding out from higher layers whether
1625 there are software breakpoints inserted in the code stream. */
1626 if (readbuf != NULL)
1627 {
1628 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len);
1629
1630 if (res > 0 && !show_memory_breakpoints)
1631 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1632 }
1633 else
1634 {
1635 void *buf;
1636 struct cleanup *old_chain;
1637
1638 buf = xmalloc (len);
1639 old_chain = make_cleanup (xfree, buf);
1640 memcpy (buf, writebuf, len);
1641
1642 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1643 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len);
1644
1645 do_cleanups (old_chain);
1646 }
1647
1648 return res;
1649 }
1650
1651 static void
1652 restore_show_memory_breakpoints (void *arg)
1653 {
1654 show_memory_breakpoints = (uintptr_t) arg;
1655 }
1656
1657 struct cleanup *
1658 make_show_memory_breakpoints_cleanup (int show)
1659 {
1660 int current = show_memory_breakpoints;
1661
1662 show_memory_breakpoints = show;
1663 return make_cleanup (restore_show_memory_breakpoints,
1664 (void *) (uintptr_t) current);
1665 }
1666
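/* Example usage (a sketch; MEMADDR, BUF and LEN stand in for caller
   context): read memory as it really is on the target, including any
   breakpoint instructions GDB has inserted, and restore the previous
   setting afterwards, even if the read throws:

     struct cleanup *old_chain = make_show_memory_breakpoints_cleanup (1);

     target_read_memory (memaddr, buf, len);
     do_cleanups (old_chain);
*/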
1667 /* For docs see target.h, to_xfer_partial. */
1668
1669 static LONGEST
1670 target_xfer_partial (struct target_ops *ops,
1671 enum target_object object, const char *annex,
1672 void *readbuf, const void *writebuf,
1673 ULONGEST offset, LONGEST len)
1674 {
1675 LONGEST retval;
1676
1677 gdb_assert (ops->to_xfer_partial != NULL);
1678
1679 if (writebuf && !may_write_memory)
1680 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1681 core_addr_to_string_nz (offset), plongest (len));
1682
1683 /* If this is a memory transfer, let the memory-specific code
1684 have a look at it instead. Memory transfers are more
1685 complicated. */
1686 if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY)
1687 retval = memory_xfer_partial (ops, object, readbuf,
1688 writebuf, offset, len);
1689 else
1690 {
1691 enum target_object raw_object = object;
1692
1693 /* If this is a raw memory transfer, request the normal
1694 memory object from other layers. */
1695 if (raw_object == TARGET_OBJECT_RAW_MEMORY)
1696 raw_object = TARGET_OBJECT_MEMORY;
1697
1698 retval = ops->to_xfer_partial (ops, raw_object, annex, readbuf,
1699 writebuf, offset, len);
1700 }
1701
1702 if (targetdebug)
1703 {
1704 const unsigned char *myaddr = NULL;
1705
1706 fprintf_unfiltered (gdb_stdlog,
1707 "%s:target_xfer_partial "
1708 "(%d, %s, %s, %s, %s, %s) = %s",
1709 ops->to_shortname,
1710 (int) object,
1711 (annex ? annex : "(null)"),
1712 host_address_to_string (readbuf),
1713 host_address_to_string (writebuf),
1714 core_addr_to_string_nz (offset),
1715 plongest (len), plongest (retval));
1716
1717 if (readbuf)
1718 myaddr = readbuf;
1719 if (writebuf)
1720 myaddr = writebuf;
1721 if (retval > 0 && myaddr != NULL)
1722 {
1723 int i;
1724
1725 fputs_unfiltered (", bytes =", gdb_stdlog);
1726 for (i = 0; i < retval; i++)
1727 {
1728 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
1729 {
1730 if (targetdebug < 2 && i > 0)
1731 {
1732 fprintf_unfiltered (gdb_stdlog, " ...");
1733 break;
1734 }
1735 fprintf_unfiltered (gdb_stdlog, "\n");
1736 }
1737
1738 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1739 }
1740 }
1741
1742 fputc_unfiltered ('\n', gdb_stdlog);
1743 }
1744 return retval;
1745 }
1746
1747 /* Read LEN bytes of target memory at address MEMADDR, placing the results in
1748 GDB's memory at MYADDR. Returns either 0 for success or an errno value
1749 if any error occurs.
1750
1751 If an error occurs, no guarantee is made about the contents of the data at
1752 MYADDR. In particular, the caller should not depend upon partial reads
1753 filling the buffer with good data. There is no way for the caller to know
1754    how much good data might have been transferred anyway.  Callers that can
1755 deal with partial reads should call target_read (which will retry until
1756 it makes no progress, and then return how much was transferred). */
1757
1758 int
1759 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, int len)
1760 {
1761 /* Dispatch to the topmost target, not the flattened current_target.
1762 Memory accesses check target->to_has_(all_)memory, and the
1763 flattened target doesn't inherit those. */
1764 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1765 myaddr, memaddr, len) == len)
1766 return 0;
1767 else
1768 return EIO;
1769 }
1770
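/* Example usage (a sketch; MEMADDR and GDBARCH stand in for caller
   context): fetch a 4-byte value from the inferior and convert it
   using the target byte order:

     gdb_byte buf[4];
     ULONGEST val;

     if (target_read_memory (memaddr, buf, sizeof buf) != 0)
       error (_("Cannot read memory at %s"), paddress (gdbarch, memaddr));
     val = extract_unsigned_integer (buf, sizeof buf,
                                     gdbarch_byte_order (gdbarch));
*/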
1771 /* Like target_read_memory, but specify explicitly that this is a read from
1772 the target's stack. This may trigger different cache behavior. */
1773
1774 int
1775 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, int len)
1776 {
1777 /* Dispatch to the topmost target, not the flattened current_target.
1778 Memory accesses check target->to_has_(all_)memory, and the
1779 flattened target doesn't inherit those. */
1780
1781 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1782 myaddr, memaddr, len) == len)
1783 return 0;
1784 else
1785 return EIO;
1786 }
1787
1788 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1789 Returns either 0 for success or an errno value if any error occurs.
1790 If an error occurs, no guarantee is made about how much data got written.
1791 Callers that can deal with partial writes should call target_write. */
1792
1793 int
1794 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, int len)
1795 {
1796 /* Dispatch to the topmost target, not the flattened current_target.
1797 Memory accesses check target->to_has_(all_)memory, and the
1798 flattened target doesn't inherit those. */
1799 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1800 myaddr, memaddr, len) == len)
1801 return 0;
1802 else
1803 return EIO;
1804 }
1805
1806 /* Write LEN bytes from MYADDR to target raw memory at address
1807 MEMADDR. Returns either 0 for success or an errno value if any
1808 error occurs. If an error occurs, no guarantee is made about how
1809 much data got written. Callers that can deal with partial writes
1810 should call target_write. */
1811
1812 int
1813 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, int len)
1814 {
1815 /* Dispatch to the topmost target, not the flattened current_target.
1816 Memory accesses check target->to_has_(all_)memory, and the
1817 flattened target doesn't inherit those. */
1818 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1819 myaddr, memaddr, len) == len)
1820 return 0;
1821 else
1822 return EIO;
1823 }
1824
1825 /* Fetch the target's memory map. */
1826
1827 VEC(mem_region_s) *
1828 target_memory_map (void)
1829 {
1830 VEC(mem_region_s) *result;
1831 struct mem_region *last_one, *this_one;
1832 int ix;
1833 struct target_ops *t;
1834
1835 if (targetdebug)
1836 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1837
1838 for (t = current_target.beneath; t != NULL; t = t->beneath)
1839 if (t->to_memory_map != NULL)
1840 break;
1841
1842 if (t == NULL)
1843 return NULL;
1844
1845 result = t->to_memory_map (t);
1846 if (result == NULL)
1847 return NULL;
1848
1849 qsort (VEC_address (mem_region_s, result),
1850 VEC_length (mem_region_s, result),
1851 sizeof (struct mem_region), mem_region_cmp);
1852
1853 /* Check that regions do not overlap. Simultaneously assign
1854 a numbering for the "mem" commands to use to refer to
1855 each region. */
1856 last_one = NULL;
1857 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1858 {
1859 this_one->number = ix;
1860
1861 if (last_one && last_one->hi > this_one->lo)
1862 {
1863 warning (_("Overlapping regions in memory map: ignoring"));
1864 VEC_free (mem_region_s, result);
1865 return NULL;
1866 }
1867 last_one = this_one;
1868 }
1869
1870 return result;
1871 }
1872
1873 void
1874 target_flash_erase (ULONGEST address, LONGEST length)
1875 {
1876 struct target_ops *t;
1877
1878 for (t = current_target.beneath; t != NULL; t = t->beneath)
1879 if (t->to_flash_erase != NULL)
1880 {
1881 if (targetdebug)
1882 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1883 hex_string (address), phex (length, 0));
1884 t->to_flash_erase (t, address, length);
1885 return;
1886 }
1887
1888 tcomplain ();
1889 }
1890
1891 void
1892 target_flash_done (void)
1893 {
1894 struct target_ops *t;
1895
1896 for (t = current_target.beneath; t != NULL; t = t->beneath)
1897 if (t->to_flash_done != NULL)
1898 {
1899 if (targetdebug)
1900 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1901 t->to_flash_done (t);
1902 return;
1903 }
1904
1905 tcomplain ();
1906 }
1907
1908 static void
1909 show_trust_readonly (struct ui_file *file, int from_tty,
1910 struct cmd_list_element *c, const char *value)
1911 {
1912 fprintf_filtered (file,
1913 _("Mode for reading from readonly sections is %s.\n"),
1914 value);
1915 }
1916
1917 /* More generic transfers. */
1918
1919 static LONGEST
1920 default_xfer_partial (struct target_ops *ops, enum target_object object,
1921 const char *annex, gdb_byte *readbuf,
1922 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
1923 {
1924 if (object == TARGET_OBJECT_MEMORY
1925 && ops->deprecated_xfer_memory != NULL)
1926 /* If available, fall back to the target's
1927 "deprecated_xfer_memory" method. */
1928 {
1929 int xfered = -1;
1930
1931 errno = 0;
1932 if (writebuf != NULL)
1933 {
1934 void *buffer = xmalloc (len);
1935 struct cleanup *cleanup = make_cleanup (xfree, buffer);
1936
1937 memcpy (buffer, writebuf, len);
1938 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1939 1/*write*/, NULL, ops);
1940 do_cleanups (cleanup);
1941 }
1942 if (readbuf != NULL)
1943 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
1944 0/*read*/, NULL, ops);
1945 if (xfered > 0)
1946 return xfered;
1947 else if (xfered == 0 && errno == 0)
1948 /* "deprecated_xfer_memory" uses 0, cross checked against
1949 ERRNO as one indication of an error. */
1950 return 0;
1951 else
1952 return -1;
1953 }
1954 else if (ops->beneath != NULL)
1955 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1956 readbuf, writebuf, offset, len);
1957 else
1958 return -1;
1959 }
1960
1961 /* The xfer_partial handler for the topmost target. Unlike the default,
1962 it does not need to handle memory specially; it just passes all
1963 requests down the stack. */
1964
1965 static LONGEST
1966 current_xfer_partial (struct target_ops *ops, enum target_object object,
1967 const char *annex, gdb_byte *readbuf,
1968 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
1969 {
1970 if (ops->beneath != NULL)
1971 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1972 readbuf, writebuf, offset, len);
1973 else
1974 return -1;
1975 }
1976
1977 /* Target vector read/write partial wrapper functions. */
1978
1979 static LONGEST
1980 target_read_partial (struct target_ops *ops,
1981 enum target_object object,
1982 const char *annex, gdb_byte *buf,
1983 ULONGEST offset, LONGEST len)
1984 {
1985 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len);
1986 }
1987
1988 static LONGEST
1989 target_write_partial (struct target_ops *ops,
1990 enum target_object object,
1991 const char *annex, const gdb_byte *buf,
1992 ULONGEST offset, LONGEST len)
1993 {
1994 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len);
1995 }
1996
1997 /* Wrappers to perform the full transfer. */
1998
1999 /* For docs on target_read see target.h. */
2000
2001 LONGEST
2002 target_read (struct target_ops *ops,
2003 enum target_object object,
2004 const char *annex, gdb_byte *buf,
2005 ULONGEST offset, LONGEST len)
2006 {
2007 LONGEST xfered = 0;
2008
2009 while (xfered < len)
2010 {
2011 LONGEST xfer = target_read_partial (ops, object, annex,
2012 (gdb_byte *) buf + xfered,
2013 offset + xfered, len - xfered);
2014
2015 /* Call an observer, notifying them of the xfer progress? */
2016 if (xfer == 0)
2017 return xfered;
2018 if (xfer < 0)
2019 return -1;
2020 xfered += xfer;
2021 QUIT;
2022 }
2023 return len;
2024 }
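
/* Editorial usage sketch -- not part of the original sources.  Unlike
   to_xfer_partial, target_read above either transfers all LEN bytes
   or stops early, so callers usually just compare the result against
   LEN; OPS, ADDR and BUF are hypothetical.  */
#if 0
static int
example_read_all (struct target_ops *ops, CORE_ADDR addr,
                  gdb_byte *buf, LONGEST len)
{
  return target_read (ops, TARGET_OBJECT_MEMORY, NULL,
                      buf, addr, len) == len;
}
#endif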
2025
2026 /* Assuming that the entire [begin, end) range of memory cannot be
2027 read, try to read whatever subrange is possible to read.
2028
2029 The function returns, in RESULT, either zero or one memory block.
2030 If there's a readable subrange at the beginning, it is completely
2031 read and returned. Any further readable subrange will not be read.
2032 Otherwise, if there's a readable subrange at the end, it will be
2033 completely read and returned. Any readable subranges before it
2034 (obviously, not starting at the beginning) will be ignored.  In
2035 other cases -- either no readable subrange, or readable subrange(s)
2036 that are neither at the beginning nor at the end -- nothing is returned.
2037
2038 The purpose of this function is to handle a read across a boundary
2039 of accessible memory in the case when a memory map is not available.
2040 The above restrictions are fine for this case, but will give
2041 incorrect results if the memory is 'patchy'.  However, supporting
2042 'patchy' memory would require trying to read every single byte,
2043 and that seems an unacceptable solution.  An explicit memory map is
2044 recommended for this case -- and read_memory_robust will
2045 take care of reading multiple ranges then. */
2046
2047 static void
2048 read_whatever_is_readable (struct target_ops *ops,
2049 ULONGEST begin, ULONGEST end,
2050 VEC(memory_read_result_s) **result)
2051 {
2052 gdb_byte *buf = xmalloc (end - begin);
2053 ULONGEST current_begin = begin;
2054 ULONGEST current_end = end;
2055 int forward;
2056 memory_read_result_s r;
2057
2058 /* If we previously failed to read 1 byte, nothing can be done here. */
2059 if (end - begin <= 1)
2060 {
2061 xfree (buf);
2062 return;
2063 }
2064
2065 /* Check that either the first or the last byte is readable, and give up
2066 if not.  This heuristic is meant to permit reading accessible memory
2067 at the boundary of an accessible region. */
2068 if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2069 buf, begin, 1) == 1)
2070 {
2071 forward = 1;
2072 ++current_begin;
2073 }
2074 else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2075 buf + (end-begin) - 1, end - 1, 1) == 1)
2076 {
2077 forward = 0;
2078 --current_end;
2079 }
2080 else
2081 {
2082 xfree (buf);
2083 return;
2084 }
2085
2086 /* The loop invariant is that the range [current_begin, current_end) was
2087 previously found to be not readable as a whole.
2088
2089 Note the loop condition -- if the range has only 1 byte, we can't divide
2090 it further, so there's no point trying. */
2091 while (current_end - current_begin > 1)
2092 {
2093 ULONGEST first_half_begin, first_half_end;
2094 ULONGEST second_half_begin, second_half_end;
2095 LONGEST xfer;
2096 ULONGEST middle = current_begin + (current_end - current_begin)/2;
2097
2098 if (forward)
2099 {
2100 first_half_begin = current_begin;
2101 first_half_end = middle;
2102 second_half_begin = middle;
2103 second_half_end = current_end;
2104 }
2105 else
2106 {
2107 first_half_begin = middle;
2108 first_half_end = current_end;
2109 second_half_begin = current_begin;
2110 second_half_end = middle;
2111 }
2112
2113 xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2114 buf + (first_half_begin - begin),
2115 first_half_begin,
2116 first_half_end - first_half_begin);
2117
2118 if (xfer == first_half_end - first_half_begin)
2119 {
2120 /* This half reads fine.  So the error must be in the
2121 other half. */
2122 current_begin = second_half_begin;
2123 current_end = second_half_end;
2124 }
2125 else
2126 {
2127 /* This half is not readable.  Because we've already read one byte, we
2128 know some part of this half is actually readable.  Go to the next
2129 iteration to divide again and try to read.
2130
2131 We don't handle the other half, because this function only tries
2132 to read a single readable subrange. */
2133 current_begin = first_half_begin;
2134 current_end = first_half_end;
2135 }
2136 }
2137
2138 if (forward)
2139 {
2140 /* The [begin, current_begin) range has been read. */
2141 r.begin = begin;
2142 r.end = current_begin;
2143 r.data = buf;
2144 }
2145 else
2146 {
2147 /* The [current_end, end) range has been read. */
2148 LONGEST rlen = end - current_end;
2149
2150 r.data = xmalloc (rlen);
2151 memcpy (r.data, buf + current_end - begin, rlen);
2152 r.begin = current_end;
2153 r.end = end;
2154 xfree (buf);
2155 }
2156 VEC_safe_push (memory_read_result_s, (*result), &r);
2157 }
2158
2159 void
2160 free_memory_read_result_vector (void *x)
2161 {
2162 VEC(memory_read_result_s) *v = x;
2163 memory_read_result_s *current;
2164 int ix;
2165
2166 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2167 {
2168 xfree (current->data);
2169 }
2170 VEC_free (memory_read_result_s, v);
2171 }
2172
2173 VEC(memory_read_result_s) *
2174 read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
2175 {
2176 VEC(memory_read_result_s) *result = 0;
2177
2178 LONGEST xfered = 0;
2179 while (xfered < len)
2180 {
2181 struct mem_region *region = lookup_mem_region (offset + xfered);
2182 LONGEST rlen;
2183
2184 /* If there is no explicit region, a fake one should be created. */
2185 gdb_assert (region);
2186
2187 if (region->hi == 0)
2188 rlen = len - xfered;
2189 else
2190 rlen = region->hi - offset;
2191
2192 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
2193 {
2194 /* Cannot read this region. Note that we can end up here only
2195 if the region is explicitly marked inaccessible, or
2196 'inaccessible-by-default' is in effect. */
2197 xfered += rlen;
2198 }
2199 else
2200 {
2201 LONGEST to_read = min (len - xfered, rlen);
2202 gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);
2203
2204 LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2205 (gdb_byte *) buffer,
2206 offset + xfered, to_read);
2207 /* Call an observer, notifying them of the xfer progress? */
2208 if (xfer <= 0)
2209 {
2210 /* Got an error reading full chunk. See if maybe we can read
2211 some subrange. */
2212 xfree (buffer);
2213 read_whatever_is_readable (ops, offset + xfered,
2214 offset + xfered + to_read, &result);
2215 xfered += to_read;
2216 }
2217 else
2218 {
2219 struct memory_read_result r;
2220 r.data = buffer;
2221 r.begin = offset + xfered;
2222 r.end = r.begin + xfer;
2223 VEC_safe_push (memory_read_result_s, result, &r);
2224 xfered += xfer;
2225 }
2226 QUIT;
2227 }
2228 }
2229 return result;
2230 }
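
/* Editorial usage sketch -- not part of the original sources.  It
   shows how a caller might consume the vector built by
   read_memory_robust above and release it with
   free_memory_read_result_vector; the range is hypothetical.  */
#if 0
static void
example_robust_read (struct target_ops *ops, ULONGEST start, LONGEST len)
{
  VEC(memory_read_result_s) *res = read_memory_robust (ops, start, len);
  struct cleanup *cleanup
    = make_cleanup (free_memory_read_result_vector, res);
  memory_read_result_s *r;
  int ix;

  for (ix = 0; VEC_iterate (memory_read_result_s, res, ix, r); ix++)
    printf_filtered ("readable chunk [%s, %s)\n",
                     hex_string (r->begin), hex_string (r->end));

  do_cleanups (cleanup);
}
#endif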
2231
2232
2233 /* An alternative to target_write with progress callbacks. */
2234
2235 LONGEST
2236 target_write_with_progress (struct target_ops *ops,
2237 enum target_object object,
2238 const char *annex, const gdb_byte *buf,
2239 ULONGEST offset, LONGEST len,
2240 void (*progress) (ULONGEST, void *), void *baton)
2241 {
2242 LONGEST xfered = 0;
2243
2244 /* Give the progress callback a chance to set up. */
2245 if (progress)
2246 (*progress) (0, baton);
2247
2248 while (xfered < len)
2249 {
2250 LONGEST xfer = target_write_partial (ops, object, annex,
2251 (gdb_byte *) buf + xfered,
2252 offset + xfered, len - xfered);
2253
2254 if (xfer == 0)
2255 return xfered;
2256 if (xfer < 0)
2257 return -1;
2258
2259 if (progress)
2260 (*progress) (xfer, baton);
2261
2262 xfered += xfer;
2263 QUIT;
2264 }
2265 return len;
2266 }
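
/* Editorial usage sketch -- not part of the original sources.  It
   shows the shape of a progress callback accepted by
   target_write_with_progress above; the baton simply accumulates the
   number of bytes reported so far.  The TARGET_OBJECT_FLASH call site
   in the trailing comment is hypothetical.  */
#if 0
static void
example_progress_cb (ULONGEST bytes_written, void *baton)
{
  ULONGEST *total = baton;

  *total += bytes_written;
}

/* Possible call site:

     ULONGEST total = 0;

     target_write_with_progress (ops, TARGET_OBJECT_FLASH, NULL, buf,
                                 address, length,
                                 example_progress_cb, &total);  */
#endif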
2267
2268 /* For docs on target_write see target.h. */
2269
2270 LONGEST
2271 target_write (struct target_ops *ops,
2272 enum target_object object,
2273 const char *annex, const gdb_byte *buf,
2274 ULONGEST offset, LONGEST len)
2275 {
2276 return target_write_with_progress (ops, object, annex, buf, offset, len,
2277 NULL, NULL);
2278 }
2279
2280 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2281 the size of the transferred data. PADDING additional bytes are
2282 available in *BUF_P. This is a helper function for
2283 target_read_alloc; see the declaration of that function for more
2284 information. */
2285
2286 static LONGEST
2287 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2288 const char *annex, gdb_byte **buf_p, int padding)
2289 {
2290 size_t buf_alloc, buf_pos;
2291 gdb_byte *buf;
2292 LONGEST n;
2293
2294 /* This function does not have a length parameter; it reads the
2295 entire OBJECT.  Also, it doesn't support objects fetched partly
2296 from one target and partly from another (in a different stratum,
2297 e.g. a core file and an executable). Both reasons make it
2298 unsuitable for reading memory. */
2299 gdb_assert (object != TARGET_OBJECT_MEMORY);
2300
2301 /* Start by reading up to 4K at a time. The target will throttle
2302 this number down if necessary. */
2303 buf_alloc = 4096;
2304 buf = xmalloc (buf_alloc);
2305 buf_pos = 0;
2306 while (1)
2307 {
2308 n = target_read_partial (ops, object, annex, &buf[buf_pos],
2309 buf_pos, buf_alloc - buf_pos - padding);
2310 if (n < 0)
2311 {
2312 /* An error occurred. */
2313 xfree (buf);
2314 return -1;
2315 }
2316 else if (n == 0)
2317 {
2318 /* Read all there was. */
2319 if (buf_pos == 0)
2320 xfree (buf);
2321 else
2322 *buf_p = buf;
2323 return buf_pos;
2324 }
2325
2326 buf_pos += n;
2327
2328 /* If the buffer is filling up, expand it. */
2329 if (buf_alloc < buf_pos * 2)
2330 {
2331 buf_alloc *= 2;
2332 buf = xrealloc (buf, buf_alloc);
2333 }
2334
2335 QUIT;
2336 }
2337 }
2338
2339 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2340 the size of the transferred data.  See the declaration of this
2341 function in "target.h" for more information about the return value. */
2342
2343 LONGEST
2344 target_read_alloc (struct target_ops *ops, enum target_object object,
2345 const char *annex, gdb_byte **buf_p)
2346 {
2347 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
2348 }
2349
2350 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2351 returned as a string, allocated using xmalloc. If an error occurs
2352 or the transfer is unsupported, NULL is returned. Empty objects
2353 are returned as allocated but empty strings. A warning is issued
2354 if the result contains any embedded NUL bytes. */
2355
2356 char *
2357 target_read_stralloc (struct target_ops *ops, enum target_object object,
2358 const char *annex)
2359 {
2360 gdb_byte *buffer;
2361 LONGEST i, transferred;
2362
2363 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2364
2365 if (transferred < 0)
2366 return NULL;
2367
2368 if (transferred == 0)
2369 return xstrdup ("");
2370
2371 buffer[transferred] = 0;
2372
2373 /* Check for embedded NUL bytes; but allow trailing NULs. */
2374 for (i = strlen (buffer); i < transferred; i++)
2375 if (buffer[i] != 0)
2376 {
2377 warning (_("target object %d, annex %s, "
2378 "contained unexpected null characters"),
2379 (int) object, annex ? annex : "(none)");
2380 break;
2381 }
2382
2383 return (char *) buffer;
2384 }
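
/* Editorial usage sketch -- not part of the original sources.  It
   reads a whole string-valued object with target_read_stralloc above
   and frees the result; the OSDATA object and "processes" annex are
   only an illustration.  */
#if 0
static void
example_read_osdata_processes (void)
{
  char *text = target_read_stralloc (&current_target,
                                     TARGET_OBJECT_OSDATA, "processes");

  if (text != NULL)
    {
      puts_filtered (text);
      xfree (text);
    }
}
#endif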
2385
2386 /* Memory transfer methods. */
2387
2388 void
2389 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2390 LONGEST len)
2391 {
2392 /* This method is used to read from an alternate, non-current
2393 target. This read must bypass the overlay support (as symbols
2394 don't match this target), and GDB's internal cache (wrong cache
2395 for this target). */
2396 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2397 != len)
2398 memory_error (EIO, addr);
2399 }
2400
2401 ULONGEST
2402 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2403 int len, enum bfd_endian byte_order)
2404 {
2405 gdb_byte buf[sizeof (ULONGEST)];
2406
2407 gdb_assert (len <= sizeof (buf));
2408 get_target_memory (ops, addr, buf, len);
2409 return extract_unsigned_integer (buf, len, byte_order);
2410 }
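
/* Editorial usage sketch -- not part of the original sources.  It
   reads a 4-byte unsigned value through an alternate target vector,
   bypassing caches and overlays as described above; the little-endian
   byte order is just an example.  */
#if 0
static ULONGEST
example_peek_u32 (struct target_ops *ops, CORE_ADDR addr)
{
  return get_target_memory_unsigned (ops, addr, 4, BFD_ENDIAN_LITTLE);
}
#endif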
2411
2412 int
2413 target_insert_breakpoint (struct gdbarch *gdbarch,
2414 struct bp_target_info *bp_tgt)
2415 {
2416 if (!may_insert_breakpoints)
2417 {
2418 warning (_("May not insert breakpoints"));
2419 return 1;
2420 }
2421
2422 return (*current_target.to_insert_breakpoint) (gdbarch, bp_tgt);
2423 }
2424
2425 int
2426 target_remove_breakpoint (struct gdbarch *gdbarch,
2427 struct bp_target_info *bp_tgt)
2428 {
2429 /* This is kind of a weird case to handle, but the permission might
2430 have been changed after breakpoints were inserted - in which case
2431 we should just take the user literally and assume that any
2432 breakpoints should be left in place. */
2433 if (!may_insert_breakpoints)
2434 {
2435 warning (_("May not remove breakpoints"));
2436 return 1;
2437 }
2438
2439 return (*current_target.to_remove_breakpoint) (gdbarch, bp_tgt);
2440 }
2441
2442 static void
2443 target_info (char *args, int from_tty)
2444 {
2445 struct target_ops *t;
2446 int has_all_mem = 0;
2447
2448 if (symfile_objfile != NULL)
2449 printf_unfiltered (_("Symbols from \"%s\".\n"), symfile_objfile->name);
2450
2451 for (t = target_stack; t != NULL; t = t->beneath)
2452 {
2453 if (!(*t->to_has_memory) (t))
2454 continue;
2455
2456 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2457 continue;
2458 if (has_all_mem)
2459 printf_unfiltered (_("\tWhile running this, "
2460 "GDB does not access memory from...\n"));
2461 printf_unfiltered ("%s:\n", t->to_longname);
2462 (t->to_files_info) (t);
2463 has_all_mem = (*t->to_has_all_memory) (t);
2464 }
2465 }
2466
2467 /* This function is called before any new inferior is created, e.g.
2468 by running a program, attaching, or connecting to a target.
2469 It cleans up any state from previous invocations which might
2470 change between runs. This is a subset of what target_preopen
2471 resets (things which might change between targets). */
2472
2473 void
2474 target_pre_inferior (int from_tty)
2475 {
2476 /* Clear out solib state. Otherwise the solib state of the previous
2477 inferior might have survived and is entirely wrong for the new
2478 target. This has been observed on GNU/Linux using glibc 2.3. How
2479 to reproduce:
2480
2481 bash$ ./foo&
2482 [1] 4711
2483 bash$ ./foo&
2484 [2] 4712
2485 bash$ gdb ./foo
2486 [...]
2487 (gdb) attach 4711
2488 (gdb) detach
2489 (gdb) attach 4712
2490 Cannot access memory at address 0xdeadbeef
2491 */
2492
2493 /* In some OSs, the shared library list is the same/global/shared
2494 across inferiors. If code is shared between processes, so are
2495 memory regions and features. */
2496 if (!gdbarch_has_global_solist (target_gdbarch))
2497 {
2498 no_shared_libraries (NULL, from_tty);
2499
2500 invalidate_target_mem_regions ();
2501
2502 target_clear_description ();
2503 }
2504
2505 agent_capability_invalidate ();
2506 }
2507
2508 /* Callback for iterate_over_inferiors. Gets rid of the given
2509 inferior. */
2510
2511 static int
2512 dispose_inferior (struct inferior *inf, void *args)
2513 {
2514 struct thread_info *thread;
2515
2516 thread = any_thread_of_process (inf->pid);
2517 if (thread)
2518 {
2519 switch_to_thread (thread->ptid);
2520
2521 /* Core inferiors actually should be detached, not killed. */
2522 if (target_has_execution)
2523 target_kill ();
2524 else
2525 target_detach (NULL, 0);
2526 }
2527
2528 return 0;
2529 }
2530
2531 /* This is to be called by the open routine before it does
2532 anything. */
2533
2534 void
2535 target_preopen (int from_tty)
2536 {
2537 dont_repeat ();
2538
2539 if (have_inferiors ())
2540 {
2541 if (!from_tty
2542 || !have_live_inferiors ()
2543 || query (_("A program is being debugged already. Kill it? ")))
2544 iterate_over_inferiors (dispose_inferior, NULL);
2545 else
2546 error (_("Program not killed."));
2547 }
2548
2549 /* Calling target_kill may remove the target from the stack. But if
2550 it doesn't (which seems like a win for UDI), remove it now. */
2551 /* Leave the exec target, though. The user may be switching from a
2552 live process to a core of the same program. */
2553 pop_all_targets_above (file_stratum, 0);
2554
2555 target_pre_inferior (from_tty);
2556 }
2557
2558 /* Detach a target after doing deferred register stores. */
2559
2560 void
2561 target_detach (char *args, int from_tty)
2562 {
2563 struct target_ops* t;
2564
2565 if (gdbarch_has_global_breakpoints (target_gdbarch))
2566 /* Don't remove global breakpoints here. They're removed on
2567 disconnection from the target. */
2568 ;
2569 else
2570 /* If we're in breakpoints-always-inserted mode, have to remove
2571 them before detaching. */
2572 remove_breakpoints_pid (PIDGET (inferior_ptid));
2573
2574 prepare_for_detach ();
2575
2576 for (t = current_target.beneath; t != NULL; t = t->beneath)
2577 {
2578 if (t->to_detach != NULL)
2579 {
2580 t->to_detach (t, args, from_tty);
2581 if (targetdebug)
2582 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2583 args, from_tty);
2584 return;
2585 }
2586 }
2587
2588 internal_error (__FILE__, __LINE__, _("could not find a target to detach"));
2589 }
2590
2591 void
2592 target_disconnect (char *args, int from_tty)
2593 {
2594 struct target_ops *t;
2595
2596 /* If we're in breakpoints-always-inserted mode or if breakpoints
2597 are global across processes, we have to remove them before
2598 disconnecting. */
2599 remove_breakpoints ();
2600
2601 for (t = current_target.beneath; t != NULL; t = t->beneath)
2602 if (t->to_disconnect != NULL)
2603 {
2604 if (targetdebug)
2605 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2606 args, from_tty);
2607 t->to_disconnect (t, args, from_tty);
2608 return;
2609 }
2610
2611 tcomplain ();
2612 }
2613
2614 ptid_t
2615 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2616 {
2617 struct target_ops *t;
2618
2619 for (t = current_target.beneath; t != NULL; t = t->beneath)
2620 {
2621 if (t->to_wait != NULL)
2622 {
2623 ptid_t retval = (*t->to_wait) (t, ptid, status, options);
2624
2625 if (targetdebug)
2626 {
2627 char *status_string;
2628
2629 status_string = target_waitstatus_to_string (status);
2630 fprintf_unfiltered (gdb_stdlog,
2631 "target_wait (%d, status) = %d, %s\n",
2632 PIDGET (ptid), PIDGET (retval),
2633 status_string);
2634 xfree (status_string);
2635 }
2636
2637 return retval;
2638 }
2639 }
2640
2641 noprocess ();
2642 }
2643
2644 char *
2645 target_pid_to_str (ptid_t ptid)
2646 {
2647 struct target_ops *t;
2648
2649 for (t = current_target.beneath; t != NULL; t = t->beneath)
2650 {
2651 if (t->to_pid_to_str != NULL)
2652 return (*t->to_pid_to_str) (t, ptid);
2653 }
2654
2655 return normal_pid_to_str (ptid);
2656 }
2657
2658 char *
2659 target_thread_name (struct thread_info *info)
2660 {
2661 struct target_ops *t;
2662
2663 for (t = current_target.beneath; t != NULL; t = t->beneath)
2664 {
2665 if (t->to_thread_name != NULL)
2666 return (*t->to_thread_name) (info);
2667 }
2668
2669 return NULL;
2670 }
2671
2672 void
2673 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2674 {
2675 struct target_ops *t;
2676
2677 target_dcache_invalidate ();
2678
2679 for (t = current_target.beneath; t != NULL; t = t->beneath)
2680 {
2681 if (t->to_resume != NULL)
2682 {
2683 t->to_resume (t, ptid, step, signal);
2684 if (targetdebug)
2685 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2686 PIDGET (ptid),
2687 step ? "step" : "continue",
2688 gdb_signal_to_name (signal));
2689
2690 registers_changed_ptid (ptid);
2691 set_executing (ptid, 1);
2692 set_running (ptid, 1);
2693 clear_inline_frame_state (ptid);
2694 return;
2695 }
2696 }
2697
2698 noprocess ();
2699 }
2700
2701 void
2702 target_pass_signals (int numsigs, unsigned char *pass_signals)
2703 {
2704 struct target_ops *t;
2705
2706 for (t = current_target.beneath; t != NULL; t = t->beneath)
2707 {
2708 if (t->to_pass_signals != NULL)
2709 {
2710 if (targetdebug)
2711 {
2712 int i;
2713
2714 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2715 numsigs);
2716
2717 for (i = 0; i < numsigs; i++)
2718 if (pass_signals[i])
2719 fprintf_unfiltered (gdb_stdlog, " %s",
2720 gdb_signal_to_name (i));
2721
2722 fprintf_unfiltered (gdb_stdlog, " })\n");
2723 }
2724
2725 (*t->to_pass_signals) (numsigs, pass_signals);
2726 return;
2727 }
2728 }
2729 }
2730
2731 void
2732 target_program_signals (int numsigs, unsigned char *program_signals)
2733 {
2734 struct target_ops *t;
2735
2736 for (t = current_target.beneath; t != NULL; t = t->beneath)
2737 {
2738 if (t->to_program_signals != NULL)
2739 {
2740 if (targetdebug)
2741 {
2742 int i;
2743
2744 fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
2745 numsigs);
2746
2747 for (i = 0; i < numsigs; i++)
2748 if (program_signals[i])
2749 fprintf_unfiltered (gdb_stdlog, " %s",
2750 gdb_signal_to_name (i));
2751
2752 fprintf_unfiltered (gdb_stdlog, " })\n");
2753 }
2754
2755 (*t->to_program_signals) (numsigs, program_signals);
2756 return;
2757 }
2758 }
2759 }
2760
2761 /* Look through the list of possible targets for a target that can
2762 follow forks. */
2763
2764 int
2765 target_follow_fork (int follow_child)
2766 {
2767 struct target_ops *t;
2768
2769 for (t = current_target.beneath; t != NULL; t = t->beneath)
2770 {
2771 if (t->to_follow_fork != NULL)
2772 {
2773 int retval = t->to_follow_fork (t, follow_child);
2774
2775 if (targetdebug)
2776 fprintf_unfiltered (gdb_stdlog, "target_follow_fork (%d) = %d\n",
2777 follow_child, retval);
2778 return retval;
2779 }
2780 }
2781
2782 /* Some target returned a fork event, but did not know how to follow it. */
2783 internal_error (__FILE__, __LINE__,
2784 _("could not find a target to follow fork"));
2785 }
2786
2787 void
2788 target_mourn_inferior (void)
2789 {
2790 struct target_ops *t;
2791
2792 for (t = current_target.beneath; t != NULL; t = t->beneath)
2793 {
2794 if (t->to_mourn_inferior != NULL)
2795 {
2796 t->to_mourn_inferior (t);
2797 if (targetdebug)
2798 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2799
2800 /* We no longer need to keep handles on any of the object files.
2801 Make sure to release them to avoid unnecessarily locking any
2802 of them while we're not actually debugging. */
2803 bfd_cache_close_all ();
2804
2805 return;
2806 }
2807 }
2808
2809 internal_error (__FILE__, __LINE__,
2810 _("could not find a target to follow mourn inferior"));
2811 }
2812
2813 /* Look for a target which can describe architectural features, starting
2814 from TARGET. If we find one, return its description. */
2815
2816 const struct target_desc *
2817 target_read_description (struct target_ops *target)
2818 {
2819 struct target_ops *t;
2820
2821 for (t = target; t != NULL; t = t->beneath)
2822 if (t->to_read_description != NULL)
2823 {
2824 const struct target_desc *tdesc;
2825
2826 tdesc = t->to_read_description (t);
2827 if (tdesc)
2828 return tdesc;
2829 }
2830
2831 return NULL;
2832 }
2833
2834 /* The default implementation of to_search_memory.
2835 This implements a basic search of memory, reading target memory and
2836 performing the search here (as opposed to performing the search on the
2837 target side with, for example, gdbserver). */
2838
2839 int
2840 simple_search_memory (struct target_ops *ops,
2841 CORE_ADDR start_addr, ULONGEST search_space_len,
2842 const gdb_byte *pattern, ULONGEST pattern_len,
2843 CORE_ADDR *found_addrp)
2844 {
2845 /* NOTE: also defined in find.c testcase. */
2846 #define SEARCH_CHUNK_SIZE 16000
2847 const unsigned chunk_size = SEARCH_CHUNK_SIZE;
2848 /* Buffer to hold memory contents for searching. */
2849 gdb_byte *search_buf;
2850 unsigned search_buf_size;
2851 struct cleanup *old_cleanups;
2852
2853 search_buf_size = chunk_size + pattern_len - 1;
2854
2855 /* No point in trying to allocate a buffer larger than the search space. */
2856 if (search_space_len < search_buf_size)
2857 search_buf_size = search_space_len;
2858
2859 search_buf = malloc (search_buf_size);
2860 if (search_buf == NULL)
2861 error (_("Unable to allocate memory to perform the search."));
2862 old_cleanups = make_cleanup (free_current_contents, &search_buf);
2863
2864 /* Prime the search buffer. */
2865
2866 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2867 search_buf, start_addr, search_buf_size) != search_buf_size)
2868 {
2869 warning (_("Unable to access target memory at %s, halting search."),
2870 hex_string (start_addr));
2871 do_cleanups (old_cleanups);
2872 return -1;
2873 }
2874
2875 /* Perform the search.
2876
2877 The loop is kept simple by allocating [N + pattern-length - 1] bytes.
2878 When we've scanned N bytes we copy the trailing bytes to the start and
2879 read in another N bytes. */
2880
2881 while (search_space_len >= pattern_len)
2882 {
2883 gdb_byte *found_ptr;
2884 unsigned nr_search_bytes = min (search_space_len, search_buf_size);
2885
2886 found_ptr = memmem (search_buf, nr_search_bytes,
2887 pattern, pattern_len);
2888
2889 if (found_ptr != NULL)
2890 {
2891 CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);
2892
2893 *found_addrp = found_addr;
2894 do_cleanups (old_cleanups);
2895 return 1;
2896 }
2897
2898 /* Not found in this chunk, skip to next chunk. */
2899
2900 /* Don't let search_space_len wrap here, it's unsigned. */
2901 if (search_space_len >= chunk_size)
2902 search_space_len -= chunk_size;
2903 else
2904 search_space_len = 0;
2905
2906 if (search_space_len >= pattern_len)
2907 {
2908 unsigned keep_len = search_buf_size - chunk_size;
2909 CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
2910 int nr_to_read;
2911
2912 /* Copy the trailing part of the previous iteration to the front
2913 of the buffer for the next iteration. */
2914 gdb_assert (keep_len == pattern_len - 1);
2915 memcpy (search_buf, search_buf + chunk_size, keep_len);
2916
2917 nr_to_read = min (search_space_len - keep_len, chunk_size);
2918
2919 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2920 search_buf + keep_len, read_addr,
2921 nr_to_read) != nr_to_read)
2922 {
2923 warning (_("Unable to access target "
2924 "memory at %s, halting search."),
2925 hex_string (read_addr));
2926 do_cleanups (old_cleanups);
2927 return -1;
2928 }
2929
2930 start_addr += chunk_size;
2931 }
2932 }
2933
2934 /* Not found. */
2935
2936 do_cleanups (old_cleanups);
2937 return 0;
2938 }
2939
2940 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2941 sequence of bytes in PATTERN with length PATTERN_LEN.
2942
2943 The result is 1 if found, 0 if not found, and -1 if there was an error
2944 requiring halting of the search (e.g. memory read error).
2945 If the pattern is found the address is recorded in FOUND_ADDRP. */
2946
2947 int
2948 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2949 const gdb_byte *pattern, ULONGEST pattern_len,
2950 CORE_ADDR *found_addrp)
2951 {
2952 struct target_ops *t;
2953 int found;
2954
2955 /* We don't use INHERIT to set current_target.to_search_memory,
2956 so we have to scan the target stack and handle targetdebug
2957 ourselves. */
2958
2959 if (targetdebug)
2960 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
2961 hex_string (start_addr));
2962
2963 for (t = current_target.beneath; t != NULL; t = t->beneath)
2964 if (t->to_search_memory != NULL)
2965 break;
2966
2967 if (t != NULL)
2968 {
2969 found = t->to_search_memory (t, start_addr, search_space_len,
2970 pattern, pattern_len, found_addrp);
2971 }
2972 else
2973 {
2974 /* If a special version of to_search_memory isn't available, use the
2975 simple version. */
2976 found = simple_search_memory (current_target.beneath,
2977 start_addr, search_space_len,
2978 pattern, pattern_len, found_addrp);
2979 }
2980
2981 if (targetdebug)
2982 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
2983
2984 return found;
2985 }
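
/* Editorial usage sketch -- not part of the original sources.  It
   searches a hypothetical address range for a short byte pattern and
   interprets the 1/0/-1 result convention documented above.  */
#if 0
static void
example_search_for_elf_magic (CORE_ADDR start, ULONGEST space_len)
{
  static const gdb_byte pattern[] = { 0x7f, 'E', 'L', 'F' };
  CORE_ADDR found_addr;
  int found = target_search_memory (start, space_len,
                                    pattern, sizeof pattern, &found_addr);

  if (found == 1)
    printf_filtered ("pattern found at %s\n", hex_string (found_addr));
  else if (found == 0)
    printf_filtered ("pattern not found\n");
  else
    warning (_("memory error while searching"));
}
#endif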
2986
2987 /* Look through the currently pushed targets. If none of them will
2988 be able to restart the currently running process, issue an error
2989 message. */
2990
2991 void
2992 target_require_runnable (void)
2993 {
2994 struct target_ops *t;
2995
2996 for (t = target_stack; t != NULL; t = t->beneath)
2997 {
2998 /* If this target knows how to create a new program, then
2999 assume we will still be able to after killing the current
3000 one. Either killing and mourning will not pop T, or else
3001 find_default_run_target will find it again. */
3002 if (t->to_create_inferior != NULL)
3003 return;
3004
3005 /* Do not worry about thread_stratum targets that can not
3006 create inferiors. Assume they will be pushed again if
3007 necessary, and continue to the process_stratum. */
3008 if (t->to_stratum == thread_stratum
3009 || t->to_stratum == arch_stratum)
3010 continue;
3011
3012 error (_("The \"%s\" target does not support \"run\". "
3013 "Try \"help target\" or \"continue\"."),
3014 t->to_shortname);
3015 }
3016
3017 /* This function is only called if the target is running. In that
3018 case there should have been a process_stratum target and it
3019 should either know how to create inferiors, or not... */
3020 internal_error (__FILE__, __LINE__, _("No targets found"));
3021 }
3022
3023 /* Look through the list of possible targets for a target that can
3024 execute a run or attach command without any other data. This is
3025 used to locate the default process stratum.
3026
3027 If DO_MESG is not NULL, the result is always valid (error() is
3028 called for errors); else, return NULL on error. */
3029
3030 static struct target_ops *
3031 find_default_run_target (char *do_mesg)
3032 {
3033 struct target_ops **t;
3034 struct target_ops *runable = NULL;
3035 int count;
3036
3037 count = 0;
3038
3039 for (t = target_structs; t < target_structs + target_struct_size;
3040 ++t)
3041 {
3042 if ((*t)->to_can_run && target_can_run (*t))
3043 {
3044 runable = *t;
3045 ++count;
3046 }
3047 }
3048
3049 if (count != 1)
3050 {
3051 if (do_mesg)
3052 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
3053 else
3054 return NULL;
3055 }
3056
3057 return runable;
3058 }
3059
3060 void
3061 find_default_attach (struct target_ops *ops, char *args, int from_tty)
3062 {
3063 struct target_ops *t;
3064
3065 t = find_default_run_target ("attach");
3066 (t->to_attach) (t, args, from_tty);
3067 return;
3068 }
3069
3070 void
3071 find_default_create_inferior (struct target_ops *ops,
3072 char *exec_file, char *allargs, char **env,
3073 int from_tty)
3074 {
3075 struct target_ops *t;
3076
3077 t = find_default_run_target ("run");
3078 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
3079 return;
3080 }
3081
3082 static int
3083 find_default_can_async_p (void)
3084 {
3085 struct target_ops *t;
3086
3087 /* This may be called before the target is pushed on the stack;
3088 look for the default process stratum. If there's none, gdb isn't
3089 configured with a native debugger, and target remote isn't
3090 connected yet. */
3091 t = find_default_run_target (NULL);
3092 if (t && t->to_can_async_p)
3093 return (t->to_can_async_p) ();
3094 return 0;
3095 }
3096
3097 static int
3098 find_default_is_async_p (void)
3099 {
3100 struct target_ops *t;
3101
3102 /* This may be called before the target is pushed on the stack;
3103 look for the default process stratum. If there's none, gdb isn't
3104 configured with a native debugger, and target remote isn't
3105 connected yet. */
3106 t = find_default_run_target (NULL);
3107 if (t && t->to_is_async_p)
3108 return (t->to_is_async_p) ();
3109 return 0;
3110 }
3111
3112 static int
3113 find_default_supports_non_stop (void)
3114 {
3115 struct target_ops *t;
3116
3117 t = find_default_run_target (NULL);
3118 if (t && t->to_supports_non_stop)
3119 return (t->to_supports_non_stop) ();
3120 return 0;
3121 }
3122
3123 int
3124 target_supports_non_stop (void)
3125 {
3126 struct target_ops *t;
3127
3128 for (t = &current_target; t != NULL; t = t->beneath)
3129 if (t->to_supports_non_stop)
3130 return t->to_supports_non_stop ();
3131
3132 return 0;
3133 }
3134
3135 /* Implement the "info proc" command. */
3136
3137 void
3138 target_info_proc (char *args, enum info_proc_what what)
3139 {
3140 struct target_ops *t;
3141
3142 /* If we're already connected to something that can get us OS
3143 related data, use it. Otherwise, try using the native
3144 target. */
3145 if (current_target.to_stratum >= process_stratum)
3146 t = current_target.beneath;
3147 else
3148 t = find_default_run_target (NULL);
3149
3150 for (; t != NULL; t = t->beneath)
3151 {
3152 if (t->to_info_proc != NULL)
3153 {
3154 t->to_info_proc (t, args, what);
3155
3156 if (targetdebug)
3157 fprintf_unfiltered (gdb_stdlog,
3158 "target_info_proc (\"%s\", %d)\n", args, what);
3159
3160 return;
3161 }
3162 }
3163
3164 error (_("Not supported on this target."));
3165 }
3166
3167 static int
3168 find_default_supports_disable_randomization (void)
3169 {
3170 struct target_ops *t;
3171
3172 t = find_default_run_target (NULL);
3173 if (t && t->to_supports_disable_randomization)
3174 return (t->to_supports_disable_randomization) ();
3175 return 0;
3176 }
3177
3178 int
3179 target_supports_disable_randomization (void)
3180 {
3181 struct target_ops *t;
3182
3183 for (t = &current_target; t != NULL; t = t->beneath)
3184 if (t->to_supports_disable_randomization)
3185 return t->to_supports_disable_randomization ();
3186
3187 return 0;
3188 }
3189
3190 char *
3191 target_get_osdata (const char *type)
3192 {
3193 struct target_ops *t;
3194
3195 /* If we're already connected to something that can get us OS
3196 related data, use it. Otherwise, try using the native
3197 target. */
3198 if (current_target.to_stratum >= process_stratum)
3199 t = current_target.beneath;
3200 else
3201 t = find_default_run_target ("get OS data");
3202
3203 if (!t)
3204 return NULL;
3205
3206 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3207 }
3208
3209 /* Determine the current address space of thread PTID. */
3210
3211 struct address_space *
3212 target_thread_address_space (ptid_t ptid)
3213 {
3214 struct address_space *aspace;
3215 struct inferior *inf;
3216 struct target_ops *t;
3217
3218 for (t = current_target.beneath; t != NULL; t = t->beneath)
3219 {
3220 if (t->to_thread_address_space != NULL)
3221 {
3222 aspace = t->to_thread_address_space (t, ptid);
3223 gdb_assert (aspace);
3224
3225 if (targetdebug)
3226 fprintf_unfiltered (gdb_stdlog,
3227 "target_thread_address_space (%s) = %d\n",
3228 target_pid_to_str (ptid),
3229 address_space_num (aspace));
3230 return aspace;
3231 }
3232 }
3233
3234 /* Fall-back to the "main" address space of the inferior. */
3235 inf = find_inferior_pid (ptid_get_pid (ptid));
3236
3237 if (inf == NULL || inf->aspace == NULL)
3238 internal_error (__FILE__, __LINE__,
3239 _("Can't determine the current "
3240 "address space of thread %s\n"),
3241 target_pid_to_str (ptid));
3242
3243 return inf->aspace;
3244 }
3245
3246
3247 /* Target file operations. */
3248
3249 static struct target_ops *
3250 default_fileio_target (void)
3251 {
3252 /* If we're already connected to something that can perform
3253 file I/O, use it. Otherwise, try using the native target. */
3254 if (current_target.to_stratum >= process_stratum)
3255 return current_target.beneath;
3256 else
3257 return find_default_run_target ("file I/O");
3258 }
3259
3260 /* Open FILENAME on the target, using FLAGS and MODE. Return a
3261 target file descriptor, or -1 if an error occurs (and set
3262 *TARGET_ERRNO). */
3263 int
3264 target_fileio_open (const char *filename, int flags, int mode,
3265 int *target_errno)
3266 {
3267 struct target_ops *t;
3268
3269 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3270 {
3271 if (t->to_fileio_open != NULL)
3272 {
3273 int fd = t->to_fileio_open (filename, flags, mode, target_errno);
3274
3275 if (targetdebug)
3276 fprintf_unfiltered (gdb_stdlog,
3277 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3278 filename, flags, mode,
3279 fd, fd != -1 ? 0 : *target_errno);
3280 return fd;
3281 }
3282 }
3283
3284 *target_errno = FILEIO_ENOSYS;
3285 return -1;
3286 }
3287
3288 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
3289 Return the number of bytes written, or -1 if an error occurs
3290 (and set *TARGET_ERRNO). */
3291 int
3292 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3293 ULONGEST offset, int *target_errno)
3294 {
3295 struct target_ops *t;
3296
3297 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3298 {
3299 if (t->to_fileio_pwrite != NULL)
3300 {
3301 int ret = t->to_fileio_pwrite (fd, write_buf, len, offset,
3302 target_errno);
3303
3304 if (targetdebug)
3305 fprintf_unfiltered (gdb_stdlog,
3306 "target_fileio_pwrite (%d,...,%d,%s) "
3307 "= %d (%d)\n",
3308 fd, len, pulongest (offset),
3309 ret, ret != -1 ? 0 : *target_errno);
3310 return ret;
3311 }
3312 }
3313
3314 *target_errno = FILEIO_ENOSYS;
3315 return -1;
3316 }
3317
3318 /* Read up to LEN bytes from FD on the target into READ_BUF.
3319 Return the number of bytes read, or -1 if an error occurs
3320 (and set *TARGET_ERRNO). */
3321 int
3322 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3323 ULONGEST offset, int *target_errno)
3324 {
3325 struct target_ops *t;
3326
3327 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3328 {
3329 if (t->to_fileio_pread != NULL)
3330 {
3331 int ret = t->to_fileio_pread (fd, read_buf, len, offset,
3332 target_errno);
3333
3334 if (targetdebug)
3335 fprintf_unfiltered (gdb_stdlog,
3336 "target_fileio_pread (%d,...,%d,%s) "
3337 "= %d (%d)\n",
3338 fd, len, pulongest (offset),
3339 ret, ret != -1 ? 0 : *target_errno);
3340 return ret;
3341 }
3342 }
3343
3344 *target_errno = FILEIO_ENOSYS;
3345 return -1;
3346 }
3347
3348 /* Close FD on the target. Return 0, or -1 if an error occurs
3349 (and set *TARGET_ERRNO). */
3350 int
3351 target_fileio_close (int fd, int *target_errno)
3352 {
3353 struct target_ops *t;
3354
3355 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3356 {
3357 if (t->to_fileio_close != NULL)
3358 {
3359 int ret = t->to_fileio_close (fd, target_errno);
3360
3361 if (targetdebug)
3362 fprintf_unfiltered (gdb_stdlog,
3363 "target_fileio_close (%d) = %d (%d)\n",
3364 fd, ret, ret != -1 ? 0 : *target_errno);
3365 return ret;
3366 }
3367 }
3368
3369 *target_errno = FILEIO_ENOSYS;
3370 return -1;
3371 }
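
/* Editorial usage sketch -- not part of the original sources.  It
   strings together the fileio wrappers above for a complete
   open/pread/close cycle; the filename is made up.  */
#if 0
static void
example_fileio_peek (void)
{
  int target_errno, fd, n;
  gdb_byte buf[128];

  fd = target_fileio_open ("/tmp/example.bin", FILEIO_O_RDONLY, 0,
                           &target_errno);
  if (fd == -1)
    return;

  n = target_fileio_pread (fd, buf, sizeof buf, 0, &target_errno);
  if (n > 0)
    printf_filtered ("read %d bytes from the target\n", n);

  target_fileio_close (fd, &target_errno);
}
#endif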
3372
3373 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3374 occurs (and set *TARGET_ERRNO). */
3375 int
3376 target_fileio_unlink (const char *filename, int *target_errno)
3377 {
3378 struct target_ops *t;
3379
3380 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3381 {
3382 if (t->to_fileio_unlink != NULL)
3383 {
3384 int ret = t->to_fileio_unlink (filename, target_errno);
3385
3386 if (targetdebug)
3387 fprintf_unfiltered (gdb_stdlog,
3388 "target_fileio_unlink (%s) = %d (%d)\n",
3389 filename, ret, ret != -1 ? 0 : *target_errno);
3390 return ret;
3391 }
3392 }
3393
3394 *target_errno = FILEIO_ENOSYS;
3395 return -1;
3396 }
3397
3398 /* Read value of symbolic link FILENAME on the target. Return a
3399 null-terminated string allocated via xmalloc, or NULL if an error
3400 occurs (and set *TARGET_ERRNO). */
3401 char *
3402 target_fileio_readlink (const char *filename, int *target_errno)
3403 {
3404 struct target_ops *t;
3405
3406 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3407 {
3408 if (t->to_fileio_readlink != NULL)
3409 {
3410 char *ret = t->to_fileio_readlink (filename, target_errno);
3411
3412 if (targetdebug)
3413 fprintf_unfiltered (gdb_stdlog,
3414 "target_fileio_readlink (%s) = %s (%d)\n",
3415 filename, ret? ret : "(nil)",
3416 ret? 0 : *target_errno);
3417 return ret;
3418 }
3419 }
3420
3421 *target_errno = FILEIO_ENOSYS;
3422 return NULL;
3423 }
3424
3425 static void
3426 target_fileio_close_cleanup (void *opaque)
3427 {
3428 int fd = *(int *) opaque;
3429 int target_errno;
3430
3431 target_fileio_close (fd, &target_errno);
3432 }
3433
3434 /* Read target file FILENAME. Store the result in *BUF_P and
3435 return the size of the transferred data. PADDING additional bytes are
3436 available in *BUF_P. This is a helper function for
3437 target_fileio_read_alloc; see the declaration of that function for more
3438 information. */
3439
3440 static LONGEST
3441 target_fileio_read_alloc_1 (const char *filename,
3442 gdb_byte **buf_p, int padding)
3443 {
3444 struct cleanup *close_cleanup;
3445 size_t buf_alloc, buf_pos;
3446 gdb_byte *buf;
3447 LONGEST n;
3448 int fd;
3449 int target_errno;
3450
3451 fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
3452 if (fd == -1)
3453 return -1;
3454
3455 close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);
3456
3457 /* Start by reading up to 4K at a time. The target will throttle
3458 this number down if necessary. */
3459 buf_alloc = 4096;
3460 buf = xmalloc (buf_alloc);
3461 buf_pos = 0;
3462 while (1)
3463 {
3464 n = target_fileio_pread (fd, &buf[buf_pos],
3465 buf_alloc - buf_pos - padding, buf_pos,
3466 &target_errno);
3467 if (n < 0)
3468 {
3469 /* An error occurred. */
3470 do_cleanups (close_cleanup);
3471 xfree (buf);
3472 return -1;
3473 }
3474 else if (n == 0)
3475 {
3476 /* Read all there was. */
3477 do_cleanups (close_cleanup);
3478 if (buf_pos == 0)
3479 xfree (buf);
3480 else
3481 *buf_p = buf;
3482 return buf_pos;
3483 }
3484
3485 buf_pos += n;
3486
3487 /* If the buffer is filling up, expand it. */
3488 if (buf_alloc < buf_pos * 2)
3489 {
3490 buf_alloc *= 2;
3491 buf = xrealloc (buf, buf_alloc);
3492 }
3493
3494 QUIT;
3495 }
3496 }
3497
3498 /* Read target file FILENAME. Store the result in *BUF_P and return
3499 the size of the transferred data.  See the declaration of this
3500 function in "target.h" for more information about the return value. */
3501
3502 LONGEST
3503 target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
3504 {
3505 return target_fileio_read_alloc_1 (filename, buf_p, 0);
3506 }
3507
3508 /* Read target file FILENAME. The result is NUL-terminated and
3509 returned as a string, allocated using xmalloc. If an error occurs
3510 or the transfer is unsupported, NULL is returned. Empty objects
3511 are returned as allocated but empty strings. A warning is issued
3512 if the result contains any embedded NUL bytes. */
3513
3514 char *
3515 target_fileio_read_stralloc (const char *filename)
3516 {
3517 gdb_byte *buffer;
3518 LONGEST i, transferred;
3519
3520 transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
3521
3522 if (transferred < 0)
3523 return NULL;
3524
3525 if (transferred == 0)
3526 return xstrdup ("");
3527
3528 buffer[transferred] = 0;
3529
3530 /* Check for embedded NUL bytes; but allow trailing NULs. */
3531 for (i = strlen (buffer); i < transferred; i++)
3532 if (buffer[i] != 0)
3533 {
3534 warning (_("target file %s "
3535 "contained unexpected null characters"),
3536 filename);
3537 break;
3538 }
3539
3540 return (char *) buffer;
3541 }
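
/* Editorial usage sketch -- not part of the original sources.  It
   slurps a text file from the target file system as a NUL-terminated
   string using target_fileio_read_stralloc above; the path is only
   illustrative.  */
#if 0
static void
example_show_target_file (void)
{
  char *text = target_fileio_read_stralloc ("/proc/version");

  if (text != NULL)
    {
      puts_filtered (text);
      xfree (text);
    }
}
#endif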
3542
3543
3544 static int
3545 default_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
3546 {
3547 return (len <= gdbarch_ptr_bit (target_gdbarch) / TARGET_CHAR_BIT);
3548 }
3549
3550 static int
3551 default_watchpoint_addr_within_range (struct target_ops *target,
3552 CORE_ADDR addr,
3553 CORE_ADDR start, int length)
3554 {
3555 return addr >= start && addr < start + length;
3556 }
3557
3558 static struct gdbarch *
3559 default_thread_architecture (struct target_ops *ops, ptid_t ptid)
3560 {
3561 return target_gdbarch;
3562 }
3563
3564 static int
3565 return_zero (void)
3566 {
3567 return 0;
3568 }
3569
3570 static int
3571 return_one (void)
3572 {
3573 return 1;
3574 }
3575
3576 static int
3577 return_minus_one (void)
3578 {
3579 return -1;
3580 }
3581
3582 /* Find a single runnable target in the stack and return it. If for
3583 some reason there is more than one, return NULL. */
3584
3585 struct target_ops *
3586 find_run_target (void)
3587 {
3588 struct target_ops **t;
3589 struct target_ops *runable = NULL;
3590 int count;
3591
3592 count = 0;
3593
3594 for (t = target_structs; t < target_structs + target_struct_size; ++t)
3595 {
3596 if ((*t)->to_can_run && target_can_run (*t))
3597 {
3598 runable = *t;
3599 ++count;
3600 }
3601 }
3602
3603 return (count == 1 ? runable : NULL);
3604 }
3605
3606 /* Find the next target down the stack from the specified target.  */
3609
3610 struct target_ops *
3611 find_target_beneath (struct target_ops *t)
3612 {
3613 return t->beneath;
3614 }
3615
3616 \f
3617 /* The inferior process has died. Long live the inferior! */
3618
3619 void
3620 generic_mourn_inferior (void)
3621 {
3622 ptid_t ptid;
3623
3624 ptid = inferior_ptid;
3625 inferior_ptid = null_ptid;
3626
3627 /* Mark breakpoints uninserted in case something tries to delete a
3628 breakpoint while we delete the inferior's threads (which would
3629 fail, since the inferior is long gone). */
3630 mark_breakpoints_out ();
3631
3632 if (!ptid_equal (ptid, null_ptid))
3633 {
3634 int pid = ptid_get_pid (ptid);
3635 exit_inferior (pid);
3636 }
3637
3638 /* Note this wipes step-resume breakpoints, so needs to be done
3639 after exit_inferior, which ends up referencing the step-resume
3640 breakpoints through clear_thread_inferior_resources. */
3641 breakpoint_init_inferior (inf_exited);
3642
3643 registers_changed ();
3644
3645 reopen_exec_file ();
3646 reinit_frame_cache ();
3647
3648 if (deprecated_detach_hook)
3649 deprecated_detach_hook ();
3650 }
3651 \f
3652 /* Convert a normal process ID to a string. Returns the string in a
3653 static buffer. */
3654
3655 char *
3656 normal_pid_to_str (ptid_t ptid)
3657 {
3658 static char buf[32];
3659
3660 xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
3661 return buf;
3662 }
3663
3664 static char *
3665 dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
3666 {
3667 return normal_pid_to_str (ptid);
3668 }
3669
3670 /* Error-catcher for target_find_memory_regions. */
3671 static int
3672 dummy_find_memory_regions (find_memory_region_ftype ignore1, void *ignore2)
3673 {
3674 error (_("Command not implemented for this target."));
3675 return 0;
3676 }
3677
3678 /* Error-catcher for target_make_corefile_notes. */
3679 static char *
3680 dummy_make_corefile_notes (bfd *ignore1, int *ignore2)
3681 {
3682 error (_("Command not implemented for this target."));
3683 return NULL;
3684 }
3685
3686 /* Error-catcher for target_get_bookmark. */
3687 static gdb_byte *
3688 dummy_get_bookmark (char *ignore1, int ignore2)
3689 {
3690 tcomplain ();
3691 return NULL;
3692 }
3693
3694 /* Error-catcher for target_goto_bookmark. */
3695 static void
3696 dummy_goto_bookmark (gdb_byte *ignore, int from_tty)
3697 {
3698 tcomplain ();
3699 }
3700
3701 /* Set up the handful of non-empty slots needed by the dummy target
3702 vector. */
3703
3704 static void
3705 init_dummy_target (void)
3706 {
3707 dummy_target.to_shortname = "None";
3708 dummy_target.to_longname = "None";
3709 dummy_target.to_doc = "";
3710 dummy_target.to_attach = find_default_attach;
3711 dummy_target.to_detach =
3712 (void (*)(struct target_ops *, char *, int))target_ignore;
3713 dummy_target.to_create_inferior = find_default_create_inferior;
3714 dummy_target.to_can_async_p = find_default_can_async_p;
3715 dummy_target.to_is_async_p = find_default_is_async_p;
3716 dummy_target.to_supports_non_stop = find_default_supports_non_stop;
3717 dummy_target.to_supports_disable_randomization
3718 = find_default_supports_disable_randomization;
3719 dummy_target.to_pid_to_str = dummy_pid_to_str;
3720 dummy_target.to_stratum = dummy_stratum;
3721 dummy_target.to_find_memory_regions = dummy_find_memory_regions;
3722 dummy_target.to_make_corefile_notes = dummy_make_corefile_notes;
3723 dummy_target.to_get_bookmark = dummy_get_bookmark;
3724 dummy_target.to_goto_bookmark = dummy_goto_bookmark;
3725 dummy_target.to_xfer_partial = default_xfer_partial;
3726 dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
3727 dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
3728 dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
3729 dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
3730 dummy_target.to_has_execution
3731 = (int (*) (struct target_ops *, ptid_t)) return_zero;
3732 dummy_target.to_stopped_by_watchpoint = return_zero;
3733 dummy_target.to_stopped_data_address =
3734 (int (*) (struct target_ops *, CORE_ADDR *)) return_zero;
3735 dummy_target.to_magic = OPS_MAGIC;
3736 }
3737 \f
3738 static void
3739 debug_to_open (char *args, int from_tty)
3740 {
3741 debug_target.to_open (args, from_tty);
3742
3743 fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
3744 }
3745
3746 void
3747 target_close (struct target_ops *targ, int quitting)
3748 {
3749 if (targ->to_xclose != NULL)
3750 targ->to_xclose (targ, quitting);
3751 else if (targ->to_close != NULL)
3752 targ->to_close (quitting);
3753
3754 if (targetdebug)
3755 fprintf_unfiltered (gdb_stdlog, "target_close (%d)\n", quitting);
3756 }
3757
3758 void
3759 target_attach (char *args, int from_tty)
3760 {
3761 struct target_ops *t;
3762
3763 for (t = current_target.beneath; t != NULL; t = t->beneath)
3764 {
3765 if (t->to_attach != NULL)
3766 {
3767 t->to_attach (t, args, from_tty);
3768 if (targetdebug)
3769 fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
3770 args, from_tty);
3771 return;
3772 }
3773 }
3774
3775 internal_error (__FILE__, __LINE__,
3776 _("could not find a target to attach"));
3777 }
3778
3779 int
3780 target_thread_alive (ptid_t ptid)
3781 {
3782 struct target_ops *t;
3783
3784 for (t = current_target.beneath; t != NULL; t = t->beneath)
3785 {
3786 if (t->to_thread_alive != NULL)
3787 {
3788 int retval;
3789
3790 retval = t->to_thread_alive (t, ptid);
3791 if (targetdebug)
3792 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3793 PIDGET (ptid), retval);
3794
3795 return retval;
3796 }
3797 }
3798
3799 return 0;
3800 }
3801
3802 void
3803 target_find_new_threads (void)
3804 {
3805 struct target_ops *t;
3806
3807 for (t = current_target.beneath; t != NULL; t = t->beneath)
3808 {
3809 if (t->to_find_new_threads != NULL)
3810 {
3811 t->to_find_new_threads (t);
3812 if (targetdebug)
3813 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
3814
3815 return;
3816 }
3817 }
3818 }
3819
3820 void
3821 target_stop (ptid_t ptid)
3822 {
3823 if (!may_stop)
3824 {
3825 warning (_("May not interrupt or stop the target, ignoring attempt"));
3826 return;
3827 }
3828
3829 (*current_target.to_stop) (ptid);
3830 }
3831
3832 static void
3833 debug_to_post_attach (int pid)
3834 {
3835 debug_target.to_post_attach (pid);
3836
3837 fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
3838 }
3839
3840 /* Return a pretty printed form of target_waitstatus.
3841 Space for the result is malloc'd, caller must free. */
3842
3843 char *
3844 target_waitstatus_to_string (const struct target_waitstatus *ws)
3845 {
3846 const char *kind_str = "status->kind = ";
3847
3848 switch (ws->kind)
3849 {
3850 case TARGET_WAITKIND_EXITED:
3851 return xstrprintf ("%sexited, status = %d",
3852 kind_str, ws->value.integer);
3853 case TARGET_WAITKIND_STOPPED:
3854 return xstrprintf ("%sstopped, signal = %s",
3855 kind_str, gdb_signal_to_name (ws->value.sig));
3856 case TARGET_WAITKIND_SIGNALLED:
3857 return xstrprintf ("%ssignalled, signal = %s",
3858 kind_str, gdb_signal_to_name (ws->value.sig));
3859 case TARGET_WAITKIND_LOADED:
3860 return xstrprintf ("%sloaded", kind_str);
3861 case TARGET_WAITKIND_FORKED:
3862 return xstrprintf ("%sforked", kind_str);
3863 case TARGET_WAITKIND_VFORKED:
3864 return xstrprintf ("%svforked", kind_str);
3865 case TARGET_WAITKIND_EXECD:
3866 return xstrprintf ("%sexecd", kind_str);
3867 case TARGET_WAITKIND_SYSCALL_ENTRY:
3868 return xstrprintf ("%sentered syscall", kind_str);
3869 case TARGET_WAITKIND_SYSCALL_RETURN:
3870 return xstrprintf ("%sexited syscall", kind_str);
3871 case TARGET_WAITKIND_SPURIOUS:
3872 return xstrprintf ("%sspurious", kind_str);
3873 case TARGET_WAITKIND_IGNORE:
3874 return xstrprintf ("%signore", kind_str);
3875 case TARGET_WAITKIND_NO_HISTORY:
3876 return xstrprintf ("%sno-history", kind_str);
3877 case TARGET_WAITKIND_NO_RESUMED:
3878 return xstrprintf ("%sno-resumed", kind_str);
3879 default:
3880 return xstrprintf ("%sunknown???", kind_str);
3881 }
3882 }
3883
3884 static void
3885 debug_print_register (const char * func,
3886 struct regcache *regcache, int regno)
3887 {
3888 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3889
3890 fprintf_unfiltered (gdb_stdlog, "%s ", func);
3891 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
3892 && gdbarch_register_name (gdbarch, regno) != NULL
3893 && gdbarch_register_name (gdbarch, regno)[0] != '\0')
3894 fprintf_unfiltered (gdb_stdlog, "(%s)",
3895 gdbarch_register_name (gdbarch, regno));
3896 else
3897 fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
3898 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
3899 {
3900 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
3901 int i, size = register_size (gdbarch, regno);
3902 unsigned char buf[MAX_REGISTER_SIZE];
3903
3904 regcache_raw_collect (regcache, regno, buf);
3905 fprintf_unfiltered (gdb_stdlog, " = ");
3906 for (i = 0; i < size; i++)
3907 {
3908 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
3909 }
3910 if (size <= sizeof (LONGEST))
3911 {
3912 ULONGEST val = extract_unsigned_integer (buf, size, byte_order);
3913
3914 fprintf_unfiltered (gdb_stdlog, " %s %s",
3915 core_addr_to_string_nz (val), plongest (val));
3916 }
3917 }
3918 fprintf_unfiltered (gdb_stdlog, "\n");
3919 }
3920
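/* Fetch register REGNO into REGCACHE, delegating to the first target
   layer beneath current_target that implements to_fetch_registers.  */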
3921 void
3922 target_fetch_registers (struct regcache *regcache, int regno)
3923 {
3924 struct target_ops *t;
3925
3926 for (t = current_target.beneath; t != NULL; t = t->beneath)
3927 {
3928 if (t->to_fetch_registers != NULL)
3929 {
3930 t->to_fetch_registers (t, regcache, regno);
3931 if (targetdebug)
3932 debug_print_register ("target_fetch_registers", regcache, regno);
3933 return;
3934 }
3935 }
3936 }
3937
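/* Write register REGNO from REGCACHE back to the target: check the
   "may-write-registers" permission, then delegate to the first layer
   that implements to_store_registers; call noprocess if none does.  */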
3938 void
3939 target_store_registers (struct regcache *regcache, int regno)
3940 {
3941 struct target_ops *t;
3942
3943 if (!may_write_registers)
3944 error (_("Writing to registers is not allowed (regno %d)"), regno);
3945
3946 for (t = current_target.beneath; t != NULL; t = t->beneath)
3947 {
3948 if (t->to_store_registers != NULL)
3949 {
3950 t->to_store_registers (t, regcache, regno);
3951 if (targetdebug)
3952 {
3953 debug_print_register ("target_store_registers", regcache, regno);
3954 }
3955 return;
3956 }
3957 }
3958
3959 noprocess ();
3960 }
3961
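/* Return the processor core that thread PTID is running on, as
   reported by the first target layer that implements
   to_core_of_thread, or -1 if no layer does.  */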
3962 int
3963 target_core_of_thread (ptid_t ptid)
3964 {
3965 struct target_ops *t;
3966
3967 for (t = current_target.beneath; t != NULL; t = t->beneath)
3968 {
3969 if (t->to_core_of_thread != NULL)
3970 {
3971 int retval = t->to_core_of_thread (t, ptid);
3972
3973 if (targetdebug)
3974 fprintf_unfiltered (gdb_stdlog,
3975 "target_core_of_thread (%d) = %d\n",
3976 PIDGET (ptid), retval);
3977 return retval;
3978 }
3979 }
3980
3981 return -1;
3982 }
3983
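/* Compare SIZE bytes of target memory starting at MEMADDR against
   DATA, delegating to the first layer that implements
   to_verify_memory; complain if no layer supports the operation.  */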
3984 int
3985 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
3986 {
3987 struct target_ops *t;
3988
3989 for (t = current_target.beneath; t != NULL; t = t->beneath)
3990 {
3991 if (t->to_verify_memory != NULL)
3992 {
3993 int retval = t->to_verify_memory (t, data, memaddr, size);
3994
3995 if (targetdebug)
3996 fprintf_unfiltered (gdb_stdlog,
3997 "target_verify_memory (%s, %s) = %d\n",
3998 paddress (target_gdbarch, memaddr),
3999 pulongest (size),
4000 retval);
4001 return retval;
4002 }
4003 }
4004
4005 tcomplain ();
4006 }
4007
4008 /* The documentation for this function is in its prototype declaration in
4009 target.h. */
4010
4011 int
4012 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
4013 {
4014 struct target_ops *t;
4015
4016 for (t = current_target.beneath; t != NULL; t = t->beneath)
4017 if (t->to_insert_mask_watchpoint != NULL)
4018 {
4019 int ret;
4020
4021 ret = t->to_insert_mask_watchpoint (t, addr, mask, rw);
4022
4023 if (targetdebug)
4024 fprintf_unfiltered (gdb_stdlog, "\
4025 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
4026 core_addr_to_string (addr),
4027 core_addr_to_string (mask), rw, ret);
4028
4029 return ret;
4030 }
4031
4032 return 1;
4033 }
4034
4035 /* The documentation for this function is in its prototype declaration in
4036 target.h. */
4037
4038 int
4039 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
4040 {
4041 struct target_ops *t;
4042
4043 for (t = current_target.beneath; t != NULL; t = t->beneath)
4044 if (t->to_remove_mask_watchpoint != NULL)
4045 {
4046 int ret;
4047
4048 ret = t->to_remove_mask_watchpoint (t, addr, mask, rw);
4049
4050 if (targetdebug)
4051 fprintf_unfiltered (gdb_stdlog, "\
4052 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
4053 core_addr_to_string (addr),
4054 core_addr_to_string (mask), rw, ret);
4055
4056 return ret;
4057 }
4058
4059 return 1;
4060 }
4061
4062 /* The documentation for this function is in its prototype declaration
4063 in target.h. */
4064
4065 int
4066 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
4067 {
4068 struct target_ops *t;
4069
4070 for (t = current_target.beneath; t != NULL; t = t->beneath)
4071 if (t->to_masked_watch_num_registers != NULL)
4072 return t->to_masked_watch_num_registers (t, addr, mask);
4073
4074 return -1;
4075 }
4076
4077 /* The documentation for this function is in its prototype declaration
4078 in target.h. */
4079
4080 int
4081 target_ranged_break_num_registers (void)
4082 {
4083 struct target_ops *t;
4084
4085 for (t = current_target.beneath; t != NULL; t = t->beneath)
4086 if (t->to_ranged_break_num_registers != NULL)
4087 return t->to_ranged_break_num_registers (t);
4088
4089 return -1;
4090 }
4091
4092 static void
4093 debug_to_prepare_to_store (struct regcache *regcache)
4094 {
4095 debug_target.to_prepare_to_store (regcache);
4096
4097 fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
4098 }
4099
4100 static int
4101 deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
4102 int write, struct mem_attrib *attrib,
4103 struct target_ops *target)
4104 {
4105 int retval;
4106
4107 retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
4108 attrib, target);
4109
4110 fprintf_unfiltered (gdb_stdlog,
4111 "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
4112 paddress (target_gdbarch, memaddr), len,
4113 write ? "write" : "read", retval);
4114
4115 if (retval > 0)
4116 {
4117 int i;
4118
4119 fputs_unfiltered (", bytes =", gdb_stdlog);
4120 for (i = 0; i < retval; i++)
4121 {
4122 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
4123 {
4124 if (targetdebug < 2 && i > 0)
4125 {
4126 fprintf_unfiltered (gdb_stdlog, " ...");
4127 break;
4128 }
4129 fprintf_unfiltered (gdb_stdlog, "\n");
4130 }
4131
4132 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
4133 }
4134 }
4135
4136 fputc_unfiltered ('\n', gdb_stdlog);
4137
4138 return retval;
4139 }
4140
4141 static void
4142 debug_to_files_info (struct target_ops *target)
4143 {
4144 debug_target.to_files_info (target);
4145
4146 fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
4147 }
4148
4149 static int
4150 debug_to_insert_breakpoint (struct gdbarch *gdbarch,
4151 struct bp_target_info *bp_tgt)
4152 {
4153 int retval;
4154
4155 retval = debug_target.to_insert_breakpoint (gdbarch, bp_tgt);
4156
4157 fprintf_unfiltered (gdb_stdlog,
4158 "target_insert_breakpoint (%s, xxx) = %ld\n",
4159 core_addr_to_string (bp_tgt->placed_address),
4160 (unsigned long) retval);
4161 return retval;
4162 }
4163
4164 static int
4165 debug_to_remove_breakpoint (struct gdbarch *gdbarch,
4166 struct bp_target_info *bp_tgt)
4167 {
4168 int retval;
4169
4170 retval = debug_target.to_remove_breakpoint (gdbarch, bp_tgt);
4171
4172 fprintf_unfiltered (gdb_stdlog,
4173 "target_remove_breakpoint (%s, xxx) = %ld\n",
4174 core_addr_to_string (bp_tgt->placed_address),
4175 (unsigned long) retval);
4176 return retval;
4177 }
4178
4179 static int
4180 debug_to_can_use_hw_breakpoint (int type, int cnt, int from_tty)
4181 {
4182 int retval;
4183
4184 retval = debug_target.to_can_use_hw_breakpoint (type, cnt, from_tty);
4185
4186 fprintf_unfiltered (gdb_stdlog,
4187 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4188 (unsigned long) type,
4189 (unsigned long) cnt,
4190 (unsigned long) from_tty,
4191 (unsigned long) retval);
4192 return retval;
4193 }
4194
4195 static int
4196 debug_to_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
4197 {
4198 CORE_ADDR retval;
4199
4200 retval = debug_target.to_region_ok_for_hw_watchpoint (addr, len);
4201
4202 fprintf_unfiltered (gdb_stdlog,
4203 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4204 core_addr_to_string (addr), (unsigned long) len,
4205 core_addr_to_string (retval));
4206 return retval;
4207 }
4208
4209 static int
4210 debug_to_can_accel_watchpoint_condition (CORE_ADDR addr, int len, int rw,
4211 struct expression *cond)
4212 {
4213 int retval;
4214
4215 retval = debug_target.to_can_accel_watchpoint_condition (addr, len,
4216 rw, cond);
4217
4218 fprintf_unfiltered (gdb_stdlog,
4219 "target_can_accel_watchpoint_condition "
4220 "(%s, %d, %d, %s) = %ld\n",
4221 core_addr_to_string (addr), len, rw,
4222 host_address_to_string (cond), (unsigned long) retval);
4223 return retval;
4224 }
4225
4226 static int
4227 debug_to_stopped_by_watchpoint (void)
4228 {
4229 int retval;
4230
4231 retval = debug_target.to_stopped_by_watchpoint ();
4232
4233 fprintf_unfiltered (gdb_stdlog,
4234 "target_stopped_by_watchpoint () = %ld\n",
4235 (unsigned long) retval);
4236 return retval;
4237 }
4238
4239 static int
4240 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
4241 {
4242 int retval;
4243
4244 retval = debug_target.to_stopped_data_address (target, addr);
4245
4246 fprintf_unfiltered (gdb_stdlog,
4247 "target_stopped_data_address ([%s]) = %ld\n",
4248 core_addr_to_string (*addr),
4249 (unsigned long)retval);
4250 return retval;
4251 }
4252
4253 static int
4254 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4255 CORE_ADDR addr,
4256 CORE_ADDR start, int length)
4257 {
4258 int retval;
4259
4260 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4261 start, length);
4262
4263 fprintf_unfiltered (gdb_stdlog,
4264 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4265 core_addr_to_string (addr), core_addr_to_string (start),
4266 length, retval);
4267 return retval;
4268 }
4269
4270 static int
4271 debug_to_insert_hw_breakpoint (struct gdbarch *gdbarch,
4272 struct bp_target_info *bp_tgt)
4273 {
4274 int retval;
4275
4276 retval = debug_target.to_insert_hw_breakpoint (gdbarch, bp_tgt);
4277
4278 fprintf_unfiltered (gdb_stdlog,
4279 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4280 core_addr_to_string (bp_tgt->placed_address),
4281 (unsigned long) retval);
4282 return retval;
4283 }
4284
4285 static int
4286 debug_to_remove_hw_breakpoint (struct gdbarch *gdbarch,
4287 struct bp_target_info *bp_tgt)
4288 {
4289 int retval;
4290
4291 retval = debug_target.to_remove_hw_breakpoint (gdbarch, bp_tgt);
4292
4293 fprintf_unfiltered (gdb_stdlog,
4294 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4295 core_addr_to_string (bp_tgt->placed_address),
4296 (unsigned long) retval);
4297 return retval;
4298 }
4299
4300 static int
4301 debug_to_insert_watchpoint (CORE_ADDR addr, int len, int type,
4302 struct expression *cond)
4303 {
4304 int retval;
4305
4306 retval = debug_target.to_insert_watchpoint (addr, len, type, cond);
4307
4308 fprintf_unfiltered (gdb_stdlog,
4309 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4310 core_addr_to_string (addr), len, type,
4311 host_address_to_string (cond), (unsigned long) retval);
4312 return retval;
4313 }
4314
4315 static int
4316 debug_to_remove_watchpoint (CORE_ADDR addr, int len, int type,
4317 struct expression *cond)
4318 {
4319 int retval;
4320
4321 retval = debug_target.to_remove_watchpoint (addr, len, type, cond);
4322
4323 fprintf_unfiltered (gdb_stdlog,
4324 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4325 core_addr_to_string (addr), len, type,
4326 host_address_to_string (cond), (unsigned long) retval);
4327 return retval;
4328 }
4329
4330 static void
4331 debug_to_terminal_init (void)
4332 {
4333 debug_target.to_terminal_init ();
4334
4335 fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
4336 }
4337
4338 static void
4339 debug_to_terminal_inferior (void)
4340 {
4341 debug_target.to_terminal_inferior ();
4342
4343 fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
4344 }
4345
4346 static void
4347 debug_to_terminal_ours_for_output (void)
4348 {
4349 debug_target.to_terminal_ours_for_output ();
4350
4351 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
4352 }
4353
4354 static void
4355 debug_to_terminal_ours (void)
4356 {
4357 debug_target.to_terminal_ours ();
4358
4359 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
4360 }
4361
4362 static void
4363 debug_to_terminal_save_ours (void)
4364 {
4365 debug_target.to_terminal_save_ours ();
4366
4367 fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
4368 }
4369
4370 static void
4371 debug_to_terminal_info (char *arg, int from_tty)
4372 {
4373 debug_target.to_terminal_info (arg, from_tty);
4374
4375 fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
4376 from_tty);
4377 }
4378
4379 static void
4380 debug_to_load (char *args, int from_tty)
4381 {
4382 debug_target.to_load (args, from_tty);
4383
4384 fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
4385 }
4386
4387 static void
4388 debug_to_post_startup_inferior (ptid_t ptid)
4389 {
4390 debug_target.to_post_startup_inferior (ptid);
4391
4392 fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
4393 PIDGET (ptid));
4394 }
4395
4396 static int
4397 debug_to_insert_fork_catchpoint (int pid)
4398 {
4399 int retval;
4400
4401 retval = debug_target.to_insert_fork_catchpoint (pid);
4402
4403 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
4404 pid, retval);
4405
4406 return retval;
4407 }
4408
4409 static int
4410 debug_to_remove_fork_catchpoint (int pid)
4411 {
4412 int retval;
4413
4414 retval = debug_target.to_remove_fork_catchpoint (pid);
4415
4416 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
4417 pid, retval);
4418
4419 return retval;
4420 }
4421
4422 static int
4423 debug_to_insert_vfork_catchpoint (int pid)
4424 {
4425 int retval;
4426
4427 retval = debug_target.to_insert_vfork_catchpoint (pid);
4428
4429 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
4430 pid, retval);
4431
4432 return retval;
4433 }
4434
4435 static int
4436 debug_to_remove_vfork_catchpoint (int pid)
4437 {
4438 int retval;
4439
4440 retval = debug_target.to_remove_vfork_catchpoint (pid);
4441
4442 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
4443 pid, retval);
4444
4445 return retval;
4446 }
4447
4448 static int
4449 debug_to_insert_exec_catchpoint (int pid)
4450 {
4451 int retval;
4452
4453 retval = debug_target.to_insert_exec_catchpoint (pid);
4454
4455 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
4456 pid, retval);
4457
4458 return retval;
4459 }
4460
4461 static int
4462 debug_to_remove_exec_catchpoint (int pid)
4463 {
4464 int retval;
4465
4466 retval = debug_target.to_remove_exec_catchpoint (pid);
4467
4468 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
4469 pid, retval);
4470
4471 return retval;
4472 }
4473
4474 static int
4475 debug_to_has_exited (int pid, int wait_status, int *exit_status)
4476 {
4477 int has_exited;
4478
4479 has_exited = debug_target.to_has_exited (pid, wait_status, exit_status);
4480
4481 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
4482 pid, wait_status, *exit_status, has_exited);
4483
4484 return has_exited;
4485 }
4486
4487 static int
4488 debug_to_can_run (void)
4489 {
4490 int retval;
4491
4492 retval = debug_target.to_can_run ();
4493
4494 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4495
4496 return retval;
4497 }
4498
4499 static struct gdbarch *
4500 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
4501 {
4502 struct gdbarch *retval;
4503
4504 retval = debug_target.to_thread_architecture (ops, ptid);
4505
4506 fprintf_unfiltered (gdb_stdlog,
4507 "target_thread_architecture (%s) = %s [%s]\n",
4508 target_pid_to_str (ptid),
4509 host_address_to_string (retval),
4510 gdbarch_bfd_arch_info (retval)->printable_name);
4511 return retval;
4512 }
4513
4514 static void
4515 debug_to_stop (ptid_t ptid)
4516 {
4517 debug_target.to_stop (ptid);
4518
4519 fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
4520 target_pid_to_str (ptid));
4521 }
4522
4523 static void
4524 debug_to_rcmd (char *command,
4525 struct ui_file *outbuf)
4526 {
4527 debug_target.to_rcmd (command, outbuf);
4528 fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
4529 }
4530
4531 static char *
4532 debug_to_pid_to_exec_file (int pid)
4533 {
4534 char *exec_file;
4535
4536 exec_file = debug_target.to_pid_to_exec_file (pid);
4537
4538 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
4539 pid, exec_file);
4540
4541 return exec_file;
4542 }
4543
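/* Save a copy of the current target vector in debug_target and
   install the debug_to_* wrappers that log each call to
   gdb_stdlog.  */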
4544 static void
4545 setup_target_debug (void)
4546 {
4547 memcpy (&debug_target, &current_target, sizeof debug_target);
4548
4549 current_target.to_open = debug_to_open;
4550 current_target.to_post_attach = debug_to_post_attach;
4551 current_target.to_prepare_to_store = debug_to_prepare_to_store;
4552 current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
4553 current_target.to_files_info = debug_to_files_info;
4554 current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
4555 current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
4556 current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
4557 current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
4558 current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
4559 current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
4560 current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
4561 current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
4562 current_target.to_stopped_data_address = debug_to_stopped_data_address;
4563 current_target.to_watchpoint_addr_within_range
4564 = debug_to_watchpoint_addr_within_range;
4565 current_target.to_region_ok_for_hw_watchpoint
4566 = debug_to_region_ok_for_hw_watchpoint;
4567 current_target.to_can_accel_watchpoint_condition
4568 = debug_to_can_accel_watchpoint_condition;
4569 current_target.to_terminal_init = debug_to_terminal_init;
4570 current_target.to_terminal_inferior = debug_to_terminal_inferior;
4571 current_target.to_terminal_ours_for_output
4572 = debug_to_terminal_ours_for_output;
4573 current_target.to_terminal_ours = debug_to_terminal_ours;
4574 current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
4575 current_target.to_terminal_info = debug_to_terminal_info;
4576 current_target.to_load = debug_to_load;
4577 current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
4578 current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
4579 current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
4580 current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
4581 current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
4582 current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
4583 current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
4584 current_target.to_has_exited = debug_to_has_exited;
4585 current_target.to_can_run = debug_to_can_run;
4586 current_target.to_stop = debug_to_stop;
4587 current_target.to_rcmd = debug_to_rcmd;
4588 current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
4589 current_target.to_thread_architecture = debug_to_thread_architecture;
4590 }
4591 \f
4592
4593 static char targ_desc[] =
4594 "Names of targets and files being debugged.\nShows the entire \
4595 stack of targets currently in use (including the exec-file,\n\
4596 core-file, and process, if any), as well as the symbol file name.";
4597
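/* Implement the "monitor" command: forward CMD to the current target
   via target_rcmd, or error out if no target layer provides
   to_rcmd.  */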
4598 static void
4599 do_monitor_command (char *cmd,
4600 int from_tty)
4601 {
4602 if ((current_target.to_rcmd
4603 == (void (*) (char *, struct ui_file *)) tcomplain)
4604 || (current_target.to_rcmd == debug_to_rcmd
4605 && (debug_target.to_rcmd
4606 == (void (*) (char *, struct ui_file *)) tcomplain)))
4607 error (_("\"monitor\" command not supported by this target."));
4608 target_rcmd (cmd, gdb_stdtarg);
4609 }
4610
4611 /* Print the name of each layer of our target stack. */
4612
4613 static void
4614 maintenance_print_target_stack (char *cmd, int from_tty)
4615 {
4616 struct target_ops *t;
4617
4618 printf_filtered (_("The current target stack is:\n"));
4619
4620 for (t = target_stack; t != NULL; t = t->beneath)
4621 {
4622 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
4623 }
4624 }
4625
4626 /* Controls if async mode is permitted. */
4627 int target_async_permitted = 0;
4628
4629 /* The set command writes to this variable. If the inferior is
4630 executing, target_async_permitted is *not* updated. */
4631 static int target_async_permitted_1 = 0;
4632
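/* Implement the "set target-async" command: reject the change while
   there are live inferiors, otherwise copy the user-set value into
   target_async_permitted.  */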
4633 static void
4634 set_target_async_command (char *args, int from_tty,
4635 struct cmd_list_element *c)
4636 {
4637 if (have_live_inferiors ())
4638 {
4639 target_async_permitted_1 = target_async_permitted;
4640 error (_("Cannot change this setting while the inferior is running."));
4641 }
4642
4643 target_async_permitted = target_async_permitted_1;
4644 }
4645
4646 static void
4647 show_target_async_command (struct ui_file *file, int from_tty,
4648 struct cmd_list_element *c,
4649 const char *value)
4650 {
4651 fprintf_filtered (file,
4652 _("Controlling the inferior in "
4653 "asynchronous mode is %s.\n"), value);
4654 }
4655
4656 /* Temporary copies of permission settings. */
4657
4658 static int may_write_registers_1 = 1;
4659 static int may_write_memory_1 = 1;
4660 static int may_insert_breakpoints_1 = 1;
4661 static int may_insert_tracepoints_1 = 1;
4662 static int may_insert_fast_tracepoints_1 = 1;
4663 static int may_stop_1 = 1;
4664
4665 /* Make the user-set values match the real values again. */
4666
4667 void
4668 update_target_permissions (void)
4669 {
4670 may_write_registers_1 = may_write_registers;
4671 may_write_memory_1 = may_write_memory;
4672 may_insert_breakpoints_1 = may_insert_breakpoints;
4673 may_insert_tracepoints_1 = may_insert_tracepoints;
4674 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
4675 may_stop_1 = may_stop;
4676 }
4677
4678 /* This single function handles (most of) the permission flags in the
4679 same way. */
4680
4681 static void
4682 set_target_permissions (char *args, int from_tty,
4683 struct cmd_list_element *c)
4684 {
4685 if (target_has_execution)
4686 {
4687 update_target_permissions ();
4688 error (_("Cannot change this setting while the inferior is running."));
4689 }
4690
4691 /* Make the real values match the user-changed values. */
4692 may_write_registers = may_write_registers_1;
4693 may_insert_breakpoints = may_insert_breakpoints_1;
4694 may_insert_tracepoints = may_insert_tracepoints_1;
4695 may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
4696 may_stop = may_stop_1;
4697 update_observer_mode ();
4698 }
4699
4700 /* Set memory write permission independently of observer mode. */
4701
4702 static void
4703 set_write_memory_permission (char *args, int from_tty,
4704 struct cmd_list_element *c)
4705 {
4706 /* Make the real values match the user-changed values. */
4707 may_write_memory = may_write_memory_1;
4708 update_observer_mode ();
4709 }
4710
4711
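/* Set up the initial (dummy) target, register the target-related
   commands and settings, and initialize the target data cache.  */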
4712 void
4713 initialize_targets (void)
4714 {
4715 init_dummy_target ();
4716 push_target (&dummy_target);
4717
4718 add_info ("target", target_info, targ_desc);
4719 add_info ("files", target_info, targ_desc);
4720
4721 add_setshow_zinteger_cmd ("target", class_maintenance, &targetdebug, _("\
4722 Set target debugging."), _("\
4723 Show target debugging."), _("\
4724 When non-zero, target debugging is enabled. Higher numbers are more\n\
4725 verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
4726 command."),
4727 NULL,
4728 show_targetdebug,
4729 &setdebuglist, &showdebuglist);
4730
4731 add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
4732 &trust_readonly, _("\
4733 Set mode for reading from readonly sections."), _("\
4734 Show mode for reading from readonly sections."), _("\
4735 When this mode is on, memory reads from readonly sections (such as .text)\n\
4736 will be read from the object file instead of from the target. This will\n\
4737 result in significant performance improvement for remote targets."),
4738 NULL,
4739 show_trust_readonly,
4740 &setlist, &showlist);
4741
4742 add_com ("monitor", class_obscure, do_monitor_command,
4743 _("Send a command to the remote monitor (remote targets only)."));
4744
4745 add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
4746 _("Print the name of each layer of the internal target stack."),
4747 &maintenanceprintlist);
4748
4749 add_setshow_boolean_cmd ("target-async", no_class,
4750 &target_async_permitted_1, _("\
4751 Set whether gdb controls the inferior in asynchronous mode."), _("\
4752 Show whether gdb controls the inferior in asynchronous mode."), _("\
4753 Tells gdb whether to control the inferior in asynchronous mode."),
4754 set_target_async_command,
4755 show_target_async_command,
4756 &setlist,
4757 &showlist);
4758
4759 add_setshow_boolean_cmd ("stack-cache", class_support,
4760 &stack_cache_enabled_p_1, _("\
4761 Set cache use for stack access."), _("\
4762 Show cache use for stack access."), _("\
4763 When on, use the data cache for all stack access, regardless of any\n\
4764 configured memory regions. This improves remote performance significantly.\n\
4765 By default, caching for stack access is on."),
4766 set_stack_cache_enabled_p,
4767 show_stack_cache_enabled_p,
4768 &setlist, &showlist);
4769
4770 add_setshow_boolean_cmd ("may-write-registers", class_support,
4771 &may_write_registers_1, _("\
4772 Set permission to write into registers."), _("\
4773 Show permission to write into registers."), _("\
4774 When this permission is on, GDB may write into the target's registers.\n\
4775 Otherwise, any sort of write attempt will result in an error."),
4776 set_target_permissions, NULL,
4777 &setlist, &showlist);
4778
4779 add_setshow_boolean_cmd ("may-write-memory", class_support,
4780 &may_write_memory_1, _("\
4781 Set permission to write into target memory."), _("\
4782 Show permission to write into target memory."), _("\
4783 When this permission is on, GDB may write into the target's memory.\n\
4784 Otherwise, any sort of write attempt will result in an error."),
4785 set_write_memory_permission, NULL,
4786 &setlist, &showlist);
4787
4788 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
4789 &may_insert_breakpoints_1, _("\
4790 Set permission to insert breakpoints in the target."), _("\
4791 Show permission to insert breakpoints in the target."), _("\
4792 When this permission is on, GDB may insert breakpoints in the program.\n\
4793 Otherwise, any sort of insertion attempt will result in an error."),
4794 set_target_permissions, NULL,
4795 &setlist, &showlist);
4796
4797 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
4798 &may_insert_tracepoints_1, _("\
4799 Set permission to insert tracepoints in the target."), _("\
4800 Show permission to insert tracepoints in the target."), _("\
4801 When this permission is on, GDB may insert tracepoints in the program.\n\
4802 Otherwise, any sort of insertion attempt will result in an error."),
4803 set_target_permissions, NULL,
4804 &setlist, &showlist);
4805
4806 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
4807 &may_insert_fast_tracepoints_1, _("\
4808 Set permission to insert fast tracepoints in the target."), _("\
4809 Show permission to insert fast tracepoints in the target."), _("\
4810 When this permission is on, GDB may insert fast tracepoints.\n\
4811 Otherwise, any sort of insertion attempt will result in an error."),
4812 set_target_permissions, NULL,
4813 &setlist, &showlist);
4814
4815 add_setshow_boolean_cmd ("may-interrupt", class_support,
4816 &may_stop_1, _("\
4817 Set permission to interrupt or signal the target."), _("\
4818 Show permission to interrupt or signal the target."), _("\
4819 When this permission is on, GDB may interrupt/stop the target's execution.\n\
4820 Otherwise, any attempt to interrupt or stop will be ignored."),
4821 set_target_permissions, NULL,
4822 &setlist, &showlist);
4823
4824
4825 target_dcache = dcache_init ();
4826 }