1 /* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2014 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include <errno.h>
24 #include <string.h>
25 #include "target.h"
26 #include "target-dcache.h"
27 #include "gdbcmd.h"
28 #include "symtab.h"
29 #include "inferior.h"
30 #include "bfd.h"
31 #include "symfile.h"
32 #include "objfiles.h"
33 #include "dcache.h"
34 #include <signal.h>
35 #include "regcache.h"
36 #include "gdb_assert.h"
37 #include "gdbcore.h"
38 #include "exceptions.h"
39 #include "target-descriptions.h"
40 #include "gdbthread.h"
41 #include "solib.h"
42 #include "exec.h"
43 #include "inline-frame.h"
44 #include "tracepoint.h"
45 #include "gdb/fileio.h"
46 #include "agent.h"
47
48 static void target_info (char *, int);
49
50 static void default_terminal_info (struct target_ops *, const char *, int);
51
52 static int default_watchpoint_addr_within_range (struct target_ops *,
53 CORE_ADDR, CORE_ADDR, int);
54
55 static int default_region_ok_for_hw_watchpoint (struct target_ops *,
56 CORE_ADDR, int);
57
58 static void tcomplain (void) ATTRIBUTE_NORETURN;
59
60 static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
61
62 static int return_zero (void);
63
64 static int return_one (void);
65
66 static int return_minus_one (void);
67
68 static void *return_null (void);
69
70 void target_ignore (void);
71
72 static void target_command (char *, int);
73
74 static struct target_ops *find_default_run_target (char *);
75
76 static target_xfer_partial_ftype default_xfer_partial;
77
78 static struct gdbarch *default_thread_architecture (struct target_ops *ops,
79 ptid_t ptid);
80
81 static int find_default_can_async_p (struct target_ops *ignore);
82
83 static int find_default_is_async_p (struct target_ops *ignore);
84
85 #include "target-delegates.c"
86
87 static void init_dummy_target (void);
88
89 static struct target_ops debug_target;
90
91 static void debug_to_open (char *, int);
92
93 static void debug_to_prepare_to_store (struct target_ops *self,
94 struct regcache *);
95
96 static void debug_to_files_info (struct target_ops *);
97
98 static int debug_to_insert_breakpoint (struct target_ops *, struct gdbarch *,
99 struct bp_target_info *);
100
101 static int debug_to_remove_breakpoint (struct target_ops *, struct gdbarch *,
102 struct bp_target_info *);
103
104 static int debug_to_can_use_hw_breakpoint (struct target_ops *self,
105 int, int, int);
106
107 static int debug_to_insert_hw_breakpoint (struct target_ops *self,
108 struct gdbarch *,
109 struct bp_target_info *);
110
111 static int debug_to_remove_hw_breakpoint (struct target_ops *self,
112 struct gdbarch *,
113 struct bp_target_info *);
114
115 static int debug_to_insert_watchpoint (struct target_ops *self,
116 CORE_ADDR, int, int,
117 struct expression *);
118
119 static int debug_to_remove_watchpoint (struct target_ops *self,
120 CORE_ADDR, int, int,
121 struct expression *);
122
123 static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
124
125 static int debug_to_watchpoint_addr_within_range (struct target_ops *,
126 CORE_ADDR, CORE_ADDR, int);
127
128 static int debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
129 CORE_ADDR, int);
130
131 static int debug_to_can_accel_watchpoint_condition (struct target_ops *self,
132 CORE_ADDR, int, int,
133 struct expression *);
134
135 static void debug_to_terminal_init (struct target_ops *self);
136
137 static void debug_to_terminal_inferior (struct target_ops *self);
138
139 static void debug_to_terminal_ours_for_output (struct target_ops *self);
140
141 static void debug_to_terminal_save_ours (struct target_ops *self);
142
143 static void debug_to_terminal_ours (struct target_ops *self);
144
145 static void debug_to_load (struct target_ops *self, char *, int);
146
147 static int debug_to_can_run (struct target_ops *self);
148
149 static void debug_to_stop (struct target_ops *self, ptid_t);
150
151 /* Pointer to array of target architecture structures; the size of the
152 array; the current index into the array; the allocated size of the
153 array. */
154 struct target_ops **target_structs;
155 unsigned target_struct_size;
156 unsigned target_struct_allocsize;
157 #define DEFAULT_ALLOCSIZE 10
158
159 /* The initial current target, so that there is always a semi-valid
160 current target. */
161
162 static struct target_ops dummy_target;
163
164 /* Top of target stack. */
165
166 static struct target_ops *target_stack;
167
168 /* The target structure we are currently using to talk to a process
169 or file or whatever "inferior" we have. */
170
171 struct target_ops current_target;
172
173 /* Command list for target. */
174
175 static struct cmd_list_element *targetlist = NULL;
176
177 /* Nonzero if we should trust readonly sections from the
178 executable when reading memory. */
179
180 static int trust_readonly = 0;
181
182 /* Nonzero if we should show true memory content including
183    memory breakpoints inserted by GDB.  */
184
185 static int show_memory_breakpoints = 0;
186
187 /* These globals control whether GDB attempts to perform these
188 operations; they are useful for targets that need to prevent
189    inadvertent disruption, such as in non-stop mode.  */
190
191 int may_write_registers = 1;
192
193 int may_write_memory = 1;
194
195 int may_insert_breakpoints = 1;
196
197 int may_insert_tracepoints = 1;
198
199 int may_insert_fast_tracepoints = 1;
200
201 int may_stop = 1;
202
203 /* Non-zero if we want to see trace of target level stuff. */
204
205 static unsigned int targetdebug = 0;
206 static void
207 show_targetdebug (struct ui_file *file, int from_tty,
208 struct cmd_list_element *c, const char *value)
209 {
210 fprintf_filtered (file, _("Target debugging is %s.\n"), value);
211 }
212
213 static void setup_target_debug (void);
214
215 /* The user just typed 'target' without the name of a target. */
216
217 static void
218 target_command (char *arg, int from_tty)
219 {
220 fputs_filtered ("Argument required (target name). Try `help target'\n",
221 gdb_stdout);
222 }
223
224 /* Default target_has_* methods for process_stratum targets. */
225
226 int
227 default_child_has_all_memory (struct target_ops *ops)
228 {
229 /* If no inferior selected, then we can't read memory here. */
230 if (ptid_equal (inferior_ptid, null_ptid))
231 return 0;
232
233 return 1;
234 }
235
236 int
237 default_child_has_memory (struct target_ops *ops)
238 {
239 /* If no inferior selected, then we can't read memory here. */
240 if (ptid_equal (inferior_ptid, null_ptid))
241 return 0;
242
243 return 1;
244 }
245
246 int
247 default_child_has_stack (struct target_ops *ops)
248 {
249 /* If no inferior selected, there's no stack. */
250 if (ptid_equal (inferior_ptid, null_ptid))
251 return 0;
252
253 return 1;
254 }
255
256 int
257 default_child_has_registers (struct target_ops *ops)
258 {
259 /* Can't read registers from no inferior. */
260 if (ptid_equal (inferior_ptid, null_ptid))
261 return 0;
262
263 return 1;
264 }
265
266 int
267 default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
268 {
269 /* If there's no thread selected, then we can't make it run through
270 hoops. */
271 if (ptid_equal (the_ptid, null_ptid))
272 return 0;
273
274 return 1;
275 }
276
277
278 int
279 target_has_all_memory_1 (void)
280 {
281 struct target_ops *t;
282
283 for (t = current_target.beneath; t != NULL; t = t->beneath)
284 if (t->to_has_all_memory (t))
285 return 1;
286
287 return 0;
288 }
289
290 int
291 target_has_memory_1 (void)
292 {
293 struct target_ops *t;
294
295 for (t = current_target.beneath; t != NULL; t = t->beneath)
296 if (t->to_has_memory (t))
297 return 1;
298
299 return 0;
300 }
301
302 int
303 target_has_stack_1 (void)
304 {
305 struct target_ops *t;
306
307 for (t = current_target.beneath; t != NULL; t = t->beneath)
308 if (t->to_has_stack (t))
309 return 1;
310
311 return 0;
312 }
313
314 int
315 target_has_registers_1 (void)
316 {
317 struct target_ops *t;
318
319 for (t = current_target.beneath; t != NULL; t = t->beneath)
320 if (t->to_has_registers (t))
321 return 1;
322
323 return 0;
324 }
325
326 int
327 target_has_execution_1 (ptid_t the_ptid)
328 {
329 struct target_ops *t;
330
331 for (t = current_target.beneath; t != NULL; t = t->beneath)
332 if (t->to_has_execution (t, the_ptid))
333 return 1;
334
335 return 0;
336 }
337
338 int
339 target_has_execution_current (void)
340 {
341 return target_has_execution_1 (inferior_ptid);
342 }
343
344 /* Complete initialization of T. This ensures that various fields in
345 T are set, if needed by the target implementation. */
346
347 void
348 complete_target_initialization (struct target_ops *t)
349 {
350 /* Provide default values for all "must have" methods. */
351 if (t->to_xfer_partial == NULL)
352 t->to_xfer_partial = default_xfer_partial;
353
354 if (t->to_has_all_memory == NULL)
355 t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
356
357 if (t->to_has_memory == NULL)
358 t->to_has_memory = (int (*) (struct target_ops *)) return_zero;
359
360 if (t->to_has_stack == NULL)
361 t->to_has_stack = (int (*) (struct target_ops *)) return_zero;
362
363 if (t->to_has_registers == NULL)
364 t->to_has_registers = (int (*) (struct target_ops *)) return_zero;
365
366 if (t->to_has_execution == NULL)
367 t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;
368
369 install_delegators (t);
370 }
371
372 /* Add possible target architecture T to the list and add a new
373 command 'target T->to_shortname'. Set COMPLETER as the command's
374 completer if not NULL. */
375
376 void
377 add_target_with_completer (struct target_ops *t,
378 completer_ftype *completer)
379 {
380 struct cmd_list_element *c;
381
382 complete_target_initialization (t);
383
384 if (!target_structs)
385 {
386 target_struct_allocsize = DEFAULT_ALLOCSIZE;
387 target_structs = (struct target_ops **) xmalloc
388 (target_struct_allocsize * sizeof (*target_structs));
389 }
390 if (target_struct_size >= target_struct_allocsize)
391 {
392 target_struct_allocsize *= 2;
393 target_structs = (struct target_ops **)
394 xrealloc ((char *) target_structs,
395 target_struct_allocsize * sizeof (*target_structs));
396 }
397 target_structs[target_struct_size++] = t;
398
399 if (targetlist == NULL)
400 add_prefix_cmd ("target", class_run, target_command, _("\
401 Connect to a target machine or process.\n\
402 The first argument is the type or protocol of the target machine.\n\
403 Remaining arguments are interpreted by the target protocol. For more\n\
404 information on the arguments for a particular protocol, type\n\
405 `help target ' followed by the protocol name."),
406 &targetlist, "target ", 0, &cmdlist);
407 c = add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc,
408 &targetlist);
409 if (completer != NULL)
410 set_cmd_completer (c, completer);
411 }
412
413 /* Add a possible target architecture to the list. */
414
415 void
416 add_target (struct target_ops *t)
417 {
418 add_target_with_completer (t, NULL);
419 }
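
/* Example (illustrative sketch only; "example_ops", "example_open" and
   "_initialize_example_target" are hypothetical names): a backend
   typically fills in a statically allocated target_ops in its
   _initialize routine and registers it with add_target.  The "must
   have" methods left NULL are filled in by
   complete_target_initialization above.

     static struct target_ops example_ops;

     static void
     example_open (char *args, int from_tty)
     {
       ... establish the connection, then make this the current
           process-level target:
       push_target (&example_ops);
     }

     void
     _initialize_example_target (void)
     {
       example_ops.to_shortname = "example";
       example_ops.to_longname = "Example remote target";
       example_ops.to_doc = "Use an example target.";
       example_ops.to_open = example_open;
       example_ops.to_stratum = process_stratum;
       example_ops.to_magic = OPS_MAGIC;
       add_target (&example_ops);
     }
*/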
420
421 /* See target.h. */
422
423 void
424 add_deprecated_target_alias (struct target_ops *t, char *alias)
425 {
426 struct cmd_list_element *c;
427 char *alt;
428
429 /* If we use add_alias_cmd, here, we do not get the deprecated warning,
430 see PR cli/15104. */
431 c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
432 alt = xstrprintf ("target %s", t->to_shortname);
433 deprecate_cmd (c, alt);
434 }
435
436 /* Stub functions */
437
438 void
439 target_ignore (void)
440 {
441 }
442
443 void
444 target_kill (void)
445 {
446 struct target_ops *t;
447
448 for (t = current_target.beneath; t != NULL; t = t->beneath)
449 if (t->to_kill != NULL)
450 {
451 if (targetdebug)
452 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
453
454 t->to_kill (t);
455 return;
456 }
457
458 noprocess ();
459 }
460
461 void
462 target_load (char *arg, int from_tty)
463 {
464 target_dcache_invalidate ();
465 (*current_target.to_load) (&current_target, arg, from_tty);
466 }
467
468 void
469 target_create_inferior (char *exec_file, char *args,
470 char **env, int from_tty)
471 {
472 struct target_ops *t;
473
474 for (t = current_target.beneath; t != NULL; t = t->beneath)
475 {
476 if (t->to_create_inferior != NULL)
477 {
478 t->to_create_inferior (t, exec_file, args, env, from_tty);
479 if (targetdebug)
480 fprintf_unfiltered (gdb_stdlog,
481 "target_create_inferior (%s, %s, xxx, %d)\n",
482 exec_file, args, from_tty);
483 return;
484 }
485 }
486
487 internal_error (__FILE__, __LINE__,
488 _("could not find a target to create inferior"));
489 }
490
491 void
492 target_terminal_inferior (void)
493 {
494 /* A background resume (``run&'') should leave GDB in control of the
495 terminal. Use target_can_async_p, not target_is_async_p, since at
496 this point the target is not async yet. However, if sync_execution
497 is not set, we know it will become async prior to resume. */
498 if (target_can_async_p () && !sync_execution)
499 return;
500
501 /* If GDB is resuming the inferior in the foreground, install
502 inferior's terminal modes. */
503 (*current_target.to_terminal_inferior) (&current_target);
504 }
505
506 static int
507 nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
508 struct target_ops *t)
509 {
510 errno = EIO; /* Can't read/write this location. */
511 return 0; /* No bytes handled. */
512 }
513
514 static void
515 tcomplain (void)
516 {
517 error (_("You can't do that when your target is `%s'"),
518 current_target.to_shortname);
519 }
520
521 void
522 noprocess (void)
523 {
524 error (_("You can't do that without a process to debug."));
525 }
526
527 static void
528 default_terminal_info (struct target_ops *self, const char *args, int from_tty)
529 {
530 printf_unfiltered (_("No saved terminal information.\n"));
531 }
532
533 /* A default implementation for the to_get_ada_task_ptid target method.
534
535 This function builds the PTID by using both LWP and TID as part of
536 the PTID lwp and tid elements. The pid used is the pid of the
537 inferior_ptid. */
538
539 static ptid_t
540 default_get_ada_task_ptid (struct target_ops *self, long lwp, long tid)
541 {
542 return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
543 }
544
545 static enum exec_direction_kind
546 default_execution_direction (struct target_ops *self)
547 {
548 if (!target_can_execute_reverse)
549 return EXEC_FORWARD;
550 else if (!target_can_async_p ())
551 return EXEC_FORWARD;
552 else
553 gdb_assert_not_reached ("\
554 to_execution_direction must be implemented for reverse async");
555 }
556
557 /* Go through the target stack from top to bottom, copying over zero
558 entries in current_target, then filling in still empty entries. In
559 effect, we are doing class inheritance through the pushed target
560 vectors.
561
562 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
563 is currently implemented, is that it discards any knowledge of
564 which target an inherited method originally belonged to.
565    Consequently, new target methods should instead explicitly and
566 locally search the target stack for the target that can handle the
567 request. */
568
569 static void
570 update_current_target (void)
571 {
572 struct target_ops *t;
573
574 /* First, reset current's contents. */
575 memset (&current_target, 0, sizeof (current_target));
576
577 /* Install the delegators. */
578 install_delegators (&current_target);
579
580 #define INHERIT(FIELD, TARGET) \
581 if (!current_target.FIELD) \
582 current_target.FIELD = (TARGET)->FIELD
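
/* For illustration, INHERIT (to_shortname, t) expands to:

       if (!current_target.to_shortname)
         current_target.to_shortname = (t)->to_shortname;

   i.e. the top-most target on the stack that provides a field wins.  */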
583
584 for (t = target_stack; t; t = t->beneath)
585 {
586 INHERIT (to_shortname, t);
587 INHERIT (to_longname, t);
588 INHERIT (to_doc, t);
589 /* Do not inherit to_open. */
590 /* Do not inherit to_close. */
591 /* Do not inherit to_attach. */
592 INHERIT (to_post_attach, t);
593 INHERIT (to_attach_no_wait, t);
594 /* Do not inherit to_detach. */
595 /* Do not inherit to_disconnect. */
596 /* Do not inherit to_resume. */
597 /* Do not inherit to_wait. */
598 /* Do not inherit to_fetch_registers. */
599 /* Do not inherit to_store_registers. */
600 INHERIT (to_prepare_to_store, t);
601 INHERIT (deprecated_xfer_memory, t);
602 INHERIT (to_files_info, t);
603 /* Do not inherit to_insert_breakpoint. */
604 /* Do not inherit to_remove_breakpoint. */
605 INHERIT (to_can_use_hw_breakpoint, t);
606 INHERIT (to_insert_hw_breakpoint, t);
607 INHERIT (to_remove_hw_breakpoint, t);
608 /* Do not inherit to_ranged_break_num_registers. */
609 INHERIT (to_insert_watchpoint, t);
610 INHERIT (to_remove_watchpoint, t);
611 /* Do not inherit to_insert_mask_watchpoint. */
612 /* Do not inherit to_remove_mask_watchpoint. */
613 /* Do not inherit to_stopped_data_address. */
614 INHERIT (to_have_steppable_watchpoint, t);
615 INHERIT (to_have_continuable_watchpoint, t);
616 /* Do not inherit to_stopped_by_watchpoint. */
617 INHERIT (to_watchpoint_addr_within_range, t);
618 INHERIT (to_region_ok_for_hw_watchpoint, t);
619 INHERIT (to_can_accel_watchpoint_condition, t);
620 /* Do not inherit to_masked_watch_num_registers. */
621 INHERIT (to_terminal_init, t);
622 INHERIT (to_terminal_inferior, t);
623 INHERIT (to_terminal_ours_for_output, t);
624 INHERIT (to_terminal_ours, t);
625 INHERIT (to_terminal_save_ours, t);
626 INHERIT (to_terminal_info, t);
627 /* Do not inherit to_kill. */
628 INHERIT (to_load, t);
629       /* Do not inherit to_create_inferior.  */
630 INHERIT (to_post_startup_inferior, t);
631 INHERIT (to_insert_fork_catchpoint, t);
632 INHERIT (to_remove_fork_catchpoint, t);
633 INHERIT (to_insert_vfork_catchpoint, t);
634 INHERIT (to_remove_vfork_catchpoint, t);
635 /* Do not inherit to_follow_fork. */
636 INHERIT (to_insert_exec_catchpoint, t);
637 INHERIT (to_remove_exec_catchpoint, t);
638 INHERIT (to_set_syscall_catchpoint, t);
639 INHERIT (to_has_exited, t);
640 /* Do not inherit to_mourn_inferior. */
641 INHERIT (to_can_run, t);
642 /* Do not inherit to_pass_signals. */
643 /* Do not inherit to_program_signals. */
644 /* Do not inherit to_thread_alive. */
645 /* Do not inherit to_find_new_threads. */
646 /* Do not inherit to_pid_to_str. */
647 INHERIT (to_extra_thread_info, t);
648 INHERIT (to_thread_name, t);
649 INHERIT (to_stop, t);
650 /* Do not inherit to_xfer_partial. */
651 INHERIT (to_rcmd, t);
652 INHERIT (to_pid_to_exec_file, t);
653 INHERIT (to_log_command, t);
654 INHERIT (to_stratum, t);
655 /* Do not inherit to_has_all_memory. */
656 /* Do not inherit to_has_memory. */
657 /* Do not inherit to_has_stack. */
658 /* Do not inherit to_has_registers. */
659 /* Do not inherit to_has_execution. */
660 INHERIT (to_has_thread_control, t);
661 /* Do not inherit to_can_async_p. */
662 /* Do not inherit to_is_async_p. */
663 /* Do not inherit to_async. */
664 INHERIT (to_find_memory_regions, t);
665 INHERIT (to_make_corefile_notes, t);
666 INHERIT (to_get_bookmark, t);
667 INHERIT (to_goto_bookmark, t);
668 /* Do not inherit to_get_thread_local_address. */
669 INHERIT (to_can_execute_reverse, t);
670 INHERIT (to_execution_direction, t);
671 INHERIT (to_thread_architecture, t);
672 /* Do not inherit to_read_description. */
673 INHERIT (to_get_ada_task_ptid, t);
674 /* Do not inherit to_search_memory. */
675 INHERIT (to_supports_multi_process, t);
676 INHERIT (to_supports_enable_disable_tracepoint, t);
677 INHERIT (to_supports_string_tracing, t);
678 INHERIT (to_trace_init, t);
679 INHERIT (to_download_tracepoint, t);
680 INHERIT (to_can_download_tracepoint, t);
681 INHERIT (to_download_trace_state_variable, t);
682 INHERIT (to_enable_tracepoint, t);
683 INHERIT (to_disable_tracepoint, t);
684 INHERIT (to_trace_set_readonly_regions, t);
685 INHERIT (to_trace_start, t);
686 INHERIT (to_get_trace_status, t);
687 INHERIT (to_get_tracepoint_status, t);
688 INHERIT (to_trace_stop, t);
689 INHERIT (to_trace_find, t);
690 INHERIT (to_get_trace_state_variable_value, t);
691 INHERIT (to_save_trace_data, t);
692 INHERIT (to_upload_tracepoints, t);
693 INHERIT (to_upload_trace_state_variables, t);
694 INHERIT (to_get_raw_trace_data, t);
695 INHERIT (to_get_min_fast_tracepoint_insn_len, t);
696 INHERIT (to_set_disconnected_tracing, t);
697 INHERIT (to_set_circular_trace_buffer, t);
698 INHERIT (to_set_trace_buffer_size, t);
699 INHERIT (to_set_trace_notes, t);
700 INHERIT (to_get_tib_address, t);
701 INHERIT (to_set_permissions, t);
702 INHERIT (to_static_tracepoint_marker_at, t);
703 INHERIT (to_static_tracepoint_markers_by_strid, t);
704 INHERIT (to_traceframe_info, t);
705 INHERIT (to_use_agent, t);
706 INHERIT (to_can_use_agent, t);
707 INHERIT (to_augmented_libraries_svr4_read, t);
708 INHERIT (to_magic, t);
709 INHERIT (to_supports_evaluation_of_breakpoint_conditions, t);
710 INHERIT (to_can_run_breakpoint_commands, t);
711 /* Do not inherit to_memory_map. */
712 /* Do not inherit to_flash_erase. */
713 /* Do not inherit to_flash_done. */
714 }
715 #undef INHERIT
716
717 /* Clean up a target struct so it no longer has any zero pointers in
718    it.  Some entries are defaulted to a method that prints an error,
719 others are hard-wired to a standard recursive default. */
720
721 #define de_fault(field, value) \
722 if (!current_target.field) \
723 current_target.field = value
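
/* For illustration, the first call below,
   de_fault (to_open, (void (*) (char *, int)) tcomplain), expands to:

       if (!current_target.to_open)
         current_target.to_open = (void (*) (char *, int)) tcomplain;

   so any slot still NULL after inheritance gets a complaining or
   no-op stub.  */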
724
725 de_fault (to_open,
726 (void (*) (char *, int))
727 tcomplain);
728 de_fault (to_close,
729 (void (*) (struct target_ops *))
730 target_ignore);
731 de_fault (to_post_attach,
732 (void (*) (struct target_ops *, int))
733 target_ignore);
734 de_fault (to_prepare_to_store,
735 (void (*) (struct target_ops *, struct regcache *))
736 noprocess);
737 de_fault (deprecated_xfer_memory,
738 (int (*) (CORE_ADDR, gdb_byte *, int, int,
739 struct mem_attrib *, struct target_ops *))
740 nomemory);
741 de_fault (to_files_info,
742 (void (*) (struct target_ops *))
743 target_ignore);
744 de_fault (to_can_use_hw_breakpoint,
745 (int (*) (struct target_ops *, int, int, int))
746 return_zero);
747 de_fault (to_insert_hw_breakpoint,
748 (int (*) (struct target_ops *, struct gdbarch *,
749 struct bp_target_info *))
750 return_minus_one);
751 de_fault (to_remove_hw_breakpoint,
752 (int (*) (struct target_ops *, struct gdbarch *,
753 struct bp_target_info *))
754 return_minus_one);
755 de_fault (to_insert_watchpoint,
756 (int (*) (struct target_ops *, CORE_ADDR, int, int,
757 struct expression *))
758 return_minus_one);
759 de_fault (to_remove_watchpoint,
760 (int (*) (struct target_ops *, CORE_ADDR, int, int,
761 struct expression *))
762 return_minus_one);
763 de_fault (to_watchpoint_addr_within_range,
764 default_watchpoint_addr_within_range);
765 de_fault (to_region_ok_for_hw_watchpoint,
766 default_region_ok_for_hw_watchpoint);
767 de_fault (to_can_accel_watchpoint_condition,
768 (int (*) (struct target_ops *, CORE_ADDR, int, int,
769 struct expression *))
770 return_zero);
771 de_fault (to_terminal_init,
772 (void (*) (struct target_ops *))
773 target_ignore);
774 de_fault (to_terminal_inferior,
775 (void (*) (struct target_ops *))
776 target_ignore);
777 de_fault (to_terminal_ours_for_output,
778 (void (*) (struct target_ops *))
779 target_ignore);
780 de_fault (to_terminal_ours,
781 (void (*) (struct target_ops *))
782 target_ignore);
783 de_fault (to_terminal_save_ours,
784 (void (*) (struct target_ops *))
785 target_ignore);
786 de_fault (to_terminal_info,
787 default_terminal_info);
788 de_fault (to_load,
789 (void (*) (struct target_ops *, char *, int))
790 tcomplain);
791 de_fault (to_post_startup_inferior,
792 (void (*) (struct target_ops *, ptid_t))
793 target_ignore);
794 de_fault (to_insert_fork_catchpoint,
795 (int (*) (struct target_ops *, int))
796 return_one);
797 de_fault (to_remove_fork_catchpoint,
798 (int (*) (struct target_ops *, int))
799 return_one);
800 de_fault (to_insert_vfork_catchpoint,
801 (int (*) (struct target_ops *, int))
802 return_one);
803 de_fault (to_remove_vfork_catchpoint,
804 (int (*) (struct target_ops *, int))
805 return_one);
806 de_fault (to_insert_exec_catchpoint,
807 (int (*) (struct target_ops *, int))
808 return_one);
809 de_fault (to_remove_exec_catchpoint,
810 (int (*) (struct target_ops *, int))
811 return_one);
812 de_fault (to_set_syscall_catchpoint,
813 (int (*) (struct target_ops *, int, int, int, int, int *))
814 return_one);
815 de_fault (to_has_exited,
816 (int (*) (struct target_ops *, int, int, int *))
817 return_zero);
818 de_fault (to_can_run,
819 (int (*) (struct target_ops *))
820 return_zero);
821 de_fault (to_extra_thread_info,
822 (char *(*) (struct target_ops *, struct thread_info *))
823 return_null);
824 de_fault (to_thread_name,
825 (char *(*) (struct target_ops *, struct thread_info *))
826 return_null);
827 de_fault (to_stop,
828 (void (*) (struct target_ops *, ptid_t))
829 target_ignore);
830 de_fault (to_rcmd,
831 (void (*) (struct target_ops *, char *, struct ui_file *))
832 tcomplain);
833 de_fault (to_pid_to_exec_file,
834 (char *(*) (struct target_ops *, int))
835 return_null);
836 de_fault (to_thread_architecture,
837 default_thread_architecture);
838 current_target.to_read_description = NULL;
839 de_fault (to_get_ada_task_ptid,
840 (ptid_t (*) (struct target_ops *, long, long))
841 default_get_ada_task_ptid);
842 de_fault (to_supports_multi_process,
843 (int (*) (struct target_ops *))
844 return_zero);
845 de_fault (to_supports_enable_disable_tracepoint,
846 (int (*) (struct target_ops *))
847 return_zero);
848 de_fault (to_supports_string_tracing,
849 (int (*) (struct target_ops *))
850 return_zero);
851 de_fault (to_trace_init,
852 (void (*) (void))
853 tcomplain);
854 de_fault (to_download_tracepoint,
855 (void (*) (struct bp_location *))
856 tcomplain);
857 de_fault (to_can_download_tracepoint,
858 (int (*) (void))
859 return_zero);
860 de_fault (to_download_trace_state_variable,
861 (void (*) (struct trace_state_variable *))
862 tcomplain);
863 de_fault (to_enable_tracepoint,
864 (void (*) (struct bp_location *))
865 tcomplain);
866 de_fault (to_disable_tracepoint,
867 (void (*) (struct bp_location *))
868 tcomplain);
869 de_fault (to_trace_set_readonly_regions,
870 (void (*) (void))
871 tcomplain);
872 de_fault (to_trace_start,
873 (void (*) (void))
874 tcomplain);
875 de_fault (to_get_trace_status,
876 (int (*) (struct trace_status *))
877 return_minus_one);
878 de_fault (to_get_tracepoint_status,
879 (void (*) (struct breakpoint *, struct uploaded_tp *))
880 tcomplain);
881 de_fault (to_trace_stop,
882 (void (*) (void))
883 tcomplain);
884 de_fault (to_trace_find,
885 (int (*) (enum trace_find_type, int, CORE_ADDR, CORE_ADDR, int *))
886 return_minus_one);
887 de_fault (to_get_trace_state_variable_value,
888 (int (*) (int, LONGEST *))
889 return_zero);
890 de_fault (to_save_trace_data,
891 (int (*) (const char *))
892 tcomplain);
893 de_fault (to_upload_tracepoints,
894 (int (*) (struct uploaded_tp **))
895 return_zero);
896 de_fault (to_upload_trace_state_variables,
897 (int (*) (struct uploaded_tsv **))
898 return_zero);
899 de_fault (to_get_raw_trace_data,
900 (LONGEST (*) (gdb_byte *, ULONGEST, LONGEST))
901 tcomplain);
902 de_fault (to_get_min_fast_tracepoint_insn_len,
903 (int (*) (void))
904 return_minus_one);
905 de_fault (to_set_disconnected_tracing,
906 (void (*) (int))
907 target_ignore);
908 de_fault (to_set_circular_trace_buffer,
909 (void (*) (int))
910 target_ignore);
911 de_fault (to_set_trace_buffer_size,
912 (void (*) (LONGEST))
913 target_ignore);
914 de_fault (to_set_trace_notes,
915 (int (*) (const char *, const char *, const char *))
916 return_zero);
917 de_fault (to_get_tib_address,
918 (int (*) (ptid_t, CORE_ADDR *))
919 tcomplain);
920 de_fault (to_set_permissions,
921 (void (*) (void))
922 target_ignore);
923 de_fault (to_static_tracepoint_marker_at,
924 (int (*) (CORE_ADDR, struct static_tracepoint_marker *))
925 return_zero);
926 de_fault (to_static_tracepoint_markers_by_strid,
927 (VEC(static_tracepoint_marker_p) * (*) (const char *))
928 tcomplain);
929 de_fault (to_traceframe_info,
930 (struct traceframe_info * (*) (void))
931 return_null);
932 de_fault (to_supports_evaluation_of_breakpoint_conditions,
933 (int (*) (struct target_ops *))
934 return_zero);
935 de_fault (to_can_run_breakpoint_commands,
936 (int (*) (void))
937 return_zero);
938 de_fault (to_use_agent,
939 (int (*) (int))
940 tcomplain);
941 de_fault (to_can_use_agent,
942 (int (*) (void))
943 return_zero);
944 de_fault (to_augmented_libraries_svr4_read,
945 (int (*) (void))
946 return_zero);
947 de_fault (to_execution_direction, default_execution_direction);
948
949 #undef de_fault
950
951 /* Finally, position the target-stack beneath the squashed
952 "current_target". That way code looking for a non-inherited
953 target method can quickly and simply find it. */
954 current_target.beneath = target_stack;
955
956 if (targetdebug)
957 setup_target_debug ();
958 }
959
960 /* Push a new target type into the stack of the existing target accessors,
961 possibly superseding some of the existing accessors.
962
963 Rather than allow an empty stack, we always have the dummy target at
964 the bottom stratum, so we can call the function vectors without
965 checking them. */
966
967 void
968 push_target (struct target_ops *t)
969 {
970 struct target_ops **cur;
971
972 /* Check magic number. If wrong, it probably means someone changed
973 the struct definition, but not all the places that initialize one. */
974 if (t->to_magic != OPS_MAGIC)
975 {
976 fprintf_unfiltered (gdb_stderr,
977 "Magic number of %s target struct wrong\n",
978 t->to_shortname);
979 internal_error (__FILE__, __LINE__,
980 _("failed internal consistency check"));
981 }
982
983 /* Find the proper stratum to install this target in. */
984 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
985 {
986 if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
987 break;
988 }
989
990   /* If there are already targets at this stratum, remove them.  */
991 /* FIXME: cagney/2003-10-15: I think this should be popping all
992 targets to CUR, and not just those at this stratum level. */
993 while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
994 {
995 /* There's already something at this stratum level. Close it,
996 and un-hook it from the stack. */
997 struct target_ops *tmp = (*cur);
998
999 (*cur) = (*cur)->beneath;
1000 tmp->beneath = NULL;
1001 target_close (tmp);
1002 }
1003
1004 /* We have removed all targets in our stratum, now add the new one. */
1005 t->beneath = (*cur);
1006 (*cur) = t;
1007
1008 update_current_target ();
1009 }
1010
1011 /* Remove a target_ops vector from the stack, wherever it may be.
1012 Return how many times it was removed (0 or 1). */
1013
1014 int
1015 unpush_target (struct target_ops *t)
1016 {
1017 struct target_ops **cur;
1018 struct target_ops *tmp;
1019
1020 if (t->to_stratum == dummy_stratum)
1021 internal_error (__FILE__, __LINE__,
1022 _("Attempt to unpush the dummy target"));
1023
1024 /* Look for the specified target. Note that we assume that a target
1025 can only occur once in the target stack. */
1026
1027 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1028 {
1029 if ((*cur) == t)
1030 break;
1031 }
1032
1033 /* If we don't find target_ops, quit. Only open targets should be
1034 closed. */
1035 if ((*cur) == NULL)
1036 return 0;
1037
1038 /* Unchain the target. */
1039 tmp = (*cur);
1040 (*cur) = (*cur)->beneath;
1041 tmp->beneath = NULL;
1042
1043 update_current_target ();
1044
1045 /* Finally close the target. Note we do this after unchaining, so
1046 any target method calls from within the target_close
1047 implementation don't end up in T anymore. */
1048 target_close (t);
1049
1050 return 1;
1051 }
1052
1053 void
1054 pop_all_targets_above (enum strata above_stratum)
1055 {
1056 while ((int) (current_target.to_stratum) > (int) above_stratum)
1057 {
1058 if (!unpush_target (target_stack))
1059 {
1060 fprintf_unfiltered (gdb_stderr,
1061 "pop_all_targets couldn't find target %s\n",
1062 target_stack->to_shortname);
1063 internal_error (__FILE__, __LINE__,
1064 _("failed internal consistency check"));
1065 break;
1066 }
1067 }
1068 }
1069
1070 void
1071 pop_all_targets (void)
1072 {
1073 pop_all_targets_above (dummy_stratum);
1074 }
1075
1076 /* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
1077
1078 int
1079 target_is_pushed (struct target_ops *t)
1080 {
1081 struct target_ops **cur;
1082
1083 /* Check magic number. If wrong, it probably means someone changed
1084 the struct definition, but not all the places that initialize one. */
1085 if (t->to_magic != OPS_MAGIC)
1086 {
1087 fprintf_unfiltered (gdb_stderr,
1088 "Magic number of %s target struct wrong\n",
1089 t->to_shortname);
1090 internal_error (__FILE__, __LINE__,
1091 _("failed internal consistency check"));
1092 }
1093
1094 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1095 if (*cur == t)
1096 return 1;
1097
1098 return 0;
1099 }
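
/* Example (sketch; "example_ops" is a hypothetical name): callers use
   this to avoid pushing the same target twice, e.g.

     if (!target_is_pushed (&example_ops))
       push_target (&example_ops);
*/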
1100
1101 /* Using the objfile specified in OBJFILE, find the address for the
1102 current thread's thread-local storage with offset OFFSET. */
1103 CORE_ADDR
1104 target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
1105 {
1106 volatile CORE_ADDR addr = 0;
1107 struct target_ops *target;
1108
1109 for (target = current_target.beneath;
1110 target != NULL;
1111 target = target->beneath)
1112 {
1113 if (target->to_get_thread_local_address != NULL)
1114 break;
1115 }
1116
1117 if (target != NULL
1118 && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
1119 {
1120 ptid_t ptid = inferior_ptid;
1121 volatile struct gdb_exception ex;
1122
1123 TRY_CATCH (ex, RETURN_MASK_ALL)
1124 {
1125 CORE_ADDR lm_addr;
1126
1127 /* Fetch the load module address for this objfile. */
1128 lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
1129 objfile);
1130 /* If it's 0, throw the appropriate exception. */
1131 if (lm_addr == 0)
1132 throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
1133 _("TLS load module not found"));
1134
1135 addr = target->to_get_thread_local_address (target, ptid,
1136 lm_addr, offset);
1137 }
1138 /* If an error occurred, print TLS related messages here. Otherwise,
1139 throw the error to some higher catcher. */
1140 if (ex.reason < 0)
1141 {
1142 int objfile_is_library = (objfile->flags & OBJF_SHARED);
1143
1144 switch (ex.error)
1145 {
1146 case TLS_NO_LIBRARY_SUPPORT_ERROR:
1147 error (_("Cannot find thread-local variables "
1148 "in this thread library."));
1149 break;
1150 case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
1151 if (objfile_is_library)
1152 error (_("Cannot find shared library `%s' in dynamic"
1153 " linker's load module list"), objfile_name (objfile));
1154 else
1155 error (_("Cannot find executable file `%s' in dynamic"
1156 " linker's load module list"), objfile_name (objfile));
1157 break;
1158 case TLS_NOT_ALLOCATED_YET_ERROR:
1159 if (objfile_is_library)
1160 error (_("The inferior has not yet allocated storage for"
1161 " thread-local variables in\n"
1162 "the shared library `%s'\n"
1163 "for %s"),
1164 objfile_name (objfile), target_pid_to_str (ptid));
1165 else
1166 error (_("The inferior has not yet allocated storage for"
1167 " thread-local variables in\n"
1168 "the executable `%s'\n"
1169 "for %s"),
1170 objfile_name (objfile), target_pid_to_str (ptid));
1171 break;
1172 case TLS_GENERIC_ERROR:
1173 if (objfile_is_library)
1174 error (_("Cannot find thread-local storage for %s, "
1175 "shared library %s:\n%s"),
1176 target_pid_to_str (ptid),
1177 objfile_name (objfile), ex.message);
1178 else
1179 error (_("Cannot find thread-local storage for %s, "
1180 "executable file %s:\n%s"),
1181 target_pid_to_str (ptid),
1182 objfile_name (objfile), ex.message);
1183 break;
1184 default:
1185 throw_exception (ex);
1186 break;
1187 }
1188 }
1189 }
1190 /* It wouldn't be wrong here to try a gdbarch method, too; finding
1191 TLS is an ABI-specific thing. But we don't do that yet. */
1192 else
1193 error (_("Cannot find thread-local variables on this target"));
1194
1195 return addr;
1196 }
1197
1198 const char *
1199 target_xfer_status_to_string (enum target_xfer_status err)
1200 {
1201 #define CASE(X) case X: return #X
1202 switch (err)
1203 {
1204 CASE(TARGET_XFER_E_IO);
1205 CASE(TARGET_XFER_E_UNAVAILABLE);
1206 default:
1207 return "<unknown>";
1208 }
1209 #undef CASE
1210 }
1211
1212
1213 #undef MIN
1214 #define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1215
1216 /* target_read_string -- read a null-terminated string, up to LEN bytes,
1217    from MEMADDR in the target.  Set *ERRNOP to the errno code, or 0 if successful.
1218 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1219 is responsible for freeing it. Return the number of bytes successfully
1220 read. */
1221
1222 int
1223 target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
1224 {
1225 int tlen, offset, i;
1226 gdb_byte buf[4];
1227 int errcode = 0;
1228 char *buffer;
1229 int buffer_allocated;
1230 char *bufptr;
1231 unsigned int nbytes_read = 0;
1232
1233 gdb_assert (string);
1234
1235 /* Small for testing. */
1236 buffer_allocated = 4;
1237 buffer = xmalloc (buffer_allocated);
1238 bufptr = buffer;
1239
1240 while (len > 0)
1241 {
1242 tlen = MIN (len, 4 - (memaddr & 3));
1243 offset = memaddr & 3;
1244
1245 errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
1246 if (errcode != 0)
1247 {
1248 /* The transfer request might have crossed the boundary to an
1249 unallocated region of memory. Retry the transfer, requesting
1250 a single byte. */
1251 tlen = 1;
1252 offset = 0;
1253 errcode = target_read_memory (memaddr, buf, 1);
1254 if (errcode != 0)
1255 goto done;
1256 }
1257
1258 if (bufptr - buffer + tlen > buffer_allocated)
1259 {
1260 unsigned int bytes;
1261
1262 bytes = bufptr - buffer;
1263 buffer_allocated *= 2;
1264 buffer = xrealloc (buffer, buffer_allocated);
1265 bufptr = buffer + bytes;
1266 }
1267
1268 for (i = 0; i < tlen; i++)
1269 {
1270 *bufptr++ = buf[i + offset];
1271 if (buf[i + offset] == '\000')
1272 {
1273 nbytes_read += i + 1;
1274 goto done;
1275 }
1276 }
1277
1278 memaddr += tlen;
1279 len -= tlen;
1280 nbytes_read += tlen;
1281 }
1282 done:
1283 *string = buffer;
1284 if (errnop != NULL)
1285 *errnop = errcode;
1286 return nbytes_read;
1287 }
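
/* Example (sketch; ADDR is a hypothetical target address): read up to
   200 bytes of a C string from the inferior, then release the malloc'd
   buffer with xfree.  The buffer is set even on error, so it is always
   freed.

     char *str;
     int err;
     int nread = target_read_string (addr, &str, 200, &err);

     if (nread > 0 && err == 0)
       ... use STR ...
     xfree (str);
*/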
1288
1289 struct target_section_table *
1290 target_get_section_table (struct target_ops *target)
1291 {
1292 struct target_ops *t;
1293
1294 if (targetdebug)
1295 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1296
1297 for (t = target; t != NULL; t = t->beneath)
1298 if (t->to_get_section_table != NULL)
1299 return (*t->to_get_section_table) (t);
1300
1301 return NULL;
1302 }
1303
1304 /* Find a section containing ADDR. */
1305
1306 struct target_section *
1307 target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1308 {
1309 struct target_section_table *table = target_get_section_table (target);
1310 struct target_section *secp;
1311
1312 if (table == NULL)
1313 return NULL;
1314
1315 for (secp = table->sections; secp < table->sections_end; secp++)
1316 {
1317 if (addr >= secp->addr && addr < secp->endaddr)
1318 return secp;
1319 }
1320 return NULL;
1321 }
1322
1323 /* Read memory from the live target, even if currently inspecting a
1324 traceframe. The return is the same as that of target_read. */
1325
1326 static enum target_xfer_status
1327 target_read_live_memory (enum target_object object,
1328 ULONGEST memaddr, gdb_byte *myaddr, ULONGEST len,
1329 ULONGEST *xfered_len)
1330 {
1331 enum target_xfer_status ret;
1332 struct cleanup *cleanup;
1333
1334   /* Switch momentarily out of tfind mode so as to access live memory.
1335 Note that this must not clear global state, such as the frame
1336 cache, which must still remain valid for the previous traceframe.
1337 We may be _building_ the frame cache at this point. */
1338 cleanup = make_cleanup_restore_traceframe_number ();
1339 set_traceframe_number (-1);
1340
1341 ret = target_xfer_partial (current_target.beneath, object, NULL,
1342 myaddr, NULL, memaddr, len, xfered_len);
1343
1344 do_cleanups (cleanup);
1345 return ret;
1346 }
1347
1348 /* Using the set of read-only target sections of OPS, read live
1349 read-only memory. Note that the actual reads start from the
1350 top-most target again.
1351
1352 For interface/parameters/return description see target.h,
1353 to_xfer_partial. */
1354
1355 static enum target_xfer_status
1356 memory_xfer_live_readonly_partial (struct target_ops *ops,
1357 enum target_object object,
1358 gdb_byte *readbuf, ULONGEST memaddr,
1359 ULONGEST len, ULONGEST *xfered_len)
1360 {
1361 struct target_section *secp;
1362 struct target_section_table *table;
1363
1364 secp = target_section_by_addr (ops, memaddr);
1365 if (secp != NULL
1366 && (bfd_get_section_flags (secp->the_bfd_section->owner,
1367 secp->the_bfd_section)
1368 & SEC_READONLY))
1369 {
1370 struct target_section *p;
1371 ULONGEST memend = memaddr + len;
1372
1373 table = target_get_section_table (ops);
1374
1375 for (p = table->sections; p < table->sections_end; p++)
1376 {
1377 if (memaddr >= p->addr)
1378 {
1379 if (memend <= p->endaddr)
1380 {
1381 /* Entire transfer is within this section. */
1382 return target_read_live_memory (object, memaddr,
1383 readbuf, len, xfered_len);
1384 }
1385 else if (memaddr >= p->endaddr)
1386 {
1387 /* This section ends before the transfer starts. */
1388 continue;
1389 }
1390 else
1391 {
1392 /* This section overlaps the transfer. Just do half. */
1393 len = p->endaddr - memaddr;
1394 return target_read_live_memory (object, memaddr,
1395 readbuf, len, xfered_len);
1396 }
1397 }
1398 }
1399 }
1400
1401 return TARGET_XFER_EOF;
1402 }
1403
1404 /* Read memory from more than one valid target. A core file, for
1405    instance, could have some of the memory but delegate other bits to
1406 the target below it. So, we must manually try all targets. */
1407
1408 static enum target_xfer_status
1409 raw_memory_xfer_partial (struct target_ops *ops, gdb_byte *readbuf,
1410 const gdb_byte *writebuf, ULONGEST memaddr, LONGEST len,
1411 ULONGEST *xfered_len)
1412 {
1413 enum target_xfer_status res;
1414
1415 do
1416 {
1417 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1418 readbuf, writebuf, memaddr, len,
1419 xfered_len);
1420 if (res == TARGET_XFER_OK)
1421 break;
1422
1423 /* Stop if the target reports that the memory is not available. */
1424 if (res == TARGET_XFER_E_UNAVAILABLE)
1425 break;
1426
1427 /* We want to continue past core files to executables, but not
1428 past a running target's memory. */
1429 if (ops->to_has_all_memory (ops))
1430 break;
1431
1432 ops = ops->beneath;
1433 }
1434 while (ops != NULL);
1435
1436 return res;
1437 }
1438
1439 /* Perform a partial memory transfer.
1440 For docs see target.h, to_xfer_partial. */
1441
1442 static enum target_xfer_status
1443 memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
1444 gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST memaddr,
1445 ULONGEST len, ULONGEST *xfered_len)
1446 {
1447 enum target_xfer_status res;
1448 int reg_len;
1449 struct mem_region *region;
1450 struct inferior *inf;
1451
1452 /* For accesses to unmapped overlay sections, read directly from
1453 files. Must do this first, as MEMADDR may need adjustment. */
1454 if (readbuf != NULL && overlay_debugging)
1455 {
1456 struct obj_section *section = find_pc_overlay (memaddr);
1457
1458 if (pc_in_unmapped_range (memaddr, section))
1459 {
1460 struct target_section_table *table
1461 = target_get_section_table (ops);
1462 const char *section_name = section->the_bfd_section->name;
1463
1464 memaddr = overlay_mapped_address (memaddr, section);
1465 return section_table_xfer_memory_partial (readbuf, writebuf,
1466 memaddr, len, xfered_len,
1467 table->sections,
1468 table->sections_end,
1469 section_name);
1470 }
1471 }
1472
1473 /* Try the executable files, if "trust-readonly-sections" is set. */
1474 if (readbuf != NULL && trust_readonly)
1475 {
1476 struct target_section *secp;
1477 struct target_section_table *table;
1478
1479 secp = target_section_by_addr (ops, memaddr);
1480 if (secp != NULL
1481 && (bfd_get_section_flags (secp->the_bfd_section->owner,
1482 secp->the_bfd_section)
1483 & SEC_READONLY))
1484 {
1485 table = target_get_section_table (ops);
1486 return section_table_xfer_memory_partial (readbuf, writebuf,
1487 memaddr, len, xfered_len,
1488 table->sections,
1489 table->sections_end,
1490 NULL);
1491 }
1492 }
1493
1494 /* If reading unavailable memory in the context of traceframes, and
1495      this address falls within a read-only section, fall back to
1496 reading from live memory. */
1497 if (readbuf != NULL && get_traceframe_number () != -1)
1498 {
1499 VEC(mem_range_s) *available;
1500
1501 /* If we fail to get the set of available memory, then the
1502 target does not support querying traceframe info, and so we
1503 attempt reading from the traceframe anyway (assuming the
1504 target implements the old QTro packet then). */
1505 if (traceframe_available_memory (&available, memaddr, len))
1506 {
1507 struct cleanup *old_chain;
1508
1509 old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);
1510
1511 if (VEC_empty (mem_range_s, available)
1512 || VEC_index (mem_range_s, available, 0)->start != memaddr)
1513 {
1514 /* Don't read into the traceframe's available
1515 memory. */
1516 if (!VEC_empty (mem_range_s, available))
1517 {
1518 LONGEST oldlen = len;
1519
1520 len = VEC_index (mem_range_s, available, 0)->start - memaddr;
1521 gdb_assert (len <= oldlen);
1522 }
1523
1524 do_cleanups (old_chain);
1525
1526 /* This goes through the topmost target again. */
1527 res = memory_xfer_live_readonly_partial (ops, object,
1528 readbuf, memaddr,
1529 len, xfered_len);
1530 if (res == TARGET_XFER_OK)
1531 return TARGET_XFER_OK;
1532 else
1533 {
1534 /* No use trying further, we know some memory starting
1535 at MEMADDR isn't available. */
1536 *xfered_len = len;
1537 return TARGET_XFER_E_UNAVAILABLE;
1538 }
1539 }
1540
1541 /* Don't try to read more than how much is available, in
1542 case the target implements the deprecated QTro packet to
1543 cater for older GDBs (the target's knowledge of read-only
1544 sections may be outdated by now). */
1545 len = VEC_index (mem_range_s, available, 0)->length;
1546
1547 do_cleanups (old_chain);
1548 }
1549 }
1550
1551 /* Try GDB's internal data cache. */
1552 region = lookup_mem_region (memaddr);
1553 /* region->hi == 0 means there's no upper bound. */
1554 if (memaddr + len < region->hi || region->hi == 0)
1555 reg_len = len;
1556 else
1557 reg_len = region->hi - memaddr;
1558
1559 switch (region->attrib.mode)
1560 {
1561 case MEM_RO:
1562 if (writebuf != NULL)
1563 return TARGET_XFER_E_IO;
1564 break;
1565
1566 case MEM_WO:
1567 if (readbuf != NULL)
1568 return TARGET_XFER_E_IO;
1569 break;
1570
1571 case MEM_FLASH:
1572 /* We only support writing to flash during "load" for now. */
1573 if (writebuf != NULL)
1574 error (_("Writing to flash memory forbidden in this context"));
1575 break;
1576
1577 case MEM_NONE:
1578 return TARGET_XFER_E_IO;
1579 }
1580
1581 if (!ptid_equal (inferior_ptid, null_ptid))
1582 inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
1583 else
1584 inf = NULL;
1585
1586 if (inf != NULL
1587 /* The dcache reads whole cache lines; that doesn't play well
1588 with reading from a trace buffer, because reading outside of
1589 the collected memory range fails. */
1590 && get_traceframe_number () == -1
1591 && (region->attrib.cache
1592 || (stack_cache_enabled_p () && object == TARGET_OBJECT_STACK_MEMORY)
1593 || (code_cache_enabled_p () && object == TARGET_OBJECT_CODE_MEMORY)))
1594 {
1595 DCACHE *dcache = target_dcache_get_or_init ();
1596 int l;
1597
1598 if (readbuf != NULL)
1599 l = dcache_xfer_memory (ops, dcache, memaddr, readbuf, reg_len, 0);
1600 else
1601 /* FIXME drow/2006-08-09: If we're going to preserve const
1602 correctness dcache_xfer_memory should take readbuf and
1603 writebuf. */
1604 l = dcache_xfer_memory (ops, dcache, memaddr, (void *) writebuf,
1605 reg_len, 1);
1606 if (l <= 0)
1607 return TARGET_XFER_E_IO;
1608 else
1609 {
1610 *xfered_len = (ULONGEST) l;
1611 return TARGET_XFER_OK;
1612 }
1613 }
1614
1615 /* If none of those methods found the memory we wanted, fall back
1616 to a target partial transfer. Normally a single call to
1617 to_xfer_partial is enough; if it doesn't recognize an object
1618 it will call the to_xfer_partial of the next target down.
1619 But for memory this won't do. Memory is the only target
1620 object which can be read from more than one valid target.
1621 A core file, for instance, could have some of memory but
1622      A core file, for instance, could have some of the memory but
1623 manually try all targets. */
1624
1625 res = raw_memory_xfer_partial (ops, readbuf, writebuf, memaddr, reg_len,
1626 xfered_len);
1627
1628 /* Make sure the cache gets updated no matter what - if we are writing
1629 to the stack. Even if this write is not tagged as such, we still need
1630 to update the cache. */
1631
1632 if (res == TARGET_XFER_OK
1633 && inf != NULL
1634 && writebuf != NULL
1635 && target_dcache_init_p ()
1636 && !region->attrib.cache
1637 && ((stack_cache_enabled_p () && object != TARGET_OBJECT_STACK_MEMORY)
1638 || (code_cache_enabled_p () && object != TARGET_OBJECT_CODE_MEMORY)))
1639 {
1640 DCACHE *dcache = target_dcache_get ();
1641
1642 dcache_update (dcache, memaddr, (void *) writebuf, reg_len);
1643 }
1644
1645 /* If we still haven't got anything, return the last error. We
1646 give up. */
1647 return res;
1648 }
1649
1650 /* Perform a partial memory transfer. For docs see target.h,
1651 to_xfer_partial. */
1652
1653 static enum target_xfer_status
1654 memory_xfer_partial (struct target_ops *ops, enum target_object object,
1655 gdb_byte *readbuf, const gdb_byte *writebuf,
1656 ULONGEST memaddr, ULONGEST len, ULONGEST *xfered_len)
1657 {
1658 enum target_xfer_status res;
1659
1660 /* Zero length requests are ok and require no work. */
1661 if (len == 0)
1662 return TARGET_XFER_EOF;
1663
1664 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1665 breakpoint insns, thus hiding out from higher layers whether
1666 there are software breakpoints inserted in the code stream. */
1667 if (readbuf != NULL)
1668 {
1669 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len,
1670 xfered_len);
1671
1672 if (res == TARGET_XFER_OK && !show_memory_breakpoints)
1673 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1674 }
1675 else
1676 {
1677 void *buf;
1678 struct cleanup *old_chain;
1679
1680 /* A large write request is likely to be partially satisfied
1681 by memory_xfer_partial_1. We will continually malloc
1682 and free a copy of the entire write request for breakpoint
1683 shadow handling even though we only end up writing a small
1684 subset of it. Cap writes to 4KB to mitigate this. */
1685 len = min (4096, len);
1686
1687 buf = xmalloc (len);
1688 old_chain = make_cleanup (xfree, buf);
1689 memcpy (buf, writebuf, len);
1690
1691 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1692 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len,
1693 xfered_len);
1694
1695 do_cleanups (old_chain);
1696 }
1697
1698 return res;
1699 }
1700
1701 static void
1702 restore_show_memory_breakpoints (void *arg)
1703 {
1704 show_memory_breakpoints = (uintptr_t) arg;
1705 }
1706
1707 struct cleanup *
1708 make_show_memory_breakpoints_cleanup (int show)
1709 {
1710 int current = show_memory_breakpoints;
1711
1712 show_memory_breakpoints = show;
1713 return make_cleanup (restore_show_memory_breakpoints,
1714 (void *) (uintptr_t) current);
1715 }
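
/* Example (sketch): temporarily show the raw memory contents,
   including any breakpoint instructions GDB has inserted, then restore
   the previous setting via the returned cleanup:

     struct cleanup *old_chain = make_show_memory_breakpoints_cleanup (1);

     ... read memory here ...

     do_cleanups (old_chain);
*/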
1716
1717 /* For docs see target.h, to_xfer_partial. */
1718
1719 enum target_xfer_status
1720 target_xfer_partial (struct target_ops *ops,
1721 enum target_object object, const char *annex,
1722 gdb_byte *readbuf, const gdb_byte *writebuf,
1723 ULONGEST offset, ULONGEST len,
1724 ULONGEST *xfered_len)
1725 {
1726 enum target_xfer_status retval;
1727
1728 gdb_assert (ops->to_xfer_partial != NULL);
1729
1730 /* Transfer is done when LEN is zero. */
1731 if (len == 0)
1732 return TARGET_XFER_EOF;
1733
1734 if (writebuf && !may_write_memory)
1735 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1736 core_addr_to_string_nz (offset), plongest (len));
1737
1738 *xfered_len = 0;
1739
1740 /* If this is a memory transfer, let the memory-specific code
1741 have a look at it instead. Memory transfers are more
1742 complicated. */
1743 if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY
1744 || object == TARGET_OBJECT_CODE_MEMORY)
1745 retval = memory_xfer_partial (ops, object, readbuf,
1746 writebuf, offset, len, xfered_len);
1747 else if (object == TARGET_OBJECT_RAW_MEMORY)
1748 {
1749 /* Request the normal memory object from other layers. */
1750 retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
1751 xfered_len);
1752 }
1753 else
1754 retval = ops->to_xfer_partial (ops, object, annex, readbuf,
1755 writebuf, offset, len, xfered_len);
1756
1757 if (targetdebug)
1758 {
1759 const unsigned char *myaddr = NULL;
1760
1761 fprintf_unfiltered (gdb_stdlog,
1762 "%s:target_xfer_partial "
1763 "(%d, %s, %s, %s, %s, %s) = %d, %s",
1764 ops->to_shortname,
1765 (int) object,
1766 (annex ? annex : "(null)"),
1767 host_address_to_string (readbuf),
1768 host_address_to_string (writebuf),
1769 core_addr_to_string_nz (offset),
1770 pulongest (len), retval,
1771 pulongest (*xfered_len));
1772
1773 if (readbuf)
1774 myaddr = readbuf;
1775 if (writebuf)
1776 myaddr = writebuf;
1777 if (retval == TARGET_XFER_OK && myaddr != NULL)
1778 {
1779 int i;
1780
1781 fputs_unfiltered (", bytes =", gdb_stdlog);
1782 for (i = 0; i < *xfered_len; i++)
1783 {
1784 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
1785 {
1786 if (targetdebug < 2 && i > 0)
1787 {
1788 fprintf_unfiltered (gdb_stdlog, " ...");
1789 break;
1790 }
1791 fprintf_unfiltered (gdb_stdlog, "\n");
1792 }
1793
1794 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1795 }
1796 }
1797
1798 fputc_unfiltered ('\n', gdb_stdlog);
1799 }
1800
1801 /* Check implementations of to_xfer_partial update *XFERED_LEN
1802 properly. Do assertion after printing debug messages, so that we
1803 can find more clues on assertion failure from debugging messages. */
1804 if (retval == TARGET_XFER_OK || retval == TARGET_XFER_E_UNAVAILABLE)
1805 gdb_assert (*xfered_len > 0);
1806
1807 return retval;
1808 }
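
/* Example (sketch; ADDR is a hypothetical target address): a caller
   that can cope with partial transfers keeps asking until the whole
   buffer is read or the target reports EOF or an error, in the spirit
   of the target_read wrapper used by the routines below.

     gdb_byte buf[64];
     ULONGEST xfered, total = 0;
     enum target_xfer_status status = TARGET_XFER_OK;

     while (total < sizeof buf && status == TARGET_XFER_OK)
       {
         status = target_xfer_partial (current_target.beneath,
                                       TARGET_OBJECT_MEMORY, NULL,
                                       buf + total, NULL,
                                       addr + total, sizeof buf - total,
                                       &xfered);
         if (status == TARGET_XFER_OK)
           total += xfered;
       }
*/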
1809
1810 /* Read LEN bytes of target memory at address MEMADDR, placing the
1811 results in GDB's memory at MYADDR. Returns either 0 for success or
1812 TARGET_XFER_E_IO if any error occurs.
1813
1814 If an error occurs, no guarantee is made about the contents of the data at
1815 MYADDR. In particular, the caller should not depend upon partial reads
1816 filling the buffer with good data. There is no way for the caller to know
1817 how much good data might have been transfered anyway. Callers that can
1818 deal with partial reads should call target_read (which will retry until
1819 it makes no progress, and then return how much was transferred). */
1820
1821 int
1822 target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1823 {
1824 /* Dispatch to the topmost target, not the flattened current_target.
1825 Memory accesses check target->to_has_(all_)memory, and the
1826 flattened target doesn't inherit those. */
1827 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1828 myaddr, memaddr, len) == len)
1829 return 0;
1830 else
1831 return TARGET_XFER_E_IO;
1832 }
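
/* Example (sketch; ADDR is a hypothetical target address): read one
   word and check the 0-for-success convention.

     gdb_byte buf[4];

     if (target_read_memory (addr, buf, sizeof buf) == 0)
       ... BUF now holds 4 bytes copied from the inferior ...
*/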
1833
1834 /* Like target_read_memory, but specify explicitly that this is a read
1835 from the target's raw memory. That is, this read bypasses the
1836 dcache, breakpoint shadowing, etc. */
1837
1838 int
1839 target_read_raw_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1840 {
1841 /* See comment in target_read_memory about why the request starts at
1842 current_target.beneath. */
1843 if (target_read (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1844 myaddr, memaddr, len) == len)
1845 return 0;
1846 else
1847 return TARGET_XFER_E_IO;
1848 }
1849
1850 /* Like target_read_memory, but specify explicitly that this is a read from
1851 the target's stack. This may trigger different cache behavior. */
1852
1853 int
1854 target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1855 {
1856 /* See comment in target_read_memory about why the request starts at
1857 current_target.beneath. */
1858 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1859 myaddr, memaddr, len) == len)
1860 return 0;
1861 else
1862 return TARGET_XFER_E_IO;
1863 }
1864
1865 /* Like target_read_memory, but specify explicitly that this is a read from
1866 the target's code. This may trigger different cache behavior. */
1867
1868 int
1869 target_read_code (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1870 {
1871 /* See comment in target_read_memory about why the request starts at
1872 current_target.beneath. */
1873 if (target_read (current_target.beneath, TARGET_OBJECT_CODE_MEMORY, NULL,
1874 myaddr, memaddr, len) == len)
1875 return 0;
1876 else
1877 return TARGET_XFER_E_IO;
1878 }
1879
1880 /* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1881 Returns either 0 for success or TARGET_XFER_E_IO if any
1882 error occurs. If an error occurs, no guarantee is made about how
1883 much data got written. Callers that can deal with partial writes
1884 should call target_write. */
1885
1886 int
1887 target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1888 {
1889 /* See comment in target_read_memory about why the request starts at
1890 current_target.beneath. */
1891 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1892 myaddr, memaddr, len) == len)
1893 return 0;
1894 else
1895 return TARGET_XFER_E_IO;
1896 }
1897
1898 /* Write LEN bytes from MYADDR to target raw memory at address
1899 MEMADDR. Returns either 0 for success or TARGET_XFER_E_IO
1900 if any error occurs. If an error occurs, no guarantee is made
1901 about how much data got written. Callers that can deal with
1902 partial writes should call target_write. */
1903
1904 int
1905 target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1906 {
1907 /* See comment in target_read_memory about why the request starts at
1908 current_target.beneath. */
1909 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1910 myaddr, memaddr, len) == len)
1911 return 0;
1912 else
1913 return TARGET_XFER_E_IO;
1914 }
1915
1916 /* Fetch the target's memory map. */
1917
1918 VEC(mem_region_s) *
1919 target_memory_map (void)
1920 {
1921 VEC(mem_region_s) *result;
1922 struct mem_region *last_one, *this_one;
1923 int ix;
1924 struct target_ops *t;
1925
1926 if (targetdebug)
1927 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1928
1929 for (t = current_target.beneath; t != NULL; t = t->beneath)
1930 if (t->to_memory_map != NULL)
1931 break;
1932
1933 if (t == NULL)
1934 return NULL;
1935
1936 result = t->to_memory_map (t);
1937 if (result == NULL)
1938 return NULL;
1939
1940 qsort (VEC_address (mem_region_s, result),
1941 VEC_length (mem_region_s, result),
1942 sizeof (struct mem_region), mem_region_cmp);
1943
1944 /* Check that regions do not overlap. Simultaneously assign
1945 a numbering for the "mem" commands to use to refer to
1946 each region. */
1947 last_one = NULL;
1948 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1949 {
1950 this_one->number = ix;
1951
1952 if (last_one && last_one->hi > this_one->lo)
1953 {
1954 warning (_("Overlapping regions in memory map: ignoring"));
1955 VEC_free (mem_region_s, result);
1956 return NULL;
1957 }
1958 last_one = this_one;
1959 }
1960
1961 return result;
1962 }
1963
1964 void
1965 target_flash_erase (ULONGEST address, LONGEST length)
1966 {
1967 struct target_ops *t;
1968
1969 for (t = current_target.beneath; t != NULL; t = t->beneath)
1970 if (t->to_flash_erase != NULL)
1971 {
1972 if (targetdebug)
1973 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1974 hex_string (address), phex (length, 0));
1975 t->to_flash_erase (t, address, length);
1976 return;
1977 }
1978
1979 tcomplain ();
1980 }
1981
1982 void
1983 target_flash_done (void)
1984 {
1985 struct target_ops *t;
1986
1987 for (t = current_target.beneath; t != NULL; t = t->beneath)
1988 if (t->to_flash_done != NULL)
1989 {
1990 if (targetdebug)
1991 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1992 t->to_flash_done (t);
1993 return;
1994 }
1995
1996 tcomplain ();
1997 }
1998
1999 static void
2000 show_trust_readonly (struct ui_file *file, int from_tty,
2001 struct cmd_list_element *c, const char *value)
2002 {
2003 fprintf_filtered (file,
2004 _("Mode for reading from readonly sections is %s.\n"),
2005 value);
2006 }
2007
2008 /* More generic transfers. */
2009
2010 static enum target_xfer_status
2011 default_xfer_partial (struct target_ops *ops, enum target_object object,
2012 const char *annex, gdb_byte *readbuf,
2013 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
2014 ULONGEST *xfered_len)
2015 {
2016 if (object == TARGET_OBJECT_MEMORY
2017 && ops->deprecated_xfer_memory != NULL)
2018 /* If available, fall back to the target's
2019 "deprecated_xfer_memory" method. */
2020 {
2021 int xfered = -1;
2022
2023 errno = 0;
2024 if (writebuf != NULL)
2025 {
2026 void *buffer = xmalloc (len);
2027 struct cleanup *cleanup = make_cleanup (xfree, buffer);
2028
2029 memcpy (buffer, writebuf, len);
2030 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
2031 1/*write*/, NULL, ops);
2032 do_cleanups (cleanup);
2033 }
2034 if (readbuf != NULL)
2035 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
2036 0/*read*/, NULL, ops);
2037 if (xfered > 0)
2038 {
2039 *xfered_len = (ULONGEST) xfered;
2040 return TARGET_XFER_OK;
2041 }
2042 else if (xfered == 0 && errno == 0)
2043 /* "deprecated_xfer_memory" uses 0, cross checked against
2044 ERRNO as one indication of an error. */
2045 return TARGET_XFER_EOF;
2046 else
2047 return TARGET_XFER_E_IO;
2048 }
2049 else
2050 {
2051 gdb_assert (ops->beneath != NULL);
2052 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
2053 readbuf, writebuf, offset, len,
2054 xfered_len);
2055 }
2056 }
2057
2058 /* Target vector read/write partial wrapper functions. */
2059
2060 static enum target_xfer_status
2061 target_read_partial (struct target_ops *ops,
2062 enum target_object object,
2063 const char *annex, gdb_byte *buf,
2064 ULONGEST offset, ULONGEST len,
2065 ULONGEST *xfered_len)
2066 {
2067 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len,
2068 xfered_len);
2069 }
2070
2071 static enum target_xfer_status
2072 target_write_partial (struct target_ops *ops,
2073 enum target_object object,
2074 const char *annex, const gdb_byte *buf,
2075 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
2076 {
2077 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len,
2078 xfered_len);
2079 }
2080
2081 /* Wrappers to perform the full transfer. */
2082
2083 /* For docs on target_read see target.h. */
2084
2085 LONGEST
2086 target_read (struct target_ops *ops,
2087 enum target_object object,
2088 const char *annex, gdb_byte *buf,
2089 ULONGEST offset, LONGEST len)
2090 {
2091 LONGEST xfered = 0;
2092
2093 while (xfered < len)
2094 {
2095 ULONGEST xfered_len;
2096 enum target_xfer_status status;
2097
2098 status = target_read_partial (ops, object, annex,
2099 (gdb_byte *) buf + xfered,
2100 offset + xfered, len - xfered,
2101 &xfered_len);
2102
2103 /* Call an observer, notifying them of the xfer progress? */
2104 if (status == TARGET_XFER_EOF)
2105 return xfered;
2106 else if (status == TARGET_XFER_OK)
2107 {
2108 xfered += xfered_len;
2109 QUIT;
2110 }
2111 else
2112 return -1;
2113
2114 }
2115 return len;
2116 }
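
/* A usage sketch for target_read, illustrative only (OPS, BUF, ADDR
   and LEN are hypothetical names):

     LONGEST n = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
                              buf, addr, len);

   A negative N means an error before any progress was made, N < LEN
   means a short read where only the first N bytes of BUF are valid,
   and N == LEN means the full transfer succeeded.  This is the
   wrapper to use when partial results are acceptable.  */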
2117
2118 /* Assuming that the entire [begin, end) range of memory cannot be
2119 read, try to read whatever subrange is possible to read.
2120
2121 The function returns, in RESULT, either zero or one memory block.
2122 If there's a readable subrange at the beginning, it is completely
2123 read and returned. Any further readable subrange will not be read.
2124 Otherwise, if there's a readable subrange at the end, it will be
2125 completely read and returned. Any readable subranges before it
2126 (which necessarily do not start at the beginning) are ignored. In
2127 other cases -- either no readable subrange, or readable subrange(s)
2128 that are neither at the beginning nor at the end -- nothing is returned.
2129
2130 The purpose of this function is to handle a read across a boundary
2131 of accessible memory in a case when memory map is not available.
2132 The above restrictions are fine for this case, but will give
2133 incorrect results if the memory is 'patchy'. However, supporting
2134 'patchy' memory would require trying to read every single byte,
2135 which seems an unacceptable solution. An explicit memory map is
2136 recommended for this case -- and read_memory_robust will
2137 take care of reading multiple ranges then. */
2138
2139 static void
2140 read_whatever_is_readable (struct target_ops *ops,
2141 ULONGEST begin, ULONGEST end,
2142 VEC(memory_read_result_s) **result)
2143 {
2144 gdb_byte *buf = xmalloc (end - begin);
2145 ULONGEST current_begin = begin;
2146 ULONGEST current_end = end;
2147 int forward;
2148 memory_read_result_s r;
2149 ULONGEST xfered_len;
2150
2151 /* If we previously failed to read 1 byte, nothing can be done here. */
2152 if (end - begin <= 1)
2153 {
2154 xfree (buf);
2155 return;
2156 }
2157
2158 /* Check that either the first or the last byte is readable, and give up
2159 if not. This heuristic is meant to permit reading accessible memory
2160 at the boundary of an accessible region. */
2161 if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2162 buf, begin, 1, &xfered_len) == TARGET_XFER_OK)
2163 {
2164 forward = 1;
2165 ++current_begin;
2166 }
2167 else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2168 buf + (end-begin) - 1, end - 1, 1,
2169 &xfered_len) == TARGET_XFER_OK)
2170 {
2171 forward = 0;
2172 --current_end;
2173 }
2174 else
2175 {
2176 xfree (buf);
2177 return;
2178 }
2179
2180 /* The loop invariant is that the range [current_begin, current_end) was
2181 previously found to be not readable as a whole.
2182
2183 Note the loop condition -- if the range has only 1 byte, we can't divide
2184 it further, so there's no point trying. */
2185 while (current_end - current_begin > 1)
2186 {
2187 ULONGEST first_half_begin, first_half_end;
2188 ULONGEST second_half_begin, second_half_end;
2189 LONGEST xfer;
2190 ULONGEST middle = current_begin + (current_end - current_begin)/2;
2191
2192 if (forward)
2193 {
2194 first_half_begin = current_begin;
2195 first_half_end = middle;
2196 second_half_begin = middle;
2197 second_half_end = current_end;
2198 }
2199 else
2200 {
2201 first_half_begin = middle;
2202 first_half_end = current_end;
2203 second_half_begin = current_begin;
2204 second_half_end = middle;
2205 }
2206
2207 xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2208 buf + (first_half_begin - begin),
2209 first_half_begin,
2210 first_half_end - first_half_begin);
2211
2212 if (xfer == first_half_end - first_half_begin)
2213 {
2214 /* This half reads up fine. So, the error must be in the
2215 other half. */
2216 current_begin = second_half_begin;
2217 current_end = second_half_end;
2218 }
2219 else
2220 {
2221 /* This half is not readable. Because we've tried one byte, we
2222 know some part of this half is actually readable. Go to the next
2223 iteration to divide again and try to read.
2224
2225 We don't handle the other half, because this function only tries
2226 to read a single readable subrange. */
2227 current_begin = first_half_begin;
2228 current_end = first_half_end;
2229 }
2230 }
2231
2232 if (forward)
2233 {
2234 /* The [begin, current_begin) range has been read. */
2235 r.begin = begin;
2236 r.end = current_begin;
2237 r.data = buf;
2238 }
2239 else
2240 {
2241 /* The [current_end, end) range has been read. */
2242 LONGEST rlen = end - current_end;
2243
2244 r.data = xmalloc (rlen);
2245 memcpy (r.data, buf + current_end - begin, rlen);
2246 r.begin = current_end;
2247 r.end = end;
2248 xfree (buf);
2249 }
2250 VEC_safe_push(memory_read_result_s, (*result), &r);
2251 }
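
/* A worked example of the bisection above, with assumed numbers:
   suppose [0x1000, 0x2000) was requested but only [0x1000, 0x1800) is
   readable.  The byte at 0x1000 reads fine, so we scan forward and
   bisect [0x1001, 0x2000).  The first half [0x1001, 0x1800) reads
   completely, so the failure must lie in [0x1800, 0x2000); that range
   keeps failing and is halved until it is a single byte, leaving
   current_begin == 0x1800, and the single block [0x1000, 0x1800) is
   pushed onto RESULT.  */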
2252
2253 void
2254 free_memory_read_result_vector (void *x)
2255 {
2256 VEC(memory_read_result_s) *v = x;
2257 memory_read_result_s *current;
2258 int ix;
2259
2260 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2261 {
2262 xfree (current->data);
2263 }
2264 VEC_free (memory_read_result_s, v);
2265 }
2266
2267 VEC(memory_read_result_s) *
2268 read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
2269 {
2270 VEC(memory_read_result_s) *result = 0;
2271
2272 LONGEST xfered = 0;
2273 while (xfered < len)
2274 {
2275 struct mem_region *region = lookup_mem_region (offset + xfered);
2276 LONGEST rlen;
2277
2278 /* If there is no explicit region, a fake one should be created. */
2279 gdb_assert (region);
2280
2281 if (region->hi == 0)
2282 rlen = len - xfered;
2283 else
2284 rlen = region->hi - (offset + xfered);
2285
2286 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
2287 {
2288 /* Cannot read this region. Note that we can end up here only
2289 if the region is explicitly marked inaccessible, or
2290 'inaccessible-by-default' is in effect. */
2291 xfered += rlen;
2292 }
2293 else
2294 {
2295 LONGEST to_read = min (len - xfered, rlen);
2296 gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);
2297
2298 LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2299 (gdb_byte *) buffer,
2300 offset + xfered, to_read);
2301 /* Call an observer, notifying them of the xfer progress? */
2302 if (xfer <= 0)
2303 {
2304 /* Got an error reading full chunk. See if maybe we can read
2305 some subrange. */
2306 xfree (buffer);
2307 read_whatever_is_readable (ops, offset + xfered,
2308 offset + xfered + to_read, &result);
2309 xfered += to_read;
2310 }
2311 else
2312 {
2313 struct memory_read_result r;
2314 r.data = buffer;
2315 r.begin = offset + xfered;
2316 r.end = r.begin + xfer;
2317 VEC_safe_push (memory_read_result_s, result, &r);
2318 xfered += xfer;
2319 }
2320 QUIT;
2321 }
2322 }
2323 return result;
2324 }
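
/* A usage sketch, illustrative only (OPS, ADDR and LEN are
   hypothetical names):

     VEC(memory_read_result_s) *res = read_memory_robust (ops, addr, len);
     struct cleanup *back_to
       = make_cleanup (free_memory_read_result_vector, res);
     memory_read_result_s *mr;
     int ix;

     for (ix = 0; VEC_iterate (memory_read_result_s, res, ix, mr); ix++)
       process the readable block [mr->begin, mr->end) held in mr->data;

     do_cleanups (back_to);

   Ranges that could not be read at all simply do not appear in the
   vector.  */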
2325
2326
2327 /* An alternative to target_write with progress callbacks. */
2328
2329 LONGEST
2330 target_write_with_progress (struct target_ops *ops,
2331 enum target_object object,
2332 const char *annex, const gdb_byte *buf,
2333 ULONGEST offset, LONGEST len,
2334 void (*progress) (ULONGEST, void *), void *baton)
2335 {
2336 LONGEST xfered = 0;
2337
2338 /* Give the progress callback a chance to set up. */
2339 if (progress)
2340 (*progress) (0, baton);
2341
2342 while (xfered < len)
2343 {
2344 ULONGEST xfered_len;
2345 enum target_xfer_status status;
2346
2347 status = target_write_partial (ops, object, annex,
2348 (gdb_byte *) buf + xfered,
2349 offset + xfered, len - xfered,
2350 &xfered_len);
2351
2352 if (status == TARGET_XFER_EOF)
2353 return xfered;
2354 if (TARGET_XFER_STATUS_ERROR_P (status))
2355 return -1;
2356
2357 gdb_assert (status == TARGET_XFER_OK);
2358 if (progress)
2359 (*progress) (xfered_len, baton);
2360
2361 xfered += xfered_len;
2362 QUIT;
2363 }
2364 return len;
2365 }
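
/* A sketch of a progress callback, illustrative only (the names below
   are hypothetical).  The callback is invoked once with 0 before the
   first chunk, then once per successful partial write with the size
   of that chunk:

     static void
     note_progress (ULONGEST written, void *baton)
     {
       ULONGEST *total = baton;

       *total += written;
     }

     ULONGEST total = 0;
     target_write_with_progress (ops, TARGET_OBJECT_MEMORY, NULL, buf,
                                 addr, len, note_progress, &total);
*/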
2366
2367 /* For docs on target_write see target.h. */
2368
2369 LONGEST
2370 target_write (struct target_ops *ops,
2371 enum target_object object,
2372 const char *annex, const gdb_byte *buf,
2373 ULONGEST offset, LONGEST len)
2374 {
2375 return target_write_with_progress (ops, object, annex, buf, offset, len,
2376 NULL, NULL);
2377 }
2378
2379 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2380 the size of the transferred data. PADDING additional bytes are
2381 available in *BUF_P. This is a helper function for
2382 target_read_alloc; see the declaration of that function for more
2383 information. */
2384
2385 static LONGEST
2386 target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2387 const char *annex, gdb_byte **buf_p, int padding)
2388 {
2389 size_t buf_alloc, buf_pos;
2390 gdb_byte *buf;
2391
2392 /* This function does not have a length parameter; it reads the
2393 entire OBJECT. Also, it doesn't support objects fetched partly
2394 from one target and partly from another (in a different stratum,
2395 e.g. a core file and an executable). Both reasons make it
2396 unsuitable for reading memory. */
2397 gdb_assert (object != TARGET_OBJECT_MEMORY);
2398
2399 /* Start by reading up to 4K at a time. The target will throttle
2400 this number down if necessary. */
2401 buf_alloc = 4096;
2402 buf = xmalloc (buf_alloc);
2403 buf_pos = 0;
2404 while (1)
2405 {
2406 ULONGEST xfered_len;
2407 enum target_xfer_status status;
2408
2409 status = target_read_partial (ops, object, annex, &buf[buf_pos],
2410 buf_pos, buf_alloc - buf_pos - padding,
2411 &xfered_len);
2412
2413 if (status == TARGET_XFER_EOF)
2414 {
2415 /* Read all there was. */
2416 if (buf_pos == 0)
2417 xfree (buf);
2418 else
2419 *buf_p = buf;
2420 return buf_pos;
2421 }
2422 else if (status != TARGET_XFER_OK)
2423 {
2424 /* An error occurred. */
2425 xfree (buf);
2426 return TARGET_XFER_E_IO;
2427 }
2428
2429 buf_pos += xfered_len;
2430
2431 /* If the buffer is filling up, expand it. */
2432 if (buf_alloc < buf_pos * 2)
2433 {
2434 buf_alloc *= 2;
2435 buf = xrealloc (buf, buf_alloc);
2436 }
2437
2438 QUIT;
2439 }
2440 }
2441
2442 /* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2443 the size of the transferred data. See the declaration in "target.h"
2444 for more information about the return value. */
2445
2446 LONGEST
2447 target_read_alloc (struct target_ops *ops, enum target_object object,
2448 const char *annex, gdb_byte **buf_p)
2449 {
2450 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
2451 }
2452
2453 /* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2454 returned as a string, allocated using xmalloc. If an error occurs
2455 or the transfer is unsupported, NULL is returned. Empty objects
2456 are returned as allocated but empty strings. A warning is issued
2457 if the result contains any embedded NUL bytes. */
2458
2459 char *
2460 target_read_stralloc (struct target_ops *ops, enum target_object object,
2461 const char *annex)
2462 {
2463 gdb_byte *buffer;
2464 char *bufstr;
2465 LONGEST i, transferred;
2466
2467 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
2468 bufstr = (char *) buffer;
2469
2470 if (transferred < 0)
2471 return NULL;
2472
2473 if (transferred == 0)
2474 return xstrdup ("");
2475
2476 bufstr[transferred] = 0;
2477
2478 /* Check for embedded NUL bytes; but allow trailing NULs. */
2479 for (i = strlen (bufstr); i < transferred; i++)
2480 if (bufstr[i] != 0)
2481 {
2482 warning (_("target object %d, annex %s, "
2483 "contained unexpected null characters"),
2484 (int) object, annex ? annex : "(none)");
2485 break;
2486 }
2487
2488 return bufstr;
2489 }
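
/* A usage sketch, illustrative only (OPS is hypothetical, and the
   object and annex below are just an example; see target_get_osdata
   for a real caller):

     char *text = target_read_stralloc (ops, TARGET_OBJECT_OSDATA,
                                        "processes");

     if (text != NULL)
       {
         struct cleanup *back_to = make_cleanup (xfree, text);

         parse TEXT here;
         do_cleanups (back_to);
       }
*/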
2490
2491 /* Memory transfer methods. */
2492
2493 void
2494 get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2495 LONGEST len)
2496 {
2497 /* This method is used to read from an alternate, non-current
2498 target. This read must bypass the overlay support (as symbols
2499 don't match this target), and GDB's internal cache (wrong cache
2500 for this target). */
2501 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2502 != len)
2503 memory_error (TARGET_XFER_E_IO, addr);
2504 }
2505
2506 ULONGEST
2507 get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2508 int len, enum bfd_endian byte_order)
2509 {
2510 gdb_byte buf[sizeof (ULONGEST)];
2511
2512 gdb_assert (len <= sizeof (buf));
2513 get_target_memory (ops, addr, buf, len);
2514 return extract_unsigned_integer (buf, len, byte_order);
2515 }
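
/* For example (illustrative; OPS and ADDR are hypothetical names),
   reading a 4-byte little-endian value from an alternate target's
   memory:

     ULONGEST val
       = get_target_memory_unsigned (ops, addr, 4, BFD_ENDIAN_LITTLE);

   The read goes through TARGET_OBJECT_RAW_MEMORY, so it bypasses the
   dcache and breakpoint shadowing.  */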
2516
2517 /* See target.h. */
2518
2519 int
2520 target_insert_breakpoint (struct gdbarch *gdbarch,
2521 struct bp_target_info *bp_tgt)
2522 {
2523 if (!may_insert_breakpoints)
2524 {
2525 warning (_("May not insert breakpoints"));
2526 return 1;
2527 }
2528
2529 return current_target.to_insert_breakpoint (&current_target,
2530 gdbarch, bp_tgt);
2531 }
2532
2533 /* See target.h. */
2534
2535 int
2536 target_remove_breakpoint (struct gdbarch *gdbarch,
2537 struct bp_target_info *bp_tgt)
2538 {
2539 /* This is kind of a weird case to handle, but the permission might
2540 have been changed after breakpoints were inserted - in which case
2541 we should just take the user literally and assume that any
2542 breakpoints should be left in place. */
2543 if (!may_insert_breakpoints)
2544 {
2545 warning (_("May not remove breakpoints"));
2546 return 1;
2547 }
2548
2549 return current_target.to_remove_breakpoint (&current_target,
2550 gdbarch, bp_tgt);
2551 }
2552
2553 static void
2554 target_info (char *args, int from_tty)
2555 {
2556 struct target_ops *t;
2557 int has_all_mem = 0;
2558
2559 if (symfile_objfile != NULL)
2560 printf_unfiltered (_("Symbols from \"%s\".\n"),
2561 objfile_name (symfile_objfile));
2562
2563 for (t = target_stack; t != NULL; t = t->beneath)
2564 {
2565 if (!(*t->to_has_memory) (t))
2566 continue;
2567
2568 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2569 continue;
2570 if (has_all_mem)
2571 printf_unfiltered (_("\tWhile running this, "
2572 "GDB does not access memory from...\n"));
2573 printf_unfiltered ("%s:\n", t->to_longname);
2574 (t->to_files_info) (t);
2575 has_all_mem = (*t->to_has_all_memory) (t);
2576 }
2577 }
2578
2579 /* This function is called before any new inferior is created, e.g.
2580 by running a program, attaching, or connecting to a target.
2581 It cleans up any state from previous invocations which might
2582 change between runs. This is a subset of what target_preopen
2583 resets (things which might change between targets). */
2584
2585 void
2586 target_pre_inferior (int from_tty)
2587 {
2588 /* Clear out solib state. Otherwise the solib state of the previous
2589 inferior might have survived and is entirely wrong for the new
2590 target. This has been observed on GNU/Linux using glibc 2.3. How
2591 to reproduce:
2592
2593 bash$ ./foo&
2594 [1] 4711
2595 bash$ ./foo&
2596 [1] 4712
2597 bash$ gdb ./foo
2598 [...]
2599 (gdb) attach 4711
2600 (gdb) detach
2601 (gdb) attach 4712
2602 Cannot access memory at address 0xdeadbeef
2603 */
2604
2605 /* In some OSs, the shared library list is the same/global/shared
2606 across inferiors. If code is shared between processes, so are
2607 memory regions and features. */
2608 if (!gdbarch_has_global_solist (target_gdbarch ()))
2609 {
2610 no_shared_libraries (NULL, from_tty);
2611
2612 invalidate_target_mem_regions ();
2613
2614 target_clear_description ();
2615 }
2616
2617 agent_capability_invalidate ();
2618 }
2619
2620 /* Callback for iterate_over_inferiors. Gets rid of the given
2621 inferior. */
2622
2623 static int
2624 dispose_inferior (struct inferior *inf, void *args)
2625 {
2626 struct thread_info *thread;
2627
2628 thread = any_thread_of_process (inf->pid);
2629 if (thread)
2630 {
2631 switch_to_thread (thread->ptid);
2632
2633 /* Core inferiors actually should be detached, not killed. */
2634 if (target_has_execution)
2635 target_kill ();
2636 else
2637 target_detach (NULL, 0);
2638 }
2639
2640 return 0;
2641 }
2642
2643 /* This is to be called by the open routine before it does
2644 anything. */
2645
2646 void
2647 target_preopen (int from_tty)
2648 {
2649 dont_repeat ();
2650
2651 if (have_inferiors ())
2652 {
2653 if (!from_tty
2654 || !have_live_inferiors ()
2655 || query (_("A program is being debugged already. Kill it? ")))
2656 iterate_over_inferiors (dispose_inferior, NULL);
2657 else
2658 error (_("Program not killed."));
2659 }
2660
2661 /* Calling target_kill may remove the target from the stack. But if
2662 it doesn't (which seems like a win for UDI), remove it now. */
2663 /* Leave the exec target, though. The user may be switching from a
2664 live process to a core of the same program. */
2665 pop_all_targets_above (file_stratum);
2666
2667 target_pre_inferior (from_tty);
2668 }
2669
2670 /* Detach a target after doing deferred register stores. */
2671
2672 void
2673 target_detach (const char *args, int from_tty)
2674 {
2675 struct target_ops* t;
2676
2677 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2678 /* Don't remove global breakpoints here. They're removed on
2679 disconnection from the target. */
2680 ;
2681 else
2682 /* If we're in breakpoints-always-inserted mode, have to remove
2683 them before detaching. */
2684 remove_breakpoints_pid (ptid_get_pid (inferior_ptid));
2685
2686 prepare_for_detach ();
2687
2688 for (t = current_target.beneath; t != NULL; t = t->beneath)
2689 {
2690 if (t->to_detach != NULL)
2691 {
2692 t->to_detach (t, args, from_tty);
2693 if (targetdebug)
2694 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2695 args, from_tty);
2696 return;
2697 }
2698 }
2699
2700 internal_error (__FILE__, __LINE__, _("could not find a target to detach"));
2701 }
2702
2703 void
2704 target_disconnect (char *args, int from_tty)
2705 {
2706 struct target_ops *t;
2707
2708 /* If we're in breakpoints-always-inserted mode or if breakpoints
2709 are global across processes, we have to remove them before
2710 disconnecting. */
2711 remove_breakpoints ();
2712
2713 for (t = current_target.beneath; t != NULL; t = t->beneath)
2714 if (t->to_disconnect != NULL)
2715 {
2716 if (targetdebug)
2717 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2718 args, from_tty);
2719 t->to_disconnect (t, args, from_tty);
2720 return;
2721 }
2722
2723 tcomplain ();
2724 }
2725
2726 ptid_t
2727 target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2728 {
2729 struct target_ops *t;
2730 ptid_t retval = (current_target.to_wait) (&current_target, ptid,
2731 status, options);
2732
2733 if (targetdebug)
2734 {
2735 char *status_string;
2736 char *options_string;
2737
2738 status_string = target_waitstatus_to_string (status);
2739 options_string = target_options_to_string (options);
2740 fprintf_unfiltered (gdb_stdlog,
2741 "target_wait (%d, status, options={%s})"
2742 " = %d, %s\n",
2743 ptid_get_pid (ptid), options_string,
2744 ptid_get_pid (retval), status_string);
2745 xfree (status_string);
2746 xfree (options_string);
2747 }
2748
2749 return retval;
2750 }
2751
2752 char *
2753 target_pid_to_str (ptid_t ptid)
2754 {
2755 struct target_ops *t;
2756
2757 for (t = current_target.beneath; t != NULL; t = t->beneath)
2758 {
2759 if (t->to_pid_to_str != NULL)
2760 return (*t->to_pid_to_str) (t, ptid);
2761 }
2762
2763 return normal_pid_to_str (ptid);
2764 }
2765
2766 char *
2767 target_thread_name (struct thread_info *info)
2768 {
2769 struct target_ops *t;
2770
2771 for (t = current_target.beneath; t != NULL; t = t->beneath)
2772 {
2773 if (t->to_thread_name != NULL)
2774 return (*t->to_thread_name) (t, info);
2775 }
2776
2777 return NULL;
2778 }
2779
2780 void
2781 target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2782 {
2783 struct target_ops *t;
2784
2785 target_dcache_invalidate ();
2786
2787 current_target.to_resume (&current_target, ptid, step, signal);
2788 if (targetdebug)
2789 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2790 ptid_get_pid (ptid),
2791 step ? "step" : "continue",
2792 gdb_signal_to_name (signal));
2793
2794 registers_changed_ptid (ptid);
2795 set_executing (ptid, 1);
2796 set_running (ptid, 1);
2797 clear_inline_frame_state (ptid);
2798 }
2799
2800 void
2801 target_pass_signals (int numsigs, unsigned char *pass_signals)
2802 {
2803 struct target_ops *t;
2804
2805 for (t = current_target.beneath; t != NULL; t = t->beneath)
2806 {
2807 if (t->to_pass_signals != NULL)
2808 {
2809 if (targetdebug)
2810 {
2811 int i;
2812
2813 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2814 numsigs);
2815
2816 for (i = 0; i < numsigs; i++)
2817 if (pass_signals[i])
2818 fprintf_unfiltered (gdb_stdlog, " %s",
2819 gdb_signal_to_name (i));
2820
2821 fprintf_unfiltered (gdb_stdlog, " })\n");
2822 }
2823
2824 (*t->to_pass_signals) (t, numsigs, pass_signals);
2825 return;
2826 }
2827 }
2828 }
2829
2830 void
2831 target_program_signals (int numsigs, unsigned char *program_signals)
2832 {
2833 struct target_ops *t;
2834
2835 for (t = current_target.beneath; t != NULL; t = t->beneath)
2836 {
2837 if (t->to_program_signals != NULL)
2838 {
2839 if (targetdebug)
2840 {
2841 int i;
2842
2843 fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
2844 numsigs);
2845
2846 for (i = 0; i < numsigs; i++)
2847 if (program_signals[i])
2848 fprintf_unfiltered (gdb_stdlog, " %s",
2849 gdb_signal_to_name (i));
2850
2851 fprintf_unfiltered (gdb_stdlog, " })\n");
2852 }
2853
2854 (*t->to_program_signals) (t, numsigs, program_signals);
2855 return;
2856 }
2857 }
2858 }
2859
2860 /* Look through the list of possible targets for a target that can
2861 follow forks. */
2862
2863 int
2864 target_follow_fork (int follow_child, int detach_fork)
2865 {
2866 struct target_ops *t;
2867
2868 for (t = current_target.beneath; t != NULL; t = t->beneath)
2869 {
2870 if (t->to_follow_fork != NULL)
2871 {
2872 int retval = t->to_follow_fork (t, follow_child, detach_fork);
2873
2874 if (targetdebug)
2875 fprintf_unfiltered (gdb_stdlog,
2876 "target_follow_fork (%d, %d) = %d\n",
2877 follow_child, detach_fork, retval);
2878 return retval;
2879 }
2880 }
2881
2882 /* Some target returned a fork event, but did not know how to follow it. */
2883 internal_error (__FILE__, __LINE__,
2884 _("could not find a target to follow fork"));
2885 }
2886
2887 void
2888 target_mourn_inferior (void)
2889 {
2890 struct target_ops *t;
2891
2892 for (t = current_target.beneath; t != NULL; t = t->beneath)
2893 {
2894 if (t->to_mourn_inferior != NULL)
2895 {
2896 t->to_mourn_inferior (t);
2897 if (targetdebug)
2898 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2899
2900 /* We no longer need to keep handles on any of the object files.
2901 Make sure to release them to avoid unnecessarily locking any
2902 of them while we're not actually debugging. */
2903 bfd_cache_close_all ();
2904
2905 return;
2906 }
2907 }
2908
2909 internal_error (__FILE__, __LINE__,
2910 _("could not find a target to follow mourn inferior"));
2911 }
2912
2913 /* Look for a target which can describe architectural features, starting
2914 from TARGET. If we find one, return its description. */
2915
2916 const struct target_desc *
2917 target_read_description (struct target_ops *target)
2918 {
2919 struct target_ops *t;
2920
2921 for (t = target; t != NULL; t = t->beneath)
2922 if (t->to_read_description != NULL)
2923 {
2924 const struct target_desc *tdesc;
2925
2926 tdesc = t->to_read_description (t);
2927 if (tdesc)
2928 return tdesc;
2929 }
2930
2931 return NULL;
2932 }
2933
2934 /* The default implementation of to_search_memory.
2935 This implements a basic search of memory, reading target memory and
2936 performing the search here (as opposed to performing the search on the
2937 target side with, for example, gdbserver). */
2938
2939 int
2940 simple_search_memory (struct target_ops *ops,
2941 CORE_ADDR start_addr, ULONGEST search_space_len,
2942 const gdb_byte *pattern, ULONGEST pattern_len,
2943 CORE_ADDR *found_addrp)
2944 {
2945 /* NOTE: also defined in find.c testcase. */
2946 #define SEARCH_CHUNK_SIZE 16000
2947 const unsigned chunk_size = SEARCH_CHUNK_SIZE;
2948 /* Buffer to hold memory contents for searching. */
2949 gdb_byte *search_buf;
2950 unsigned search_buf_size;
2951 struct cleanup *old_cleanups;
2952
2953 search_buf_size = chunk_size + pattern_len - 1;
2954
2955 /* No point in trying to allocate a buffer larger than the search space. */
2956 if (search_space_len < search_buf_size)
2957 search_buf_size = search_space_len;
2958
2959 search_buf = malloc (search_buf_size);
2960 if (search_buf == NULL)
2961 error (_("Unable to allocate memory to perform the search."));
2962 old_cleanups = make_cleanup (free_current_contents, &search_buf);
2963
2964 /* Prime the search buffer. */
2965
2966 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2967 search_buf, start_addr, search_buf_size) != search_buf_size)
2968 {
2969 warning (_("Unable to access %s bytes of target "
2970 "memory at %s, halting search."),
2971 pulongest (search_buf_size), hex_string (start_addr));
2972 do_cleanups (old_cleanups);
2973 return -1;
2974 }
2975
2976 /* Perform the search.
2977
2978 The loop is kept simple by allocating [N + pattern-length - 1] bytes.
2979 When we've scanned N bytes we copy the trailing bytes to the start and
2980 read in another N bytes. */
2981
2982 while (search_space_len >= pattern_len)
2983 {
2984 gdb_byte *found_ptr;
2985 unsigned nr_search_bytes = min (search_space_len, search_buf_size);
2986
2987 found_ptr = memmem (search_buf, nr_search_bytes,
2988 pattern, pattern_len);
2989
2990 if (found_ptr != NULL)
2991 {
2992 CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);
2993
2994 *found_addrp = found_addr;
2995 do_cleanups (old_cleanups);
2996 return 1;
2997 }
2998
2999 /* Not found in this chunk, skip to next chunk. */
3000
3001 /* Don't let search_space_len wrap here, it's unsigned. */
3002 if (search_space_len >= chunk_size)
3003 search_space_len -= chunk_size;
3004 else
3005 search_space_len = 0;
3006
3007 if (search_space_len >= pattern_len)
3008 {
3009 unsigned keep_len = search_buf_size - chunk_size;
3010 CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
3011 int nr_to_read;
3012
3013 /* Copy the trailing part of the previous iteration to the front
3014 of the buffer for the next iteration. */
3015 gdb_assert (keep_len == pattern_len - 1);
3016 memcpy (search_buf, search_buf + chunk_size, keep_len);
3017
3018 nr_to_read = min (search_space_len - keep_len, chunk_size);
3019
3020 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
3021 search_buf + keep_len, read_addr,
3022 nr_to_read) != nr_to_read)
3023 {
3024 warning (_("Unable to access %s bytes of target "
3025 "memory at %s, halting search."),
3026 plongest (nr_to_read),
3027 hex_string (read_addr));
3028 do_cleanups (old_cleanups);
3029 return -1;
3030 }
3031
3032 start_addr += chunk_size;
3033 }
3034 }
3035
3036 /* Not found. */
3037
3038 do_cleanups (old_cleanups);
3039 return 0;
3040 }
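
/* A worked example of the chunking above, with assumed numbers: with
   pattern_len == 4, search_buf_size is SEARCH_CHUNK_SIZE + 3.  Once a
   chunk has been scanned without a match, its last 3 bytes are copied
   to the front of the buffer so that a pattern straddling the chunk
   boundary can still be found, and the next read starts at
   start_addr + SEARCH_CHUNK_SIZE + 3 for up to SEARCH_CHUNK_SIZE more
   bytes.  */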
3041
3042 /* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
3043 sequence of bytes in PATTERN with length PATTERN_LEN.
3044
3045 The result is 1 if found, 0 if not found, and -1 if there was an error
3046 requiring halting of the search (e.g. memory read error).
3047 If the pattern is found the address is recorded in FOUND_ADDRP. */
3048
3049 int
3050 target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
3051 const gdb_byte *pattern, ULONGEST pattern_len,
3052 CORE_ADDR *found_addrp)
3053 {
3054 struct target_ops *t;
3055 int found;
3056
3057 /* We don't use INHERIT to set current_target.to_search_memory,
3058 so we have to scan the target stack and handle targetdebug
3059 ourselves. */
3060
3061 if (targetdebug)
3062 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
3063 hex_string (start_addr));
3064
3065 for (t = current_target.beneath; t != NULL; t = t->beneath)
3066 if (t->to_search_memory != NULL)
3067 break;
3068
3069 if (t != NULL)
3070 {
3071 found = t->to_search_memory (t, start_addr, search_space_len,
3072 pattern, pattern_len, found_addrp);
3073 }
3074 else
3075 {
3076 /* If a special version of to_search_memory isn't available, use the
3077 simple version. */
3078 found = simple_search_memory (current_target.beneath,
3079 start_addr, search_space_len,
3080 pattern, pattern_len, found_addrp);
3081 }
3082
3083 if (targetdebug)
3084 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
3085
3086 return found;
3087 }
3088
3089 /* Look through the currently pushed targets. If none of them will
3090 be able to restart the currently running process, issue an error
3091 message. */
3092
3093 void
3094 target_require_runnable (void)
3095 {
3096 struct target_ops *t;
3097
3098 for (t = target_stack; t != NULL; t = t->beneath)
3099 {
3100 /* If this target knows how to create a new program, then
3101 assume we will still be able to after killing the current
3102 one. Either killing and mourning will not pop T, or else
3103 find_default_run_target will find it again. */
3104 if (t->to_create_inferior != NULL)
3105 return;
3106
3107 /* Do not worry about thread_stratum targets that can not
3108 create inferiors. Assume they will be pushed again if
3109 necessary, and continue to the process_stratum. */
3110 if (t->to_stratum == thread_stratum
3111 || t->to_stratum == arch_stratum)
3112 continue;
3113
3114 error (_("The \"%s\" target does not support \"run\". "
3115 "Try \"help target\" or \"continue\"."),
3116 t->to_shortname);
3117 }
3118
3119 /* This function is only called if the target is running. In that
3120 case there should have been a process_stratum target and it
3121 should either know how to create inferiors, or not... */
3122 internal_error (__FILE__, __LINE__, _("No targets found"));
3123 }
3124
3125 /* Look through the list of possible targets for a target that can
3126 execute a run or attach command without any other data. This is
3127 used to locate the default process stratum.
3128
3129 If DO_MESG is not NULL, the result is always valid (error() is
3130 called for errors); else, return NULL on error. */
3131
3132 static struct target_ops *
3133 find_default_run_target (char *do_mesg)
3134 {
3135 struct target_ops **t;
3136 struct target_ops *runable = NULL;
3137 int count;
3138
3139 count = 0;
3140
3141 for (t = target_structs; t < target_structs + target_struct_size;
3142 ++t)
3143 {
3144 if ((*t)->to_can_run && target_can_run (*t))
3145 {
3146 runable = *t;
3147 ++count;
3148 }
3149 }
3150
3151 if (count != 1)
3152 {
3153 if (do_mesg)
3154 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
3155 else
3156 return NULL;
3157 }
3158
3159 return runable;
3160 }
3161
3162 void
3163 find_default_attach (struct target_ops *ops, char *args, int from_tty)
3164 {
3165 struct target_ops *t;
3166
3167 t = find_default_run_target ("attach");
3168 (t->to_attach) (t, args, from_tty);
3169 return;
3170 }
3171
3172 void
3173 find_default_create_inferior (struct target_ops *ops,
3174 char *exec_file, char *allargs, char **env,
3175 int from_tty)
3176 {
3177 struct target_ops *t;
3178
3179 t = find_default_run_target ("run");
3180 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
3181 return;
3182 }
3183
3184 static int
3185 find_default_can_async_p (struct target_ops *ignore)
3186 {
3187 struct target_ops *t;
3188
3189 /* This may be called before the target is pushed on the stack;
3190 look for the default process stratum. If there's none, gdb isn't
3191 configured with a native debugger, and target remote isn't
3192 connected yet. */
3193 t = find_default_run_target (NULL);
3194 if (t && t->to_can_async_p != delegate_can_async_p)
3195 return (t->to_can_async_p) (t);
3196 return 0;
3197 }
3198
3199 static int
3200 find_default_is_async_p (struct target_ops *ignore)
3201 {
3202 struct target_ops *t;
3203
3204 /* This may be called before the target is pushed on the stack;
3205 look for the default process stratum. If there's none, gdb isn't
3206 configured with a native debugger, and target remote isn't
3207 connected yet. */
3208 t = find_default_run_target (NULL);
3209 if (t && t->to_is_async_p != delegate_is_async_p)
3210 return (t->to_is_async_p) (t);
3211 return 0;
3212 }
3213
3214 static int
3215 find_default_supports_non_stop (struct target_ops *self)
3216 {
3217 struct target_ops *t;
3218
3219 t = find_default_run_target (NULL);
3220 if (t && t->to_supports_non_stop)
3221 return (t->to_supports_non_stop) (t);
3222 return 0;
3223 }
3224
3225 int
3226 target_supports_non_stop (void)
3227 {
3228 struct target_ops *t;
3229
3230 for (t = &current_target; t != NULL; t = t->beneath)
3231 if (t->to_supports_non_stop)
3232 return t->to_supports_non_stop (t);
3233
3234 return 0;
3235 }
3236
3237 /* Implement the "info proc" command. */
3238
3239 int
3240 target_info_proc (char *args, enum info_proc_what what)
3241 {
3242 struct target_ops *t;
3243
3244 /* If we're already connected to something that can get us OS
3245 related data, use it. Otherwise, try using the native
3246 target. */
3247 if (current_target.to_stratum >= process_stratum)
3248 t = current_target.beneath;
3249 else
3250 t = find_default_run_target (NULL);
3251
3252 for (; t != NULL; t = t->beneath)
3253 {
3254 if (t->to_info_proc != NULL)
3255 {
3256 t->to_info_proc (t, args, what);
3257
3258 if (targetdebug)
3259 fprintf_unfiltered (gdb_stdlog,
3260 "target_info_proc (\"%s\", %d)\n", args, what);
3261
3262 return 1;
3263 }
3264 }
3265
3266 return 0;
3267 }
3268
3269 static int
3270 find_default_supports_disable_randomization (struct target_ops *self)
3271 {
3272 struct target_ops *t;
3273
3274 t = find_default_run_target (NULL);
3275 if (t && t->to_supports_disable_randomization)
3276 return (t->to_supports_disable_randomization) (t);
3277 return 0;
3278 }
3279
3280 int
3281 target_supports_disable_randomization (void)
3282 {
3283 struct target_ops *t;
3284
3285 for (t = &current_target; t != NULL; t = t->beneath)
3286 if (t->to_supports_disable_randomization)
3287 return t->to_supports_disable_randomization (t);
3288
3289 return 0;
3290 }
3291
3292 char *
3293 target_get_osdata (const char *type)
3294 {
3295 struct target_ops *t;
3296
3297 /* If we're already connected to something that can get us OS
3298 related data, use it. Otherwise, try using the native
3299 target. */
3300 if (current_target.to_stratum >= process_stratum)
3301 t = current_target.beneath;
3302 else
3303 t = find_default_run_target ("get OS data");
3304
3305 if (!t)
3306 return NULL;
3307
3308 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3309 }
3310
3311 /* Determine the current address space of thread PTID. */
3312
3313 struct address_space *
3314 target_thread_address_space (ptid_t ptid)
3315 {
3316 struct address_space *aspace;
3317 struct inferior *inf;
3318 struct target_ops *t;
3319
3320 for (t = current_target.beneath; t != NULL; t = t->beneath)
3321 {
3322 if (t->to_thread_address_space != NULL)
3323 {
3324 aspace = t->to_thread_address_space (t, ptid);
3325 gdb_assert (aspace);
3326
3327 if (targetdebug)
3328 fprintf_unfiltered (gdb_stdlog,
3329 "target_thread_address_space (%s) = %d\n",
3330 target_pid_to_str (ptid),
3331 address_space_num (aspace));
3332 return aspace;
3333 }
3334 }
3335
3336 /* Fall-back to the "main" address space of the inferior. */
3337 inf = find_inferior_pid (ptid_get_pid (ptid));
3338
3339 if (inf == NULL || inf->aspace == NULL)
3340 internal_error (__FILE__, __LINE__,
3341 _("Can't determine the current "
3342 "address space of thread %s\n"),
3343 target_pid_to_str (ptid));
3344
3345 return inf->aspace;
3346 }
3347
3348
3349 /* Target file operations. */
3350
3351 static struct target_ops *
3352 default_fileio_target (void)
3353 {
3354 /* If we're already connected to something that can perform
3355 file I/O, use it. Otherwise, try using the native target. */
3356 if (current_target.to_stratum >= process_stratum)
3357 return current_target.beneath;
3358 else
3359 return find_default_run_target ("file I/O");
3360 }
3361
3362 /* Open FILENAME on the target, using FLAGS and MODE. Return a
3363 target file descriptor, or -1 if an error occurs (and set
3364 *TARGET_ERRNO). */
3365 int
3366 target_fileio_open (const char *filename, int flags, int mode,
3367 int *target_errno)
3368 {
3369 struct target_ops *t;
3370
3371 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3372 {
3373 if (t->to_fileio_open != NULL)
3374 {
3375 int fd = t->to_fileio_open (filename, flags, mode, target_errno);
3376
3377 if (targetdebug)
3378 fprintf_unfiltered (gdb_stdlog,
3379 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3380 filename, flags, mode,
3381 fd, fd != -1 ? 0 : *target_errno);
3382 return fd;
3383 }
3384 }
3385
3386 *target_errno = FILEIO_ENOSYS;
3387 return -1;
3388 }
3389
3390 /* Write up to LEN bytes from WRITE_BUF to FD on the target.
3391 Return the number of bytes written, or -1 if an error occurs
3392 (and set *TARGET_ERRNO). */
3393 int
3394 target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3395 ULONGEST offset, int *target_errno)
3396 {
3397 struct target_ops *t;
3398
3399 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3400 {
3401 if (t->to_fileio_pwrite != NULL)
3402 {
3403 int ret = t->to_fileio_pwrite (fd, write_buf, len, offset,
3404 target_errno);
3405
3406 if (targetdebug)
3407 fprintf_unfiltered (gdb_stdlog,
3408 "target_fileio_pwrite (%d,...,%d,%s) "
3409 "= %d (%d)\n",
3410 fd, len, pulongest (offset),
3411 ret, ret != -1 ? 0 : *target_errno);
3412 return ret;
3413 }
3414 }
3415
3416 *target_errno = FILEIO_ENOSYS;
3417 return -1;
3418 }
3419
3420 /* Read up to LEN bytes from FD on the target into READ_BUF.
3421 Return the number of bytes read, or -1 if an error occurs
3422 (and set *TARGET_ERRNO). */
3423 int
3424 target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3425 ULONGEST offset, int *target_errno)
3426 {
3427 struct target_ops *t;
3428
3429 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3430 {
3431 if (t->to_fileio_pread != NULL)
3432 {
3433 int ret = t->to_fileio_pread (fd, read_buf, len, offset,
3434 target_errno);
3435
3436 if (targetdebug)
3437 fprintf_unfiltered (gdb_stdlog,
3438 "target_fileio_pread (%d,...,%d,%s) "
3439 "= %d (%d)\n",
3440 fd, len, pulongest (offset),
3441 ret, ret != -1 ? 0 : *target_errno);
3442 return ret;
3443 }
3444 }
3445
3446 *target_errno = FILEIO_ENOSYS;
3447 return -1;
3448 }
3449
3450 /* Close FD on the target. Return 0, or -1 if an error occurs
3451 (and set *TARGET_ERRNO). */
3452 int
3453 target_fileio_close (int fd, int *target_errno)
3454 {
3455 struct target_ops *t;
3456
3457 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3458 {
3459 if (t->to_fileio_close != NULL)
3460 {
3461 int ret = t->to_fileio_close (fd, target_errno);
3462
3463 if (targetdebug)
3464 fprintf_unfiltered (gdb_stdlog,
3465 "target_fileio_close (%d) = %d (%d)\n",
3466 fd, ret, ret != -1 ? 0 : *target_errno);
3467 return ret;
3468 }
3469 }
3470
3471 *target_errno = FILEIO_ENOSYS;
3472 return -1;
3473 }
3474
3475 /* Unlink FILENAME on the target. Return 0, or -1 if an error
3476 occurs (and set *TARGET_ERRNO). */
3477 int
3478 target_fileio_unlink (const char *filename, int *target_errno)
3479 {
3480 struct target_ops *t;
3481
3482 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3483 {
3484 if (t->to_fileio_unlink != NULL)
3485 {
3486 int ret = t->to_fileio_unlink (filename, target_errno);
3487
3488 if (targetdebug)
3489 fprintf_unfiltered (gdb_stdlog,
3490 "target_fileio_unlink (%s) = %d (%d)\n",
3491 filename, ret, ret != -1 ? 0 : *target_errno);
3492 return ret;
3493 }
3494 }
3495
3496 *target_errno = FILEIO_ENOSYS;
3497 return -1;
3498 }
3499
3500 /* Read value of symbolic link FILENAME on the target. Return a
3501 null-terminated string allocated via xmalloc, or NULL if an error
3502 occurs (and set *TARGET_ERRNO). */
3503 char *
3504 target_fileio_readlink (const char *filename, int *target_errno)
3505 {
3506 struct target_ops *t;
3507
3508 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3509 {
3510 if (t->to_fileio_readlink != NULL)
3511 {
3512 char *ret = t->to_fileio_readlink (filename, target_errno);
3513
3514 if (targetdebug)
3515 fprintf_unfiltered (gdb_stdlog,
3516 "target_fileio_readlink (%s) = %s (%d)\n",
3517 filename, ret? ret : "(nil)",
3518 ret? 0 : *target_errno);
3519 return ret;
3520 }
3521 }
3522
3523 *target_errno = FILEIO_ENOSYS;
3524 return NULL;
3525 }
3526
3527 static void
3528 target_fileio_close_cleanup (void *opaque)
3529 {
3530 int fd = *(int *) opaque;
3531 int target_errno;
3532
3533 target_fileio_close (fd, &target_errno);
3534 }
3535
3536 /* Read target file FILENAME. Store the result in *BUF_P and
3537 return the size of the transferred data. PADDING additional bytes are
3538 available in *BUF_P. This is a helper function for
3539 target_fileio_read_alloc; see the declaration of that function for more
3540 information. */
3541
3542 static LONGEST
3543 target_fileio_read_alloc_1 (const char *filename,
3544 gdb_byte **buf_p, int padding)
3545 {
3546 struct cleanup *close_cleanup;
3547 size_t buf_alloc, buf_pos;
3548 gdb_byte *buf;
3549 LONGEST n;
3550 int fd;
3551 int target_errno;
3552
3553 fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
3554 if (fd == -1)
3555 return -1;
3556
3557 close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);
3558
3559 /* Start by reading up to 4K at a time. The target will throttle
3560 this number down if necessary. */
3561 buf_alloc = 4096;
3562 buf = xmalloc (buf_alloc);
3563 buf_pos = 0;
3564 while (1)
3565 {
3566 n = target_fileio_pread (fd, &buf[buf_pos],
3567 buf_alloc - buf_pos - padding, buf_pos,
3568 &target_errno);
3569 if (n < 0)
3570 {
3571 /* An error occurred. */
3572 do_cleanups (close_cleanup);
3573 xfree (buf);
3574 return -1;
3575 }
3576 else if (n == 0)
3577 {
3578 /* Read all there was. */
3579 do_cleanups (close_cleanup);
3580 if (buf_pos == 0)
3581 xfree (buf);
3582 else
3583 *buf_p = buf;
3584 return buf_pos;
3585 }
3586
3587 buf_pos += n;
3588
3589 /* If the buffer is filling up, expand it. */
3590 if (buf_alloc < buf_pos * 2)
3591 {
3592 buf_alloc *= 2;
3593 buf = xrealloc (buf, buf_alloc);
3594 }
3595
3596 QUIT;
3597 }
3598 }
3599
3600 /* Read target file FILENAME. Store the result in *BUF_P and return
3601 the size of the transferred data. See the declaration in "target.h"
3602 for more information about the return value. */
3603
3604 LONGEST
3605 target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
3606 {
3607 return target_fileio_read_alloc_1 (filename, buf_p, 0);
3608 }
3609
3610 /* Read target file FILENAME. The result is NUL-terminated and
3611 returned as a string, allocated using xmalloc. If an error occurs
3612 or the transfer is unsupported, NULL is returned. Empty objects
3613 are returned as allocated but empty strings. A warning is issued
3614 if the result contains any embedded NUL bytes. */
3615
3616 char *
3617 target_fileio_read_stralloc (const char *filename)
3618 {
3619 gdb_byte *buffer;
3620 char *bufstr;
3621 LONGEST i, transferred;
3622
3623 transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
3624 bufstr = (char *) buffer;
3625
3626 if (transferred < 0)
3627 return NULL;
3628
3629 if (transferred == 0)
3630 return xstrdup ("");
3631
3632 bufstr[transferred] = 0;
3633
3634 /* Check for embedded NUL bytes; but allow trailing NULs. */
3635 for (i = strlen (bufstr); i < transferred; i++)
3636 if (bufstr[i] != 0)
3637 {
3638 warning (_("target file %s "
3639 "contained unexpected null characters"),
3640 filename);
3641 break;
3642 }
3643
3644 return bufstr;
3645 }
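
/* A usage sketch, illustrative only (the path below is just an
   example):

     char *maps = target_fileio_read_stralloc ("/proc/1234/maps");

     if (maps != NULL)
       {
         struct cleanup *back_to = make_cleanup (xfree, maps);

         parse MAPS here;
         do_cleanups (back_to);
       }

   A NULL result means the read failed or file I/O is unsupported by
   every target on the stack.  */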
3646
3647
3648 static int
3649 default_region_ok_for_hw_watchpoint (struct target_ops *self,
3650 CORE_ADDR addr, int len)
3651 {
3652 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3653 }
3654
3655 static int
3656 default_watchpoint_addr_within_range (struct target_ops *target,
3657 CORE_ADDR addr,
3658 CORE_ADDR start, int length)
3659 {
3660 return addr >= start && addr < start + length;
3661 }
3662
3663 static struct gdbarch *
3664 default_thread_architecture (struct target_ops *ops, ptid_t ptid)
3665 {
3666 return target_gdbarch ();
3667 }
3668
3669 static int
3670 return_zero (void)
3671 {
3672 return 0;
3673 }
3674
3675 static int
3676 return_one (void)
3677 {
3678 return 1;
3679 }
3680
3681 static int
3682 return_minus_one (void)
3683 {
3684 return -1;
3685 }
3686
3687 static void *
3688 return_null (void)
3689 {
3690 return 0;
3691 }
3692
3693 /*
3694 * Find the next target down the stack from the specified target.
3695 */
3696
3697 struct target_ops *
3698 find_target_beneath (struct target_ops *t)
3699 {
3700 return t->beneath;
3701 }
3702
3703 /* See target.h. */
3704
3705 struct target_ops *
3706 find_target_at (enum strata stratum)
3707 {
3708 struct target_ops *t;
3709
3710 for (t = current_target.beneath; t != NULL; t = t->beneath)
3711 if (t->to_stratum == stratum)
3712 return t;
3713
3714 return NULL;
3715 }
3716
3717 \f
3718 /* The inferior process has died. Long live the inferior! */
3719
3720 void
3721 generic_mourn_inferior (void)
3722 {
3723 ptid_t ptid;
3724
3725 ptid = inferior_ptid;
3726 inferior_ptid = null_ptid;
3727
3728 /* Mark breakpoints uninserted in case something tries to delete a
3729 breakpoint while we delete the inferior's threads (which would
3730 fail, since the inferior is long gone). */
3731 mark_breakpoints_out ();
3732
3733 if (!ptid_equal (ptid, null_ptid))
3734 {
3735 int pid = ptid_get_pid (ptid);
3736 exit_inferior (pid);
3737 }
3738
3739 /* Note this wipes step-resume breakpoints, so needs to be done
3740 after exit_inferior, which ends up referencing the step-resume
3741 breakpoints through clear_thread_inferior_resources. */
3742 breakpoint_init_inferior (inf_exited);
3743
3744 registers_changed ();
3745
3746 reopen_exec_file ();
3747 reinit_frame_cache ();
3748
3749 if (deprecated_detach_hook)
3750 deprecated_detach_hook ();
3751 }
3752 \f
3753 /* Convert a normal process ID to a string. Returns the string in a
3754 static buffer. */
3755
3756 char *
3757 normal_pid_to_str (ptid_t ptid)
3758 {
3759 static char buf[32];
3760
3761 xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
3762 return buf;
3763 }
3764
3765 static char *
3766 dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
3767 {
3768 return normal_pid_to_str (ptid);
3769 }
3770
3771 /* Error-catcher for target_find_memory_regions. */
3772 static int
3773 dummy_find_memory_regions (struct target_ops *self,
3774 find_memory_region_ftype ignore1, void *ignore2)
3775 {
3776 error (_("Command not implemented for this target."));
3777 return 0;
3778 }
3779
3780 /* Error-catcher for target_make_corefile_notes. */
3781 static char *
3782 dummy_make_corefile_notes (struct target_ops *self,
3783 bfd *ignore1, int *ignore2)
3784 {
3785 error (_("Command not implemented for this target."));
3786 return NULL;
3787 }
3788
3789 /* Error-catcher for target_get_bookmark. */
3790 static gdb_byte *
3791 dummy_get_bookmark (struct target_ops *self, char *ignore1, int ignore2)
3792 {
3793 tcomplain ();
3794 return NULL;
3795 }
3796
3797 /* Error-catcher for target_goto_bookmark. */
3798 static void
3799 dummy_goto_bookmark (struct target_ops *self, gdb_byte *ignore, int from_tty)
3800 {
3801 tcomplain ();
3802 }
3803
3804 /* Set up the handful of non-empty slots needed by the dummy target
3805 vector. */
3806
3807 static void
3808 init_dummy_target (void)
3809 {
3810 dummy_target.to_shortname = "None";
3811 dummy_target.to_longname = "None";
3812 dummy_target.to_doc = "";
3813 dummy_target.to_attach = find_default_attach;
3814 dummy_target.to_detach =
3815 (void (*)(struct target_ops *, const char *, int))target_ignore;
3816 dummy_target.to_create_inferior = find_default_create_inferior;
3817 dummy_target.to_supports_non_stop = find_default_supports_non_stop;
3818 dummy_target.to_supports_disable_randomization
3819 = find_default_supports_disable_randomization;
3820 dummy_target.to_pid_to_str = dummy_pid_to_str;
3821 dummy_target.to_stratum = dummy_stratum;
3822 dummy_target.to_find_memory_regions = dummy_find_memory_regions;
3823 dummy_target.to_make_corefile_notes = dummy_make_corefile_notes;
3824 dummy_target.to_get_bookmark = dummy_get_bookmark;
3825 dummy_target.to_goto_bookmark = dummy_goto_bookmark;
3826 dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
3827 dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
3828 dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
3829 dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
3830 dummy_target.to_has_execution
3831 = (int (*) (struct target_ops *, ptid_t)) return_zero;
3832 dummy_target.to_magic = OPS_MAGIC;
3833
3834 install_dummy_methods (&dummy_target);
3835 }
3836 \f
3837 static void
3838 debug_to_open (char *args, int from_tty)
3839 {
3840 debug_target.to_open (args, from_tty);
3841
3842 fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
3843 }
3844
3845 void
3846 target_close (struct target_ops *targ)
3847 {
3848 gdb_assert (!target_is_pushed (targ));
3849
3850 if (targ->to_xclose != NULL)
3851 targ->to_xclose (targ);
3852 else if (targ->to_close != NULL)
3853 targ->to_close (targ);
3854
3855 if (targetdebug)
3856 fprintf_unfiltered (gdb_stdlog, "target_close ()\n");
3857 }
3858
3859 void
3860 target_attach (char *args, int from_tty)
3861 {
3862 struct target_ops *t;
3863
3864 for (t = current_target.beneath; t != NULL; t = t->beneath)
3865 {
3866 if (t->to_attach != NULL)
3867 {
3868 t->to_attach (t, args, from_tty);
3869 if (targetdebug)
3870 fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
3871 args, from_tty);
3872 return;
3873 }
3874 }
3875
3876 internal_error (__FILE__, __LINE__,
3877 _("could not find a target to attach"));
3878 }
3879
3880 int
3881 target_thread_alive (ptid_t ptid)
3882 {
3883 struct target_ops *t;
3884
3885 for (t = current_target.beneath; t != NULL; t = t->beneath)
3886 {
3887 if (t->to_thread_alive != NULL)
3888 {
3889 int retval;
3890
3891 retval = t->to_thread_alive (t, ptid);
3892 if (targetdebug)
3893 fprintf_unfiltered (gdb_stdlog, "target_thread_alive (%d) = %d\n",
3894 ptid_get_pid (ptid), retval);
3895
3896 return retval;
3897 }
3898 }
3899
3900 return 0;
3901 }
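/* The wrappers in this file share the delegation pattern shown by
   target_attach and target_thread_alive above: walk the target stack
   from current_target.beneath downwards and invoke the first layer
   that implements the method.  In outline (to_SOME_METHOD is a
   placeholder, not a real slot name):

       for (t = current_target.beneath; t != NULL; t = t->beneath)
         if (t->to_SOME_METHOD != NULL)
           return t->to_SOME_METHOD (t, ...);
       return DEFAULT;      (or tcomplain () if the method is required)
   */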
3902
3903 void
3904 target_find_new_threads (void)
3905 {
3906 struct target_ops *t;
3907
3908 for (t = current_target.beneath; t != NULL; t = t->beneath)
3909 {
3910 if (t->to_find_new_threads != NULL)
3911 {
3912 t->to_find_new_threads (t);
3913 if (targetdebug)
3914 fprintf_unfiltered (gdb_stdlog, "target_find_new_threads ()\n");
3915
3916 return;
3917 }
3918 }
3919 }
3920
3921 void
3922 target_stop (ptid_t ptid)
3923 {
3924 if (!may_stop)
3925 {
3926 warning (_("May not interrupt or stop the target, ignoring attempt"));
3927 return;
3928 }
3929
3930 (*current_target.to_stop) (&current_target, ptid);
3931 }
3932
3933 static void
3934 debug_to_post_attach (struct target_ops *self, int pid)
3935 {
3936 debug_target.to_post_attach (&debug_target, pid);
3937
3938 fprintf_unfiltered (gdb_stdlog, "target_post_attach (%d)\n", pid);
3939 }
3940
3941 /* Concatenate ELEM to LIST, a comma-separated list, and return the
3942 result. The incoming LIST argument is released. */
3943
3944 static char *
3945 str_comma_list_concat_elem (char *list, const char *elem)
3946 {
3947 if (list == NULL)
3948 return xstrdup (elem);
3949 else
3950 return reconcat (list, list, ", ", elem, (char *) NULL);
3951 }
3952
3953 /* Helper for target_options_to_string. If OPT is present in
3954 TARGET_OPTIONS, append OPT_STR (the string version of OPT) to RET.
3955 Returns the new resulting string. OPT is removed from
3956 TARGET_OPTIONS. */
3957
3958 static char *
3959 do_option (int *target_options, char *ret,
3960 int opt, char *opt_str)
3961 {
3962 if ((*target_options & opt) != 0)
3963 {
3964 ret = str_comma_list_concat_elem (ret, opt_str);
3965 *target_options &= ~opt;
3966 }
3967
3968 return ret;
3969 }
3970
3971 char *
3972 target_options_to_string (int target_options)
3973 {
3974 char *ret = NULL;
3975
3976 #define DO_TARG_OPTION(OPT) \
3977 ret = do_option (&target_options, ret, OPT, #OPT)
3978
3979 DO_TARG_OPTION (TARGET_WNOHANG);
3980
3981 if (target_options != 0)
3982 ret = str_comma_list_concat_elem (ret, "unknown???");
3983
3984 if (ret == NULL)
3985 ret = xstrdup ("");
3986 return ret;
3987 }
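/* Example (illustrative): a hypothetical call such as

       char *s = target_options_to_string (TARGET_WNOHANG | 0x8000);

   would yield "TARGET_WNOHANG, unknown???", assuming 0x8000 is not a
   recognized option bit.  The returned string is heap-allocated, so
   callers are expected to free it.  */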
3988
3989 static void
3990 debug_print_register (const char * func,
3991 struct regcache *regcache, int regno)
3992 {
3993 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3994
3995 fprintf_unfiltered (gdb_stdlog, "%s ", func);
3996 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch)
3997 && gdbarch_register_name (gdbarch, regno) != NULL
3998 && gdbarch_register_name (gdbarch, regno)[0] != '\0')
3999 fprintf_unfiltered (gdb_stdlog, "(%s)",
4000 gdbarch_register_name (gdbarch, regno));
4001 else
4002 fprintf_unfiltered (gdb_stdlog, "(%d)", regno);
4003 if (regno >= 0 && regno < gdbarch_num_regs (gdbarch))
4004 {
4005 enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
4006 int i, size = register_size (gdbarch, regno);
4007 gdb_byte buf[MAX_REGISTER_SIZE];
4008
4009 regcache_raw_collect (regcache, regno, buf);
4010 fprintf_unfiltered (gdb_stdlog, " = ");
4011 for (i = 0; i < size; i++)
4012 {
4013 fprintf_unfiltered (gdb_stdlog, "%02x", buf[i]);
4014 }
4015 if (size <= sizeof (LONGEST))
4016 {
4017 ULONGEST val = extract_unsigned_integer (buf, size, byte_order);
4018
4019 fprintf_unfiltered (gdb_stdlog, " %s %s",
4020 core_addr_to_string_nz (val), plongest (val));
4021 }
4022 }
4023 fprintf_unfiltered (gdb_stdlog, "\n");
4024 }
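/* The trace line produced above has the shape

       FUNC (REGNAME) = RAWBYTES HEXVALUE DECIMALVALUE

   where FUNC is the caller-supplied prefix (e.g. the
   "target_fetch_registers" / "target_store_registers" wrappers below),
   RAWBYTES are the raw register bytes printed %02x-style in target
   order, and the last two fields appear only when the register fits in
   a LONGEST.  */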
4025
4026 void
4027 target_fetch_registers (struct regcache *regcache, int regno)
4028 {
4029 struct target_ops *t;
4030
4031 for (t = current_target.beneath; t != NULL; t = t->beneath)
4032 {
4033 if (t->to_fetch_registers != NULL)
4034 {
4035 t->to_fetch_registers (t, regcache, regno);
4036 if (targetdebug)
4037 debug_print_register ("target_fetch_registers", regcache, regno);
4038 return;
4039 }
4040 }
4041 }
4042
4043 void
4044 target_store_registers (struct regcache *regcache, int regno)
4045 {
4046 struct target_ops *t;
4047
4048 if (!may_write_registers)
4049 error (_("Writing to registers is not allowed (regno %d)"), regno);
4050
4051 current_target.to_store_registers (&current_target, regcache, regno);
4052 if (targetdebug)
4053 {
4054 debug_print_register ("target_store_registers", regcache, regno);
4055 }
4056 }
4057
4058 int
4059 target_core_of_thread (ptid_t ptid)
4060 {
4061 struct target_ops *t;
4062
4063 for (t = current_target.beneath; t != NULL; t = t->beneath)
4064 {
4065 if (t->to_core_of_thread != NULL)
4066 {
4067 int retval = t->to_core_of_thread (t, ptid);
4068
4069 if (targetdebug)
4070 fprintf_unfiltered (gdb_stdlog,
4071 "target_core_of_thread (%d) = %d\n",
4072 ptid_get_pid (ptid), retval);
4073 return retval;
4074 }
4075 }
4076
4077 return -1;
4078 }
4079
4080 int
4081 target_verify_memory (const gdb_byte *data, CORE_ADDR memaddr, ULONGEST size)
4082 {
4083 struct target_ops *t;
4084
4085 for (t = current_target.beneath; t != NULL; t = t->beneath)
4086 {
4087 if (t->to_verify_memory != NULL)
4088 {
4089 int retval = t->to_verify_memory (t, data, memaddr, size);
4090
4091 if (targetdebug)
4092 fprintf_unfiltered (gdb_stdlog,
4093 "target_verify_memory (%s, %s) = %d\n",
4094 paddress (target_gdbarch (), memaddr),
4095 pulongest (size),
4096 retval);
4097 return retval;
4098 }
4099 }
4100
4101 tcomplain ();
4102 }
4103
4104 /* The documentation for this function is in its prototype declaration in
4105 target.h. */
4106
4107 int
4108 target_insert_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
4109 {
4110 struct target_ops *t;
4111
4112 for (t = current_target.beneath; t != NULL; t = t->beneath)
4113 if (t->to_insert_mask_watchpoint != NULL)
4114 {
4115 int ret;
4116
4117 ret = t->to_insert_mask_watchpoint (t, addr, mask, rw);
4118
4119 if (targetdebug)
4120 fprintf_unfiltered (gdb_stdlog, "\
4121 target_insert_mask_watchpoint (%s, %s, %d) = %d\n",
4122 core_addr_to_string (addr),
4123 core_addr_to_string (mask), rw, ret);
4124
4125 return ret;
4126 }
4127
4128 return 1;
4129 }
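/* Usage sketch (illustrative, not taken from a real caller): masked
   watchpoint users typically probe for support first and treat a
   non-zero insertion result as failure:

       if (target_masked_watch_num_registers (addr, mask) > 0
           && target_insert_mask_watchpoint (addr, mask, hw_write) == 0)
         ...   (the watchpoint is in place)

   hw_write stands for the usual hardware-watchpoint "rw" value used by
   the breakpoint code; treat the fragment as a sketch only.  */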
4130
4131 /* The documentation for this function is in its prototype declaration in
4132 target.h. */
4133
4134 int
4135 target_remove_mask_watchpoint (CORE_ADDR addr, CORE_ADDR mask, int rw)
4136 {
4137 struct target_ops *t;
4138
4139 for (t = current_target.beneath; t != NULL; t = t->beneath)
4140 if (t->to_remove_mask_watchpoint != NULL)
4141 {
4142 int ret;
4143
4144 ret = t->to_remove_mask_watchpoint (t, addr, mask, rw);
4145
4146 if (targetdebug)
4147 fprintf_unfiltered (gdb_stdlog, "\
4148 target_remove_mask_watchpoint (%s, %s, %d) = %d\n",
4149 core_addr_to_string (addr),
4150 core_addr_to_string (mask), rw, ret);
4151
4152 return ret;
4153 }
4154
4155 return 1;
4156 }
4157
4158 /* The documentation for this function is in its prototype declaration
4159 in target.h. */
4160
4161 int
4162 target_masked_watch_num_registers (CORE_ADDR addr, CORE_ADDR mask)
4163 {
4164 struct target_ops *t;
4165
4166 for (t = current_target.beneath; t != NULL; t = t->beneath)
4167 if (t->to_masked_watch_num_registers != NULL)
4168 return t->to_masked_watch_num_registers (t, addr, mask);
4169
4170 return -1;
4171 }
4172
4173 /* The documentation for this function is in its prototype declaration
4174 in target.h. */
4175
4176 int
4177 target_ranged_break_num_registers (void)
4178 {
4179 struct target_ops *t;
4180
4181 for (t = current_target.beneath; t != NULL; t = t->beneath)
4182 if (t->to_ranged_break_num_registers != NULL)
4183 return t->to_ranged_break_num_registers (t);
4184
4185 return -1;
4186 }
4187
4188 /* See target.h. */
4189
4190 struct btrace_target_info *
4191 target_enable_btrace (ptid_t ptid)
4192 {
4193 struct target_ops *t;
4194
4195 for (t = current_target.beneath; t != NULL; t = t->beneath)
4196 if (t->to_enable_btrace != NULL)
4197 return t->to_enable_btrace (ptid);
4198
4199 tcomplain ();
4200 return NULL;
4201 }
4202
4203 /* See target.h. */
4204
4205 void
4206 target_disable_btrace (struct btrace_target_info *btinfo)
4207 {
4208 struct target_ops *t;
4209
4210 for (t = current_target.beneath; t != NULL; t = t->beneath)
4211 if (t->to_disable_btrace != NULL)
4212 {
4213 t->to_disable_btrace (btinfo);
4214 return;
4215 }
4216
4217 tcomplain ();
4218 }
4219
4220 /* See target.h. */
4221
4222 void
4223 target_teardown_btrace (struct btrace_target_info *btinfo)
4224 {
4225 struct target_ops *t;
4226
4227 for (t = current_target.beneath; t != NULL; t = t->beneath)
4228 if (t->to_teardown_btrace != NULL)
4229 {
4230 t->to_teardown_btrace (btinfo);
4231 return;
4232 }
4233
4234 tcomplain ();
4235 }
4236
4237 /* See target.h. */
4238
4239 enum btrace_error
4240 target_read_btrace (VEC (btrace_block_s) **btrace,
4241 struct btrace_target_info *btinfo,
4242 enum btrace_read_type type)
4243 {
4244 struct target_ops *t;
4245
4246 for (t = current_target.beneath; t != NULL; t = t->beneath)
4247 if (t->to_read_btrace != NULL)
4248 return t->to_read_btrace (btrace, btinfo, type);
4249
4250 tcomplain ();
4251 return BTRACE_ERR_NOT_SUPPORTED;
4252 }
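/* Illustrative flow for the branch-trace methods above (a sketch, not
   taken from a real caller):

       struct btrace_target_info *btinfo = target_enable_btrace (ptid);
       VEC (btrace_block_s) *blocks = NULL;

       if (target_read_btrace (&blocks, btinfo, BTRACE_READ_NEW)
           == BTRACE_ERR_NONE)
         {
           ...   (consume the recorded blocks)
         }
       target_disable_btrace (btinfo);

   BTRACE_READ_NEW and BTRACE_ERR_NONE are assumed here to be the
   "read only new trace" and "success" members of the btrace enums used
   by this interface.  */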
4253
4254 /* See target.h. */
4255
4256 void
4257 target_stop_recording (void)
4258 {
4259 struct target_ops *t;
4260
4261 for (t = current_target.beneath; t != NULL; t = t->beneath)
4262 if (t->to_stop_recording != NULL)
4263 {
4264 t->to_stop_recording ();
4265 return;
4266 }
4267
4268 /* This is optional. */
4269 }
4270
4271 /* See target.h. */
4272
4273 void
4274 target_info_record (void)
4275 {
4276 struct target_ops *t;
4277
4278 for (t = current_target.beneath; t != NULL; t = t->beneath)
4279 if (t->to_info_record != NULL)
4280 {
4281 t->to_info_record ();
4282 return;
4283 }
4284
4285 tcomplain ();
4286 }
4287
4288 /* See target.h. */
4289
4290 void
4291 target_save_record (const char *filename)
4292 {
4293 struct target_ops *t;
4294
4295 for (t = current_target.beneath; t != NULL; t = t->beneath)
4296 if (t->to_save_record != NULL)
4297 {
4298 t->to_save_record (filename);
4299 return;
4300 }
4301
4302 tcomplain ();
4303 }
4304
4305 /* See target.h. */
4306
4307 int
4308 target_supports_delete_record (void)
4309 {
4310 struct target_ops *t;
4311
4312 for (t = current_target.beneath; t != NULL; t = t->beneath)
4313 if (t->to_delete_record != NULL)
4314 return 1;
4315
4316 return 0;
4317 }
4318
4319 /* See target.h. */
4320
4321 void
4322 target_delete_record (void)
4323 {
4324 struct target_ops *t;
4325
4326 for (t = current_target.beneath; t != NULL; t = t->beneath)
4327 if (t->to_delete_record != NULL)
4328 {
4329 t->to_delete_record ();
4330 return;
4331 }
4332
4333 tcomplain ();
4334 }
4335
4336 /* See target.h. */
4337
4338 int
4339 target_record_is_replaying (void)
4340 {
4341 struct target_ops *t;
4342
4343 for (t = current_target.beneath; t != NULL; t = t->beneath)
4344 if (t->to_record_is_replaying != NULL)
4345 return t->to_record_is_replaying ();
4346
4347 return 0;
4348 }
4349
4350 /* See target.h. */
4351
4352 void
4353 target_goto_record_begin (void)
4354 {
4355 struct target_ops *t;
4356
4357 for (t = current_target.beneath; t != NULL; t = t->beneath)
4358 if (t->to_goto_record_begin != NULL)
4359 {
4360 t->to_goto_record_begin ();
4361 return;
4362 }
4363
4364 tcomplain ();
4365 }
4366
4367 /* See target.h. */
4368
4369 void
4370 target_goto_record_end (void)
4371 {
4372 struct target_ops *t;
4373
4374 for (t = current_target.beneath; t != NULL; t = t->beneath)
4375 if (t->to_goto_record_end != NULL)
4376 {
4377 t->to_goto_record_end ();
4378 return;
4379 }
4380
4381 tcomplain ();
4382 }
4383
4384 /* See target.h. */
4385
4386 void
4387 target_goto_record (ULONGEST insn)
4388 {
4389 struct target_ops *t;
4390
4391 for (t = current_target.beneath; t != NULL; t = t->beneath)
4392 if (t->to_goto_record != NULL)
4393 {
4394 t->to_goto_record (insn);
4395 return;
4396 }
4397
4398 tcomplain ();
4399 }
4400
4401 /* See target.h. */
4402
4403 void
4404 target_insn_history (int size, int flags)
4405 {
4406 struct target_ops *t;
4407
4408 for (t = current_target.beneath; t != NULL; t = t->beneath)
4409 if (t->to_insn_history != NULL)
4410 {
4411 t->to_insn_history (size, flags);
4412 return;
4413 }
4414
4415 tcomplain ();
4416 }
4417
4418 /* See target.h. */
4419
4420 void
4421 target_insn_history_from (ULONGEST from, int size, int flags)
4422 {
4423 struct target_ops *t;
4424
4425 for (t = current_target.beneath; t != NULL; t = t->beneath)
4426 if (t->to_insn_history_from != NULL)
4427 {
4428 t->to_insn_history_from (from, size, flags);
4429 return;
4430 }
4431
4432 tcomplain ();
4433 }
4434
4435 /* See target.h. */
4436
4437 void
4438 target_insn_history_range (ULONGEST begin, ULONGEST end, int flags)
4439 {
4440 struct target_ops *t;
4441
4442 for (t = current_target.beneath; t != NULL; t = t->beneath)
4443 if (t->to_insn_history_range != NULL)
4444 {
4445 t->to_insn_history_range (begin, end, flags);
4446 return;
4447 }
4448
4449 tcomplain ();
4450 }
4451
4452 /* See target.h. */
4453
4454 void
4455 target_call_history (int size, int flags)
4456 {
4457 struct target_ops *t;
4458
4459 for (t = current_target.beneath; t != NULL; t = t->beneath)
4460 if (t->to_call_history != NULL)
4461 {
4462 t->to_call_history (size, flags);
4463 return;
4464 }
4465
4466 tcomplain ();
4467 }
4468
4469 /* See target.h. */
4470
4471 void
4472 target_call_history_from (ULONGEST begin, int size, int flags)
4473 {
4474 struct target_ops *t;
4475
4476 for (t = current_target.beneath; t != NULL; t = t->beneath)
4477 if (t->to_call_history_from != NULL)
4478 {
4479 t->to_call_history_from (begin, size, flags);
4480 return;
4481 }
4482
4483 tcomplain ();
4484 }
4485
4486 /* See target.h. */
4487
4488 void
4489 target_call_history_range (ULONGEST begin, ULONGEST end, int flags)
4490 {
4491 struct target_ops *t;
4492
4493 for (t = current_target.beneath; t != NULL; t = t->beneath)
4494 if (t->to_call_history_range != NULL)
4495 {
4496 t->to_call_history_range (begin, end, flags);
4497 return;
4498 }
4499
4500 tcomplain ();
4501 }
4502
4503 static void
4504 debug_to_prepare_to_store (struct target_ops *self, struct regcache *regcache)
4505 {
4506 debug_target.to_prepare_to_store (&debug_target, regcache);
4507
4508 fprintf_unfiltered (gdb_stdlog, "target_prepare_to_store ()\n");
4509 }
4510
4511 /* See target.h. */
4512
4513 const struct frame_unwind *
4514 target_get_unwinder (void)
4515 {
4516 struct target_ops *t;
4517
4518 for (t = current_target.beneath; t != NULL; t = t->beneath)
4519 if (t->to_get_unwinder != NULL)
4520 return t->to_get_unwinder;
4521
4522 return NULL;
4523 }
4524
4525 /* See target.h. */
4526
4527 const struct frame_unwind *
4528 target_get_tailcall_unwinder (void)
4529 {
4530 struct target_ops *t;
4531
4532 for (t = current_target.beneath; t != NULL; t = t->beneath)
4533 if (t->to_get_tailcall_unwinder != NULL)
4534 return t->to_get_tailcall_unwinder;
4535
4536 return NULL;
4537 }
4538
4539 /* See target.h. */
4540
4541 CORE_ADDR
4542 forward_target_decr_pc_after_break (struct target_ops *ops,
4543 struct gdbarch *gdbarch)
4544 {
4545 for (; ops != NULL; ops = ops->beneath)
4546 if (ops->to_decr_pc_after_break != NULL)
4547 return ops->to_decr_pc_after_break (ops, gdbarch);
4548
4549 return gdbarch_decr_pc_after_break (gdbarch);
4550 }
4551
4552 /* See target.h. */
4553
4554 CORE_ADDR
4555 target_decr_pc_after_break (struct gdbarch *gdbarch)
4556 {
4557 return forward_target_decr_pc_after_break (current_target.beneath, gdbarch);
4558 }
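/* A target layer may override the architecture's decr_pc_after_break
   by providing to_decr_pc_after_break; otherwise the gdbarch value is
   used.  Hypothetical sketch:

       static CORE_ADDR
       sample_decr_pc_after_break (struct target_ops *self,
                                   struct gdbarch *gdbarch)
       {
         return 0;    (a target whose breakpoints do not advance the PC)
       }

       sample_ops.to_decr_pc_after_break = sample_decr_pc_after_break;
   */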
4559
4560 static int
4561 deprecated_debug_xfer_memory (CORE_ADDR memaddr, bfd_byte *myaddr, int len,
4562 int write, struct mem_attrib *attrib,
4563 struct target_ops *target)
4564 {
4565 int retval;
4566
4567 retval = debug_target.deprecated_xfer_memory (memaddr, myaddr, len, write,
4568 attrib, target);
4569
4570 fprintf_unfiltered (gdb_stdlog,
4571 "target_xfer_memory (%s, xxx, %d, %s, xxx) = %d",
4572 paddress (target_gdbarch (), memaddr), len,
4573 write ? "write" : "read", retval);
4574
4575 if (retval > 0)
4576 {
4577 int i;
4578
4579 fputs_unfiltered (", bytes =", gdb_stdlog);
4580 for (i = 0; i < retval; i++)
4581 {
4582 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
4583 {
4584 if (targetdebug < 2 && i > 0)
4585 {
4586 fprintf_unfiltered (gdb_stdlog, " ...");
4587 break;
4588 }
4589 fprintf_unfiltered (gdb_stdlog, "\n");
4590 }
4591
4592 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
4593 }
4594 }
4595
4596 fputc_unfiltered ('\n', gdb_stdlog);
4597
4598 return retval;
4599 }
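/* Note on the hex dump above: line breaks fall where the host address
   of the buffer happens to be 16-byte aligned; with "set debug target"
   below 2 only the first such line is printed, followed by " ...".  */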
4600
4601 static void
4602 debug_to_files_info (struct target_ops *target)
4603 {
4604 debug_target.to_files_info (target);
4605
4606 fprintf_unfiltered (gdb_stdlog, "target_files_info (xxx)\n");
4607 }
4608
4609 static int
4610 debug_to_insert_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
4611 struct bp_target_info *bp_tgt)
4612 {
4613 int retval;
4614
4615 retval = debug_target.to_insert_breakpoint (&debug_target, gdbarch, bp_tgt);
4616
4617 fprintf_unfiltered (gdb_stdlog,
4618 "target_insert_breakpoint (%s, xxx) = %ld\n",
4619 core_addr_to_string (bp_tgt->placed_address),
4620 (unsigned long) retval);
4621 return retval;
4622 }
4623
4624 static int
4625 debug_to_remove_breakpoint (struct target_ops *ops, struct gdbarch *gdbarch,
4626 struct bp_target_info *bp_tgt)
4627 {
4628 int retval;
4629
4630 retval = debug_target.to_remove_breakpoint (&debug_target, gdbarch, bp_tgt);
4631
4632 fprintf_unfiltered (gdb_stdlog,
4633 "target_remove_breakpoint (%s, xxx) = %ld\n",
4634 core_addr_to_string (bp_tgt->placed_address),
4635 (unsigned long) retval);
4636 return retval;
4637 }
4638
4639 static int
4640 debug_to_can_use_hw_breakpoint (struct target_ops *self,
4641 int type, int cnt, int from_tty)
4642 {
4643 int retval;
4644
4645 retval = debug_target.to_can_use_hw_breakpoint (&debug_target,
4646 type, cnt, from_tty);
4647
4648 fprintf_unfiltered (gdb_stdlog,
4649 "target_can_use_hw_breakpoint (%ld, %ld, %ld) = %ld\n",
4650 (unsigned long) type,
4651 (unsigned long) cnt,
4652 (unsigned long) from_tty,
4653 (unsigned long) retval);
4654 return retval;
4655 }
4656
4657 static int
4658 debug_to_region_ok_for_hw_watchpoint (struct target_ops *self,
4659 CORE_ADDR addr, int len)
4660 {
4661 CORE_ADDR retval;
4662
4663 retval = debug_target.to_region_ok_for_hw_watchpoint (&debug_target,
4664 addr, len);
4665
4666 fprintf_unfiltered (gdb_stdlog,
4667 "target_region_ok_for_hw_watchpoint (%s, %ld) = %s\n",
4668 core_addr_to_string (addr), (unsigned long) len,
4669 core_addr_to_string (retval));
4670 return retval;
4671 }
4672
4673 static int
4674 debug_to_can_accel_watchpoint_condition (struct target_ops *self,
4675 CORE_ADDR addr, int len, int rw,
4676 struct expression *cond)
4677 {
4678 int retval;
4679
4680 retval = debug_target.to_can_accel_watchpoint_condition (&debug_target,
4681 addr, len,
4682 rw, cond);
4683
4684 fprintf_unfiltered (gdb_stdlog,
4685 "target_can_accel_watchpoint_condition "
4686 "(%s, %d, %d, %s) = %ld\n",
4687 core_addr_to_string (addr), len, rw,
4688 host_address_to_string (cond), (unsigned long) retval);
4689 return retval;
4690 }
4691
4692 static int
4693 debug_to_stopped_by_watchpoint (struct target_ops *ops)
4694 {
4695 int retval;
4696
4697 retval = debug_target.to_stopped_by_watchpoint (&debug_target);
4698
4699 fprintf_unfiltered (gdb_stdlog,
4700 "target_stopped_by_watchpoint () = %ld\n",
4701 (unsigned long) retval);
4702 return retval;
4703 }
4704
4705 static int
4706 debug_to_stopped_data_address (struct target_ops *target, CORE_ADDR *addr)
4707 {
4708 int retval;
4709
4710 retval = debug_target.to_stopped_data_address (target, addr);
4711
4712 fprintf_unfiltered (gdb_stdlog,
4713 "target_stopped_data_address ([%s]) = %ld\n",
4714 core_addr_to_string (*addr),
4715 (unsigned long)retval);
4716 return retval;
4717 }
4718
4719 static int
4720 debug_to_watchpoint_addr_within_range (struct target_ops *target,
4721 CORE_ADDR addr,
4722 CORE_ADDR start, int length)
4723 {
4724 int retval;
4725
4726 retval = debug_target.to_watchpoint_addr_within_range (target, addr,
4727 start, length);
4728
4729 fprintf_filtered (gdb_stdlog,
4730 "target_watchpoint_addr_within_range (%s, %s, %d) = %d\n",
4731 core_addr_to_string (addr), core_addr_to_string (start),
4732 length, retval);
4733 return retval;
4734 }
4735
4736 static int
4737 debug_to_insert_hw_breakpoint (struct target_ops *self,
4738 struct gdbarch *gdbarch,
4739 struct bp_target_info *bp_tgt)
4740 {
4741 int retval;
4742
4743 retval = debug_target.to_insert_hw_breakpoint (&debug_target,
4744 gdbarch, bp_tgt);
4745
4746 fprintf_unfiltered (gdb_stdlog,
4747 "target_insert_hw_breakpoint (%s, xxx) = %ld\n",
4748 core_addr_to_string (bp_tgt->placed_address),
4749 (unsigned long) retval);
4750 return retval;
4751 }
4752
4753 static int
4754 debug_to_remove_hw_breakpoint (struct target_ops *self,
4755 struct gdbarch *gdbarch,
4756 struct bp_target_info *bp_tgt)
4757 {
4758 int retval;
4759
4760 retval = debug_target.to_remove_hw_breakpoint (&debug_target,
4761 gdbarch, bp_tgt);
4762
4763 fprintf_unfiltered (gdb_stdlog,
4764 "target_remove_hw_breakpoint (%s, xxx) = %ld\n",
4765 core_addr_to_string (bp_tgt->placed_address),
4766 (unsigned long) retval);
4767 return retval;
4768 }
4769
4770 static int
4771 debug_to_insert_watchpoint (struct target_ops *self,
4772 CORE_ADDR addr, int len, int type,
4773 struct expression *cond)
4774 {
4775 int retval;
4776
4777 retval = debug_target.to_insert_watchpoint (&debug_target,
4778 addr, len, type, cond);
4779
4780 fprintf_unfiltered (gdb_stdlog,
4781 "target_insert_watchpoint (%s, %d, %d, %s) = %ld\n",
4782 core_addr_to_string (addr), len, type,
4783 host_address_to_string (cond), (unsigned long) retval);
4784 return retval;
4785 }
4786
4787 static int
4788 debug_to_remove_watchpoint (struct target_ops *self,
4789 CORE_ADDR addr, int len, int type,
4790 struct expression *cond)
4791 {
4792 int retval;
4793
4794 retval = debug_target.to_remove_watchpoint (&debug_target,
4795 addr, len, type, cond);
4796
4797 fprintf_unfiltered (gdb_stdlog,
4798 "target_remove_watchpoint (%s, %d, %d, %s) = %ld\n",
4799 core_addr_to_string (addr), len, type,
4800 host_address_to_string (cond), (unsigned long) retval);
4801 return retval;
4802 }
4803
4804 static void
4805 debug_to_terminal_init (struct target_ops *self)
4806 {
4807 debug_target.to_terminal_init (&debug_target);
4808
4809 fprintf_unfiltered (gdb_stdlog, "target_terminal_init ()\n");
4810 }
4811
4812 static void
4813 debug_to_terminal_inferior (struct target_ops *self)
4814 {
4815 debug_target.to_terminal_inferior (&debug_target);
4816
4817 fprintf_unfiltered (gdb_stdlog, "target_terminal_inferior ()\n");
4818 }
4819
4820 static void
4821 debug_to_terminal_ours_for_output (struct target_ops *self)
4822 {
4823 debug_target.to_terminal_ours_for_output (&debug_target);
4824
4825 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours_for_output ()\n");
4826 }
4827
4828 static void
4829 debug_to_terminal_ours (struct target_ops *self)
4830 {
4831 debug_target.to_terminal_ours (&debug_target);
4832
4833 fprintf_unfiltered (gdb_stdlog, "target_terminal_ours ()\n");
4834 }
4835
4836 static void
4837 debug_to_terminal_save_ours (struct target_ops *self)
4838 {
4839 debug_target.to_terminal_save_ours (&debug_target);
4840
4841 fprintf_unfiltered (gdb_stdlog, "target_terminal_save_ours ()\n");
4842 }
4843
4844 static void
4845 debug_to_terminal_info (struct target_ops *self,
4846 const char *arg, int from_tty)
4847 {
4848 debug_target.to_terminal_info (&debug_target, arg, from_tty);
4849
4850 fprintf_unfiltered (gdb_stdlog, "target_terminal_info (%s, %d)\n", arg,
4851 from_tty);
4852 }
4853
4854 static void
4855 debug_to_load (struct target_ops *self, char *args, int from_tty)
4856 {
4857 debug_target.to_load (&debug_target, args, from_tty);
4858
4859 fprintf_unfiltered (gdb_stdlog, "target_load (%s, %d)\n", args, from_tty);
4860 }
4861
4862 static void
4863 debug_to_post_startup_inferior (struct target_ops *self, ptid_t ptid)
4864 {
4865 debug_target.to_post_startup_inferior (&debug_target, ptid);
4866
4867 fprintf_unfiltered (gdb_stdlog, "target_post_startup_inferior (%d)\n",
4868 ptid_get_pid (ptid));
4869 }
4870
4871 static int
4872 debug_to_insert_fork_catchpoint (struct target_ops *self, int pid)
4873 {
4874 int retval;
4875
4876 retval = debug_target.to_insert_fork_catchpoint (&debug_target, pid);
4877
4878 fprintf_unfiltered (gdb_stdlog, "target_insert_fork_catchpoint (%d) = %d\n",
4879 pid, retval);
4880
4881 return retval;
4882 }
4883
4884 static int
4885 debug_to_remove_fork_catchpoint (struct target_ops *self, int pid)
4886 {
4887 int retval;
4888
4889 retval = debug_target.to_remove_fork_catchpoint (&debug_target, pid);
4890
4891 fprintf_unfiltered (gdb_stdlog, "target_remove_fork_catchpoint (%d) = %d\n",
4892 pid, retval);
4893
4894 return retval;
4895 }
4896
4897 static int
4898 debug_to_insert_vfork_catchpoint (struct target_ops *self, int pid)
4899 {
4900 int retval;
4901
4902 retval = debug_target.to_insert_vfork_catchpoint (&debug_target, pid);
4903
4904 fprintf_unfiltered (gdb_stdlog, "target_insert_vfork_catchpoint (%d) = %d\n",
4905 pid, retval);
4906
4907 return retval;
4908 }
4909
4910 static int
4911 debug_to_remove_vfork_catchpoint (struct target_ops *self, int pid)
4912 {
4913 int retval;
4914
4915 retval = debug_target.to_remove_vfork_catchpoint (&debug_target, pid);
4916
4917 fprintf_unfiltered (gdb_stdlog, "target_remove_vfork_catchpoint (%d) = %d\n",
4918 pid, retval);
4919
4920 return retval;
4921 }
4922
4923 static int
4924 debug_to_insert_exec_catchpoint (struct target_ops *self, int pid)
4925 {
4926 int retval;
4927
4928 retval = debug_target.to_insert_exec_catchpoint (&debug_target, pid);
4929
4930 fprintf_unfiltered (gdb_stdlog, "target_insert_exec_catchpoint (%d) = %d\n",
4931 pid, retval);
4932
4933 return retval;
4934 }
4935
4936 static int
4937 debug_to_remove_exec_catchpoint (struct target_ops *self, int pid)
4938 {
4939 int retval;
4940
4941 retval = debug_target.to_remove_exec_catchpoint (&debug_target, pid);
4942
4943 fprintf_unfiltered (gdb_stdlog, "target_remove_exec_catchpoint (%d) = %d\n",
4944 pid, retval);
4945
4946 return retval;
4947 }
4948
4949 static int
4950 debug_to_has_exited (struct target_ops *self,
4951 int pid, int wait_status, int *exit_status)
4952 {
4953 int has_exited;
4954
4955 has_exited = debug_target.to_has_exited (&debug_target,
4956 pid, wait_status, exit_status);
4957
4958 fprintf_unfiltered (gdb_stdlog, "target_has_exited (%d, %d, %d) = %d\n",
4959 pid, wait_status, *exit_status, has_exited);
4960
4961 return has_exited;
4962 }
4963
4964 static int
4965 debug_to_can_run (struct target_ops *self)
4966 {
4967 int retval;
4968
4969 retval = debug_target.to_can_run (&debug_target);
4970
4971 fprintf_unfiltered (gdb_stdlog, "target_can_run () = %d\n", retval);
4972
4973 return retval;
4974 }
4975
4976 static struct gdbarch *
4977 debug_to_thread_architecture (struct target_ops *ops, ptid_t ptid)
4978 {
4979 struct gdbarch *retval;
4980
4981 retval = debug_target.to_thread_architecture (ops, ptid);
4982
4983 fprintf_unfiltered (gdb_stdlog,
4984 "target_thread_architecture (%s) = %s [%s]\n",
4985 target_pid_to_str (ptid),
4986 host_address_to_string (retval),
4987 gdbarch_bfd_arch_info (retval)->printable_name);
4988 return retval;
4989 }
4990
4991 static void
4992 debug_to_stop (struct target_ops *self, ptid_t ptid)
4993 {
4994 debug_target.to_stop (&debug_target, ptid);
4995
4996 fprintf_unfiltered (gdb_stdlog, "target_stop (%s)\n",
4997 target_pid_to_str (ptid));
4998 }
4999
5000 static void
5001 debug_to_rcmd (struct target_ops *self, char *command,
5002 struct ui_file *outbuf)
5003 {
5004 debug_target.to_rcmd (&debug_target, command, outbuf);
5005 fprintf_unfiltered (gdb_stdlog, "target_rcmd (%s, ...)\n", command);
5006 }
5007
5008 static char *
5009 debug_to_pid_to_exec_file (struct target_ops *self, int pid)
5010 {
5011 char *exec_file;
5012
5013 exec_file = debug_target.to_pid_to_exec_file (&debug_target, pid);
5014
5015 fprintf_unfiltered (gdb_stdlog, "target_pid_to_exec_file (%d) = %s\n",
5016 pid, exec_file);
5017
5018 return exec_file;
5019 }
5020
5021 static void
5022 setup_target_debug (void)
5023 {
5024 memcpy (&debug_target, &current_target, sizeof debug_target);
5025
5026 current_target.to_open = debug_to_open;
5027 current_target.to_post_attach = debug_to_post_attach;
5028 current_target.to_prepare_to_store = debug_to_prepare_to_store;
5029 current_target.deprecated_xfer_memory = deprecated_debug_xfer_memory;
5030 current_target.to_files_info = debug_to_files_info;
5031 current_target.to_insert_breakpoint = debug_to_insert_breakpoint;
5032 current_target.to_remove_breakpoint = debug_to_remove_breakpoint;
5033 current_target.to_can_use_hw_breakpoint = debug_to_can_use_hw_breakpoint;
5034 current_target.to_insert_hw_breakpoint = debug_to_insert_hw_breakpoint;
5035 current_target.to_remove_hw_breakpoint = debug_to_remove_hw_breakpoint;
5036 current_target.to_insert_watchpoint = debug_to_insert_watchpoint;
5037 current_target.to_remove_watchpoint = debug_to_remove_watchpoint;
5038 current_target.to_stopped_by_watchpoint = debug_to_stopped_by_watchpoint;
5039 current_target.to_stopped_data_address = debug_to_stopped_data_address;
5040 current_target.to_watchpoint_addr_within_range
5041 = debug_to_watchpoint_addr_within_range;
5042 current_target.to_region_ok_for_hw_watchpoint
5043 = debug_to_region_ok_for_hw_watchpoint;
5044 current_target.to_can_accel_watchpoint_condition
5045 = debug_to_can_accel_watchpoint_condition;
5046 current_target.to_terminal_init = debug_to_terminal_init;
5047 current_target.to_terminal_inferior = debug_to_terminal_inferior;
5048 current_target.to_terminal_ours_for_output
5049 = debug_to_terminal_ours_for_output;
5050 current_target.to_terminal_ours = debug_to_terminal_ours;
5051 current_target.to_terminal_save_ours = debug_to_terminal_save_ours;
5052 current_target.to_terminal_info = debug_to_terminal_info;
5053 current_target.to_load = debug_to_load;
5054 current_target.to_post_startup_inferior = debug_to_post_startup_inferior;
5055 current_target.to_insert_fork_catchpoint = debug_to_insert_fork_catchpoint;
5056 current_target.to_remove_fork_catchpoint = debug_to_remove_fork_catchpoint;
5057 current_target.to_insert_vfork_catchpoint = debug_to_insert_vfork_catchpoint;
5058 current_target.to_remove_vfork_catchpoint = debug_to_remove_vfork_catchpoint;
5059 current_target.to_insert_exec_catchpoint = debug_to_insert_exec_catchpoint;
5060 current_target.to_remove_exec_catchpoint = debug_to_remove_exec_catchpoint;
5061 current_target.to_has_exited = debug_to_has_exited;
5062 current_target.to_can_run = debug_to_can_run;
5063 current_target.to_stop = debug_to_stop;
5064 current_target.to_rcmd = debug_to_rcmd;
5065 current_target.to_pid_to_exec_file = debug_to_pid_to_exec_file;
5066 current_target.to_thread_architecture = debug_to_thread_architecture;
5067 }
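/* Sketch of the wrapping pattern used above (to_foo is a placeholder,
   not a real slot): each debug_to_FOO saves the real vector in
   debug_target, forwards the call, then logs it:

       static void
       debug_to_foo (struct target_ops *self, int arg)
       {
         debug_target.to_foo (&debug_target, arg);
         fprintf_unfiltered (gdb_stdlog, "target_foo (%d)\n", arg);
       }

   and setup_target_debug would install it with
   "current_target.to_foo = debug_to_foo;".  */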
5068 \f
5069
5070 static char targ_desc[] =
5071 "Names of targets and files being debugged.\nShows the entire \
5072 stack of targets currently in use (including the exec-file,\n\
5073 core-file, and process, if any), as well as the symbol file name.";
5074
5075 static void
5076 do_monitor_command (char *cmd,
5077 int from_tty)
5078 {
5079 if ((current_target.to_rcmd
5080 == (void (*) (struct target_ops *, char *, struct ui_file *)) tcomplain)
5081 || (current_target.to_rcmd == debug_to_rcmd
5082 && (debug_target.to_rcmd
5083 == (void (*) (struct target_ops *,
5084 char *, struct ui_file *)) tcomplain)))
5085 error (_("\"monitor\" command not supported by this target."));
5086 target_rcmd (cmd, gdb_stdtarg);
5087 }
5088
5089 /* Print the name of each layer of our target stack. */
5090
5091 static void
5092 maintenance_print_target_stack (char *cmd, int from_tty)
5093 {
5094 struct target_ops *t;
5095
5096 printf_filtered (_("The current target stack is:\n"));
5097
5098 for (t = target_stack; t != NULL; t = t->beneath)
5099 {
5100 printf_filtered (" - %s (%s)\n", t->to_shortname, t->to_longname);
5101 }
5102 }
5103
5104 /* Controls whether async mode is permitted. */
5105 int target_async_permitted = 0;
5106
5107 /* The set command writes to this variable. If the inferior is
5108 executing, target_async_permitted is *not* updated. */
5109 static int target_async_permitted_1 = 0;
5110
5111 static void
5112 set_target_async_command (char *args, int from_tty,
5113 struct cmd_list_element *c)
5114 {
5115 if (have_live_inferiors ())
5116 {
5117 target_async_permitted_1 = target_async_permitted;
5118 error (_("Cannot change this setting while the inferior is running."));
5119 }
5120
5121 target_async_permitted = target_async_permitted_1;
5122 }
5123
5124 static void
5125 show_target_async_command (struct ui_file *file, int from_tty,
5126 struct cmd_list_element *c,
5127 const char *value)
5128 {
5129 fprintf_filtered (file,
5130 _("Controlling the inferior in "
5131 "asynchronous mode is %s.\n"), value);
5132 }
5133
5134 /* Temporary copies of permission settings. */
5135
5136 static int may_write_registers_1 = 1;
5137 static int may_write_memory_1 = 1;
5138 static int may_insert_breakpoints_1 = 1;
5139 static int may_insert_tracepoints_1 = 1;
5140 static int may_insert_fast_tracepoints_1 = 1;
5141 static int may_stop_1 = 1;
5142
5143 /* Make the user-set values match the real values again. */
5144
5145 void
5146 update_target_permissions (void)
5147 {
5148 may_write_registers_1 = may_write_registers;
5149 may_write_memory_1 = may_write_memory;
5150 may_insert_breakpoints_1 = may_insert_breakpoints;
5151 may_insert_tracepoints_1 = may_insert_tracepoints;
5152 may_insert_fast_tracepoints_1 = may_insert_fast_tracepoints;
5153 may_stop_1 = may_stop;
5154 }
5155
5156 /* This one function handles (most of) the permission flags in the
5157 same way. */
5158
5159 static void
5160 set_target_permissions (char *args, int from_tty,
5161 struct cmd_list_element *c)
5162 {
5163 if (target_has_execution)
5164 {
5165 update_target_permissions ();
5166 error (_("Cannot change this setting while the inferior is running."));
5167 }
5168
5169 /* Make the real values match the user-changed values. */
5170 may_write_registers = may_write_registers_1;
5171 may_insert_breakpoints = may_insert_breakpoints_1;
5172 may_insert_tracepoints = may_insert_tracepoints_1;
5173 may_insert_fast_tracepoints = may_insert_fast_tracepoints_1;
5174 may_stop = may_stop_1;
5175 update_observer_mode ();
5176 }
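/* Flow of the permission settings (illustrative): a command such as
   "set may-insert-breakpoints off" writes the shadow variable
   may_insert_breakpoints_1; this setter then copies the shadow values
   into the real flags, or, if the inferior is running, calls
   update_target_permissions to roll the shadow values back and raises
   an error, so the user-visible setting never diverges from the value
   actually in force.  */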
5177
5178 /* Set memory write permission independently of observer mode. */
5179
5180 static void
5181 set_write_memory_permission (char *args, int from_tty,
5182 struct cmd_list_element *c)
5183 {
5184 /* Make the real values match the user-changed values. */
5185 may_write_memory = may_write_memory_1;
5186 update_observer_mode ();
5187 }
5188
5189
5190 void
5191 initialize_targets (void)
5192 {
5193 init_dummy_target ();
5194 push_target (&dummy_target);
5195
5196 add_info ("target", target_info, targ_desc);
5197 add_info ("files", target_info, targ_desc);
5198
5199 add_setshow_zuinteger_cmd ("target", class_maintenance, &targetdebug, _("\
5200 Set target debugging."), _("\
5201 Show target debugging."), _("\
5202 When non-zero, target debugging is enabled. Higher numbers are more\n\
5203 verbose. Changes do not take effect until the next \"run\" or \"target\"\n\
5204 command."),
5205 NULL,
5206 show_targetdebug,
5207 &setdebuglist, &showdebuglist);
5208
5209 add_setshow_boolean_cmd ("trust-readonly-sections", class_support,
5210 &trust_readonly, _("\
5211 Set mode for reading from readonly sections."), _("\
5212 Show mode for reading from readonly sections."), _("\
5213 When this mode is on, memory reads from readonly sections (such as .text)\n\
5214 will be read from the object file instead of from the target. This will\n\
5215 result in significant performance improvement for remote targets."),
5216 NULL,
5217 show_trust_readonly,
5218 &setlist, &showlist);
5219
5220 add_com ("monitor", class_obscure, do_monitor_command,
5221 _("Send a command to the remote monitor (remote targets only)."));
5222
5223 add_cmd ("target-stack", class_maintenance, maintenance_print_target_stack,
5224 _("Print the name of each layer of the internal target stack."),
5225 &maintenanceprintlist);
5226
5227 add_setshow_boolean_cmd ("target-async", no_class,
5228 &target_async_permitted_1, _("\
5229 Set whether gdb controls the inferior in asynchronous mode."), _("\
5230 Show whether gdb controls the inferior in asynchronous mode."), _("\
5231 Tells gdb whether to control the inferior in asynchronous mode."),
5232 set_target_async_command,
5233 show_target_async_command,
5234 &setlist,
5235 &showlist);
5236
5237 add_setshow_boolean_cmd ("may-write-registers", class_support,
5238 &may_write_registers_1, _("\
5239 Set permission to write into registers."), _("\
5240 Show permission to write into registers."), _("\
5241 When this permission is on, GDB may write into the target's registers.\n\
5242 Otherwise, any sort of write attempt will result in an error."),
5243 set_target_permissions, NULL,
5244 &setlist, &showlist);
5245
5246 add_setshow_boolean_cmd ("may-write-memory", class_support,
5247 &may_write_memory_1, _("\
5248 Set permission to write into target memory."), _("\
5249 Show permission to write into target memory."), _("\
5250 When this permission is on, GDB may write into the target's memory.\n\
5251 Otherwise, any sort of write attempt will result in an error."),
5252 set_write_memory_permission, NULL,
5253 &setlist, &showlist);
5254
5255 add_setshow_boolean_cmd ("may-insert-breakpoints", class_support,
5256 &may_insert_breakpoints_1, _("\
5257 Set permission to insert breakpoints in the target."), _("\
5258 Show permission to insert breakpoints in the target."), _("\
5259 When this permission is on, GDB may insert breakpoints in the program.\n\
5260 Otherwise, any sort of insertion attempt will result in an error."),
5261 set_target_permissions, NULL,
5262 &setlist, &showlist);
5263
5264 add_setshow_boolean_cmd ("may-insert-tracepoints", class_support,
5265 &may_insert_tracepoints_1, _("\
5266 Set permission to insert tracepoints in the target."), _("\
5267 Show permission to insert tracepoints in the target."), _("\
5268 When this permission is on, GDB may insert tracepoints in the program.\n\
5269 Otherwise, any sort of insertion attempt will result in an error."),
5270 set_target_permissions, NULL,
5271 &setlist, &showlist);
5272
5273 add_setshow_boolean_cmd ("may-insert-fast-tracepoints", class_support,
5274 &may_insert_fast_tracepoints_1, _("\
5275 Set permission to insert fast tracepoints in the target."), _("\
5276 Show permission to insert fast tracepoints in the target."), _("\
5277 When this permission is on, GDB may insert fast tracepoints.\n\
5278 Otherwise, any sort of insertion attempt will result in an error."),
5279 set_target_permissions, NULL,
5280 &setlist, &showlist);
5281
5282 add_setshow_boolean_cmd ("may-interrupt", class_support,
5283 &may_stop_1, _("\
5284 Set permission to interrupt or signal the target."), _("\
5285 Show permission to interrupt or signal the target."), _("\
5286 When this permission is on, GDB may interrupt/stop the target's execution.\n\
5287 Otherwise, any attempt to interrupt or stop will be ignored."),
5288 set_target_permissions, NULL,
5289 &setlist, &showlist);
5290 }